diff --git a/README.md b/README.md index 47e8c4d4..3446f579 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ More results and models are available in the [docs/MODEL_ZOO.md](docs/MODEL_ZOO.
-Please read [PoseFlow/README.md](PoseFlow/) for details. +Please read [trackers/README.md](trackers/) for details. ### CrowdPosediff --git a/alphapose/utils/vis.py b/alphapose/utils/vis.py index 8acda220..e1e0747f 100644 --- a/alphapose/utils/vis.py +++ b/alphapose/utils/vis.py @@ -136,7 +136,7 @@ def vis_frame_fast(frame, im_res, opt, format='coco'): if 'box' in human.keys(): bbox = human['box'] else: - from PoseFlow.poseflow_infer import get_box + from trackers.PoseFlow.poseflow_infer import get_box keypoints = [] for n in range(kp_scores.shape[0]): keypoints.append(float(kp_preds[n, 0])) @@ -284,7 +284,7 @@ def vis_frame(frame, im_res, opt, format='coco'): bbox = human['box'] bbox = [bbox[0], bbox[0]+bbox[2], bbox[1], bbox[1]+bbox[3]]#xmin,xmax,ymin,ymax else: - from PoseFlow.poseflow_infer import get_box + from trackers.PoseFlow.poseflow_infer import get_box keypoints = [] for n in range(kp_scores.shape[0]): keypoints.append(float(kp_preds[n, 0])) diff --git a/alphapose/utils/writer.py b/alphapose/utils/writer.py index 975035ba..d499aaf9 100644 --- a/alphapose/utils/writer.py +++ b/alphapose/utils/writer.py @@ -44,7 +44,7 @@ def __init__(self, cfg, opt, save_video=False, os.mkdir(opt.outputpath + '/vis') if opt.pose_flow: - from PoseFlow.poseflow_infer import PoseFlowWrapper + from trackers.PoseFlow.poseflow_infer import PoseFlowWrapper self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow')) def start_worker(self, target): diff --git a/docs/run.md b/docs/run.md index f6b6c1b1..52059107 100644 --- a/docs/run.md +++ b/docs/run.md @@ -25,6 +25,9 @@ Here, we first list the flags and other parameters you can tune. Default paramet - `--min_box_area`: Min box area to filter out, you can set it like 100 to filter out small people. - `--gpus`: Choose which cuda device to use by index and input comma to use multi gpus, e.g. 0,1,2,3. 
(input -1 for cpu only) +- `--pose_track`: Enable tracking pipeline with human re-id feature, it is currently the best-performing pose tracker +- `--pose_flow`: This flag will be deprecated. It enables the old tracking version of PoseFlow. + All the flags available here: [link](../scripts/demo_inference.py#L22) @@ -32,4 +35,4 @@ All the flags available here: [link](../scripts/demo_inference.py#L22) 1. yolo detector config is [here](../detector/yolo_cfg.py) - `CONFIDENCE`: Confidence threshold for human detection. Lower the value can improve the final accuracy but decrease the speed. Default is 0.05. - `NMS_THRES`: NMS threshold for human detection. Increase the value can improve the final accuracy but decrease the speed. Default is 0.6. -- `INP_DIM`: The input size of detection network. The inp_dim should be multiple of 32. Default is 608. Increase it may improve the accuracy. \ No newline at end of file +- `INP_DIM`: The input size of detection network. The inp_dim should be multiple of 32. Default is 608. Increase it may improve the accuracy. 
diff --git a/PoseFlow/README.md b/trackers/PoseFlow/README.md similarity index 100% rename from PoseFlow/README.md rename to trackers/PoseFlow/README.md diff --git a/PoseFlow/alpha-pose-results-sample.json b/trackers/PoseFlow/alpha-pose-results-sample.json similarity index 100% rename from PoseFlow/alpha-pose-results-sample.json rename to trackers/PoseFlow/alpha-pose-results-sample.json diff --git a/PoseFlow/matching.py b/trackers/PoseFlow/matching.py similarity index 100% rename from PoseFlow/matching.py rename to trackers/PoseFlow/matching.py diff --git a/PoseFlow/parallel_process.py b/trackers/PoseFlow/parallel_process.py similarity index 100% rename from PoseFlow/parallel_process.py rename to trackers/PoseFlow/parallel_process.py diff --git a/PoseFlow/poseflow_infer.py b/trackers/PoseFlow/poseflow_infer.py similarity index 100% rename from PoseFlow/poseflow_infer.py rename to trackers/PoseFlow/poseflow_infer.py diff --git a/PoseFlow/posetrack1.gif b/trackers/PoseFlow/posetrack1.gif similarity index 100% rename from PoseFlow/posetrack1.gif rename to trackers/PoseFlow/posetrack1.gif diff --git a/PoseFlow/posetrack2.gif b/trackers/PoseFlow/posetrack2.gif similarity index 100% rename from PoseFlow/posetrack2.gif rename to trackers/PoseFlow/posetrack2.gif diff --git a/PoseFlow/posetrack_data b/trackers/PoseFlow/posetrack_data similarity index 100% rename from PoseFlow/posetrack_data rename to trackers/PoseFlow/posetrack_data diff --git a/PoseFlow/poseval b/trackers/PoseFlow/poseval similarity index 100% rename from PoseFlow/poseval rename to trackers/PoseFlow/poseval diff --git a/PoseFlow/requirements.txt b/trackers/PoseFlow/requirements.txt similarity index 100% rename from PoseFlow/requirements.txt rename to trackers/PoseFlow/requirements.txt diff --git a/PoseFlow/tracker-baseline.py b/trackers/PoseFlow/tracker-baseline.py similarity index 100% rename from PoseFlow/tracker-baseline.py rename to trackers/PoseFlow/tracker-baseline.py diff --git 
a/PoseFlow/tracker-general.py b/trackers/PoseFlow/tracker-general.py similarity index 99% rename from PoseFlow/tracker-general.py rename to trackers/PoseFlow/tracker-general.py index 1e1b9e4a..2811c65d 100644 --- a/PoseFlow/tracker-general.py +++ b/trackers/PoseFlow/tracker-general.py @@ -175,7 +175,7 @@ def load_pose_boxes(img_name): tasks.append((img1_path,img2_path, image_dir, frame_id, next_frame_id)) # do the matching parallel - parallel_process(tasks, orb_matching, n_jobs=16) + parallel_process(tasks, orb_matching, n_jobs=8) print("Start pose tracking...\n") # tracking process diff --git a/PoseFlow/utils.py b/trackers/PoseFlow/utils.py similarity index 100% rename from PoseFlow/utils.py rename to trackers/PoseFlow/utils.py diff --git a/trackers/README.md b/trackers/README.md index 20493f58..cc9ad1c6 100644 --- a/trackers/README.md +++ b/trackers/README.md @@ -1,18 +1,34 @@ -# Pose Tracking -## Models +# Pose Tracking Module for AlphaPose + + +## Human-ReID based tracking (Recommended) +Currently the best performance tracking model. Paper coming soon. + +### Getting started Download [human reid model](https://mega.nz/#!YTZFnSJY!wlbo_5oa2TpDAGyWCTKTX1hh4d6DvJhh_RUA2z6i_so) and place it into `./trackers/weights/`. +Then simply run alphapose with additional flag `--pose_track` + You can try different person reid model by modifing `cfg.arch` and `cfg.loadmodel` in `./trackers/tracker_cfg.py`. If you want to train your own reid model, please refer to this [project](https://github.com/KaiyangZhou/deep-person-reid) -## Demo + +### Demo ``` bash ./scripts/inference.sh ${CONFIG} ${CHECKPOINT} ${VIDEO_NAME} ${OUTPUT_DIR}, --pose_track ``` -## Todo +### Todo - [] Evaluation Tools for PoseTrack - [] More Models - [] Training code for [PoseTrack Dataset](https://posetrack.net/) +## PoseFlow human tracking +This tracker is based on our BMVC 2018 paper PoseFlow. 
+ +### Getting started + +Simply run alphapose with additional flag `--pose_flow` +### More info +For more info, please refer to [PoseFlow/README.md](PoseFlow/) \ No newline at end of file