Skip to content

Commit

Permalink
Add script to evaluate predicted time end2end
Browse files Browse the repository at this point in the history
  • Loading branch information
akucia committed Sep 10, 2022
1 parent a10ae7b commit 1c4179a
Show file tree
Hide file tree
Showing 10 changed files with 450 additions and 79 deletions.
3 changes: 2 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
update-dataset:
python scripts/download-dataset.py 1 --export-file datasets/watch-faces.json

update-image-cache: update-dataset
dvc repro -s download-images
add-images:
python scripts/add-images-to-label-studio-project.py --source-dir ./new-images --label-studio-project ${PROJECT_ID} --label-studio-host ${LABEL_STUDIO_URL} --label-studio-api-token ${LABEL_STUDIO_ACCESS_TOKEN} --n-images 50 --shuffle-images

Expand Down
86 changes: 53 additions & 33 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,43 +1,63 @@
# Metrics
| Path | AP @IoU=0.50 | AP @IoU=0.50:0.95 | AP @IoU=0.75 | AP @IoU=0.95 | AR @IoU=0.50 | AR @IoU=0.50:0.95 | AR @IoU=0.75 | AR @maxDets=1 | AR @maxDets=10 | AR @maxDets=100 | Num Images | eval.iou_score | eval.loss | step | train.iou_score | train.loss |
|----------------------------------|----------------|---------------------|----------------|----------------|----------------|---------------------|----------------|-----------------|------------------|-------------------|--------------|------------------|-------------|--------|-------------------|--------------|
| metrics/detector.json | - | - | - | - | - | - | - | - | - | - | - | - | 0.42029 | 59 | - | 0.04062 |
| metrics/detector/coco_train.json | 0.45856 | 0.32698 | 0.40273 | -1.0 | - | - | - | 0.42 | 0.455 | 0.455 | 127 | - | - | - | - | - |
| metrics/detector/coco_val.json | 1.0 | 0.7297 | 0.83168 | -1.0 | - | - | - | 0.75 | 0.75 | 0.75 | 6 | - | - | - | - | - |
| metrics/keypoint.json | - | - | - | - | - | - | - | - | - | - | - | 0.62028 | 0.37972 | 59 | 0.89873 | 0.10617 |
| metrics/keypoint/coco_train.json | 0.52226 | 0.36543 | 0.31471 | - | 0.65 | 0.50333 | 0.45833 | - | - | - | 127 | - | - | - | - | - |
| metrics/keypoint/coco_val.json | 1.0 | 0.75399 | 0.63168 | - | 1.0 | 0.78333 | 0.66667 | - | - | - | 6 | - | - | - | - | - |
| metrics/segmentation.json | - | - | - | - | - | - | - | - | - | - | - | 0.44843 | 0.38081 | 59 | 0.74792 | 0.11398 |

# Graph
```mermaid
flowchart TD
node1[checkpoints/detector.dvc]
node2[checkpoints/keypoint.dvc]
node3[checkpoints/segmentation.dvc]
node4[datasets/watch-faces.json.dvc]
node5[download-images]
node6[eval-detector]
node7[eval-keypoint]
node8[train-detector]
node9[train-keypoint]
node10[train-segmentation]
node11[update-metrics]
node1-->node8
node2-->node9
node3-->node10
node1["checkpoints/detector.dvc"]
node2["checkpoints/keypoint.dvc"]
node3["checkpoints/segmentation.dvc"]
node4["datasets/watch-faces.json.dvc"]
node5["download-images"]
node6["eval-detector"]
node7["eval-end-2-end"]
node8["eval-keypoint"]
node9["train-detector"]
node10["train-keypoint"]
node11["train-segmentation"]
node12["update-metrics"]
node1-->node9
node2-->node10
node3-->node11
node4-->node5
node5-->node8
node5-->node7
node5-->node9
node5-->node10
node6-->node11
node7-->node11
node8-->node6
node8-->node7
node8-->node11
node5-->node11
node6-->node12
node7-->node12
node8-->node12
node9-->node6
node9-->node7
node9-->node11
node10-->node11
node9-->node8
node9-->node12
node10-->node7
node10-->node8
node10-->node12
node11-->node7
node11-->node12
```
# Metrics
| Path | train.1-min_acc | train.10-min_acc | train.60-min_acc | val.1-min_acc | val.10-min_acc | val.60-min_acc |
|--------------------------------|-------------------|--------------------|--------------------|-----------------|------------------|------------------|
| metrics/end_2_end_summary.json | 0.066 | 0.082 | 0.168 | 0.013 | 0.026 | 0.039 |

| Path | AP @IoU=0.50 | AP @IoU=0.50:0.95 | AP @IoU=0.75 | AP @IoU=0.95 | AR @maxDets=1 | AR @maxDets=10 | AR @maxDets=100 | Num Images | eval.loss | step | train.loss |
|----------------------------------|----------------|---------------------|----------------|----------------|-----------------|------------------|-------------------|--------------|-------------|--------|--------------|
| metrics/detector.json | - | - | - | - | - | - | - | - | 0.42 | 59 | 0.041 |
| metrics/detector/coco_train.json | 0.459 | 0.327 | 0.403 | -1.0 | 0.42 | 0.455 | 0.455 | 127 | - | - | - |
| metrics/detector/coco_val.json | 1.0 | 0.73 | 0.832 | -1.0 | 0.75 | 0.75 | 0.75 | 6 | - | - | - |

| Path | AP @IoU=0.50 | AP @IoU=0.50:0.95 | AP @IoU=0.75 | AR @IoU=0.50 | AR @IoU=0.50:0.95 | AR @IoU=0.75 | Num Images | eval.iou_score | eval.loss | step | train.iou_score | train.loss |
|----------------------------------|----------------|---------------------|----------------|----------------|---------------------|----------------|--------------|------------------|-------------|--------|-------------------|--------------|
| metrics/keypoint.json | - | - | - | - | - | - | - | 0.62 | 0.38 | 59 | 0.899 | 0.106 |
| metrics/keypoint/coco_train.json | 0.522 | 0.365 | 0.315 | 0.65 | 0.503 | 0.458 | 127 | - | - | - | - | - |
| metrics/keypoint/coco_val.json | 1.0 | 0.754 | 0.632 | 1.0 | 0.783 | 0.667 | 6 | - | - | - | - | - |

| Path | eval.iou_score | eval.loss | step | train.iou_score | train.loss |
|---------------------------|------------------|-------------|--------|-------------------|--------------|
| metrics/segmentation.json | 0.448 | 0.381 | 59 | 0.748 | 0.114 |

# Metrics definition
The final metric for the entire system is "x-min accuracy": the fraction of system predictions that are accurate to within x minutes. Example for x = 1:
$$ \text{1-min-acc} = \frac{\left|\{\, i : |t_i - \hat{t}_i| < 1\ \text{min} \,\}\right|}{N_{\text{samples}}} $$
# Demo - version 2

<img src="example_data/IMG_0039_render.jpg?raw=true" width=400> <img src="example_data/IMG_0040_render.jpg?raw=true" width=400>
Expand Down
48 changes: 38 additions & 10 deletions dvc.lock
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@ stages:
cmd: python scripts/download-images.py datasets/watch-faces.json --concurrent
deps:
- path: datasets/watch-faces.json
md5: e00b9915f01e7e120532a6d54a61c252
size: 585871
md5: b91771b4afbcc268f6e3b76ff195e06b
size: 1037986
- path: scripts/download-images.py
md5: a037cfab1a482a356f1d7d62804041d3
size: 3961
Expand All @@ -15,16 +15,16 @@ stages:
size: 16911707
nfiles: 46
- path: datasets/train
md5: 9a3f5b148227c37600c3af6a9b0b0161.dir
size: 82169507
nfiles: 154
md5: c517cd143a36c8444e7e19718ab7153c.dir
size: 94131818
nfiles: 218
- path: datasets/val
md5: 8ac3dabbeb73f54d330cb1c0f26e8d62.dir
size: 17982237
nfiles: 36
- path: datasets/watch-faces-local.json
md5: db7c3dda8302f2f04fc4d627941d0551
size: 581754
md5: 7bf58251585ed74a9dfbd79c2c97b908
size: 1031086
train-detector:
cmd: python watch_recognition/watch_recognition/train/object_detection_task.py --epochs
60 --batch-size 16 --confidence-threshold 0.5 --seed 42
Expand Down Expand Up @@ -139,9 +139,12 @@ stages:
cmd: python scripts/update-metrics-table-and-graph.py
deps:
- path: metrics
md5: f9487ab092c4acc02321245bec74e596.dir
size: 230176
nfiles: 36
md5: 54eb7ebdea76a1446a0446df24363dae.dir
size: 190697
nfiles: 33
- path: scripts/update-metrics-table-and-graph.py
md5: 6f25059ae2169ced7fd4c1d01fd04ea0
size: 2094
train-keypoint:
cmd: python watch_recognition/watch_recognition/train/heatmap_regression_task.py
--epochs 60 --batch-size 16 --confidence-threshold 0.5 --seed 42
Expand Down Expand Up @@ -290,3 +293,28 @@ stages:
md5: 3c12c44389ed179e56428f1ef23564cc.dir
size: 36518038
nfiles: 5
eval-end-2-end:
cmd: python watch_recognition/watch_recognition/eval/end-to-end-eval.py --concurrent
deps:
- path: datasets/watch-faces-local.json
md5: 7bf58251585ed74a9dfbd79c2c97b908
size: 1031086
- path: models/detector
md5: c8ee744e619baf859a79d31740caddad.dir
size: 155280910
nfiles: 6
- path: models/keypoint
md5: 3d14403bb1c0811c028e3ba8da69b497.dir
size: 36542592
nfiles: 6
- path: models/segmentation
md5: 3c12c44389ed179e56428f1ef23564cc.dir
size: 36518038
nfiles: 5
outs:
- path: metrics/end_2_end_eval.csv
md5: f521a679167cdc0eadd0c28342370aab
size: 14982
- path: metrics/end_2_end_summary.json
md5: 710a0a62121610d0eabb28bd982eb7ef
size: 298
13 changes: 13 additions & 0 deletions dvc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,20 @@ stages:
persist: true
- models/segmentation/:
persist: true
eval-end-2-end:
cmd: python watch_recognition/watch_recognition/eval/end-to-end-eval.py --concurrent
deps:
- watch_recognition/watch_recognition/eval/end-to-end-eval.py
- models/detector
- models/keypoint
- models/segmentation
- datasets/watch-faces-local.json

metrics:
- metrics/end_2_end_eval.csv
- metrics/end_2_end_summary.json
update-metrics:
cmd: python scripts/update-metrics-table-and-graph.py
deps:
- scripts/update-metrics-table-and-graph.py
- metrics
2 changes: 2 additions & 0 deletions metrics/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
/end_2_end_eval.csv
/end_2_end_summary.json
74 changes: 67 additions & 7 deletions scripts/update-metrics-table-and-graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,78 @@
# Rebuild the auto-generated "# Graph" and "# Metrics" sections of README.md
# from the current DVC pipeline state. Everything before "# Graph" and
# everything from "# Demo - version 2" onward is left untouched.
import subprocess

with open("README.md") as f:
    readme_lines = f.readlines()

# Locate the managed section headers. list.index raises ValueError (and the
# README is left unmodified) if an expected header is missing.
graph_start = readme_lines.index("# Graph\n")
# Search for "# Metrics" only AFTER the graph header: a stale "# Metrics"
# section can also exist before "# Graph" (this commit's README shows one),
# and anchoring to it would splice the new tables into the wrong place.
metrics_start = readme_lines.index("# Metrics\n", graph_start + 1)
metrics_end = readme_lines.index("# Demo - version 2\n")

# Render the pipeline DAG as a markdown/mermaid block.
# check=True: fail loudly instead of writing an empty section on dvc error.
graph_md = [
    subprocess.run(
        ["dvc", "dag", "--md"], stdout=subprocess.PIPE, check=True
    ).stdout.decode("utf-8")
]

# One table per model/stage group; separate tables avoid a single huge table
# whose columns are mostly "-" for unrelated metrics.
metric_file_groups = [
    ["metrics/end_2_end_summary.json"],
    [
        "metrics/detector.json",
        "metrics/detector/coco_train.json",
        "metrics/detector/coco_val.json",
    ],
    [
        "metrics/keypoint.json",
        "metrics/keypoint/coco_train.json",
        "metrics/keypoint/coco_val.json",
    ],
    ["metrics/segmentation.json"],
]
metrics_tables_md = []
for files in metric_file_groups:
    cmd = ["dvc", "metrics", "show", *files, "--precision", "3", "--md"]
    print(" ".join(cmd))  # echo the command for CI-log traceability
    metrics_tables_md.append(
        subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout.decode(
            "utf-8"
        )
    )

# Reassemble: <prefix> + "# Graph" + dag + "# Metrics" + tables + <suffix>.
# NOTE(review): any stale content sitting before "# Graph" is preserved
# verbatim by this slicing — remove it manually if present.
new_readme = (
    readme_lines[:graph_start]
    + [readme_lines[graph_start]]
    + graph_md
    + [readme_lines[metrics_start]]
    + metrics_tables_md
    + readme_lines[metrics_end:]
)

with open("README.md", "w") as f:
    f.writelines(new_readme)
2 changes: 1 addition & 1 deletion watch_recognition/watch_recognition/data_preprocessing.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def load_keypoints_data_as_kp(

def load_image(
image_path: str,
image_size: Optional[Tuple[int, int]],
image_size: Optional[Tuple[int, int]] = None,
preserve_aspect_ratio: bool = False,
):
if image_path.startswith("gs://"):
Expand Down
Loading

0 comments on commit 1c4179a

Please sign in to comment.