Showing 1 changed file with 270 additions and 0 deletions.
@@ -0,0 +1,270 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
"import cv2\n", | ||
"import numpy as np\n", | ||
"from PIL import Image\n", | ||
"from ultralytics import YOLO" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 4, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"model_path = \"./models/yolov8n.pt\"\n", | ||
"model = YOLO(model_path)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 15, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"img_path_1 = \"./photos/apprentices.jpg\"\n", | ||
"img_path_2 = \"./photos/digital_field_worker.jpg\"\n", | ||
"img_path_3 = \"./photos/JS in sunset.jpg\"" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 5, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"YOLOv8n summary: 225 layers, 3,157,200 parameters, 0 gradients, 8.9 GFLOPs\n" | ||
] | ||
}, | ||
{ | ||
"data": { | ||
"text/plain": [ | ||
"(225, 3157200, 0, 8.8575488)" | ||
] | ||
}, | ||
"execution_count": 5, | ||
"metadata": {}, | ||
"output_type": "execute_result" | ||
} | ||
], | ||
"source": [ | ||
"model.info()" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 23, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"\n", | ||
"image 1/1 /Users/TSUNDV/equinor/inspection-data-analyzer/anonymizer/photos/apprentices.jpg: 448x640 2 persons, 46.7ms\n", | ||
"Speed: 1.5ms preprocess, 46.7ms inference, 0.3ms postprocess per image at shape (1, 3, 448, 640)\n", | ||
"Results saved to \u001b[1m/Users/TSUNDV/equinor/inspection-data-analyzer/runs/detect/predict\u001b[0m\n", | ||
"2 labels saved to /Users/TSUNDV/equinor/inspection-data-analyzer/runs/detect/predict/labels\n", | ||
"[ultralytics.engine.results.Results object with attributes:\n", | ||
"\n", | ||
"boxes: ultralytics.engine.results.Boxes object\n", | ||
"keypoints: None\n", | ||
"masks: None\n", | ||
"names: {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}\n", | ||
"obb: None\n", | ||
"orig_img: array([[[155, 159, 164],\n", | ||
" [150, 154, 159],\n", | ||
" [155, 160, 163],\n", | ||
" ...,\n", | ||
" [ 48, 57, 66],\n", | ||
" [ 50, 59, 68],\n", | ||
" [ 51, 60, 69]],\n", | ||
"\n", | ||
" [[156, 160, 165],\n", | ||
" [154, 158, 163],\n", | ||
" [156, 161, 164],\n", | ||
" ...,\n", | ||
" [ 52, 61, 70],\n", | ||
" [ 57, 66, 75],\n", | ||
" [ 53, 62, 71]],\n", | ||
"\n", | ||
" [[154, 158, 163],\n", | ||
" [154, 158, 163],\n", | ||
" [153, 157, 162],\n", | ||
" ...,\n", | ||
" [ 51, 60, 69],\n", | ||
" [ 53, 62, 71],\n", | ||
" [ 52, 61, 70]],\n", | ||
"\n", | ||
" ...,\n", | ||
"\n", | ||
" [[ 88, 82, 83],\n", | ||
" [ 93, 87, 88],\n", | ||
" [ 94, 88, 89],\n", | ||
" ...,\n", | ||
" [ 84, 91, 100],\n", | ||
" [ 79, 86, 95],\n", | ||
" [ 80, 87, 96]],\n", | ||
"\n", | ||
" [[ 89, 83, 84],\n", | ||
" [ 93, 87, 88],\n", | ||
" [ 98, 92, 93],\n", | ||
" ...,\n", | ||
" [ 83, 90, 99],\n", | ||
" [ 82, 89, 98],\n", | ||
" [ 84, 91, 100]],\n", | ||
"\n", | ||
" [[ 94, 88, 89],\n", | ||
" [ 97, 91, 92],\n", | ||
" [100, 94, 95],\n", | ||
" ...,\n", | ||
" [ 83, 90, 99],\n", | ||
" [ 83, 90, 99],\n", | ||
" [ 86, 93, 102]]], dtype=uint8)\n", | ||
"orig_shape: (1333, 2000)\n", | ||
"path: '/Users/TSUNDV/equinor/inspection-data-analyzer/anonymizer/photos/apprentices.jpg'\n", | ||
"probs: None\n", | ||
"save_dir: '/Users/TSUNDV/equinor/inspection-data-analyzer/runs/detect/predict'\n", | ||
"speed: {'preprocess': 1.516103744506836, 'inference': 46.656131744384766, 'postprocess': 0.3299713134765625}]\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"# Predict\n", | ||
"results = model(img_path_1)\n", | ||
"print(results)" | ||
] | ||
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the YOLOv8 nano segmentation model for mask-based anonymization\n",
    "model_seg = YOLO(\"./models/yolov8n-seg.pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
"image_path = \"./photos/JS in sunset.jpg\"\n", | ||
"im1 = cv2.imread(image_path)\n", | ||
"results = model_seg.predict(source=im1, save=True, save_txt=True)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 49, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"\n", | ||
"0: 448x640 1 person, 47.7ms\n", | ||
"Speed: 1.6ms preprocess, 47.7ms inference, 0.9ms postprocess per image at shape (1, 3, 448, 640)\n", | ||
"Results saved to \u001b[1m/Users/TSUNDV/equinor/inspection-data-analyzer/runs/segment/predict4\u001b[0m\n", | ||
"1 label saved to /Users/TSUNDV/equinor/inspection-data-analyzer/runs/segment/predict4/labels\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"im1 = cv2.imread(img_path_2)\n", | ||
"results = model_seg.predict(source=im1, classes=[0], save=True) #, stream=True)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 79, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"\n", | ||
"0: 448x640 (no detections), 43.6ms\n", | ||
"No masks found\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"raw_image = np.array(Image.open(img_path_3))\n", | ||
"CLASS_PERSON = 0\n", | ||
"\n", | ||
"results = model_seg.predict(source=raw_image, classes=[CLASS_PERSON], stream=True)\n", | ||
"\n", | ||
"image_anonymized = raw_image\n", | ||
"\n", | ||
"for result in results:\n", | ||
" if not result.masks:\n", | ||
" print(\"No masks found\")\n", | ||
" break\n", | ||
"\n", | ||
" masks = result.masks.data.cpu().numpy()\n", | ||
" for mask in masks:\n", | ||
" h, w, _ = raw_image.shape\n", | ||
" mask = cv2.resize(mask, (w, h))\n", | ||
"\n", | ||
" colored_mask = np.expand_dims(mask, 0).repeat(3, axis=0)\n", | ||
" colored_mask = np.moveaxis(colored_mask, 0, -1)\n", | ||
"\n", | ||
" color = (0, 0, 0)\n", | ||
" masked = np.ma.MaskedArray(image_anonymized, mask=colored_mask, fill_value=color)\n", | ||
" image_anonymized = masked.filled()\n", | ||
"\n", | ||
"im = Image.fromarray(image_anonymized)\n", | ||
"im.save(\"your_file3.jpeg\")\n" | ||
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
} |