[tool.poetry]
name = "wyzely-detect"
version = "0.2.1"
description = "Recognize faces/objects in a video stream (from a webcam or a security camera) and send notifications to your devices"
authors = ["slashtechno <[email protected]>"]
repository = "https://github.com/slashtechno/wyzely-detect"
keywords = ["object-detection", "face-detection", "wyze", "security", "yolov8", "unified-push"]
license = "MIT"
readme = "README.md"
packages = [{include = "wyzely_detect"}]

[tool.poetry.dependencies]
# Works on 3.10 and 3.11, at least in my testing
python = ">=3.10, <3.12"
python-dotenv = "^1.0.0"
httpx = "^0.25.0"
opencv-python = "^4.8.1.78"
ultralytics = "^8.0.190"
hjson = "^3.1.0"
numpy = "^1.23.2"
# https://github.com/python-poetry/poetry/issues/6409#issuecomment-1911735833
# If GPU support doesn't work, `poetry install -E cuda --with gpu` will force torch to be installed from the GPU PyTorch repo
# However, PyPI's `torch` has CUDA 12.1 support by default on Linux, so in that case this should not be needed.
torch = [
{version = "^2.2.1", source = "pypi", markers = "extra!='cuda' and (platform_system=='Linux' or platform_system=='Darwin')"},
{version = "^2.2.1", source = "pytorch-cpu", markers = "extra!='cuda' and platform_system=='Windows'"},
]
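# Example install invocations (a sketch of the intended workflows, not an exhaustive list):
#   poetry install                      # default: PyPI wheels (CUDA-enabled torch on Linux, CPU wheels on Windows)
#   poetry install -E cuda --with gpu   # force CUDA wheels from the explicit PyTorch source defined below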
# https://stackoverflow.com/a/76477590/18270659
# https://discuss.tensorflow.org/t/tensorflow-io-gcs-filesystem-with-windows/18849/4
# https://github.com/python-poetry/poetry/issues/8271#issuecomment-1712020965
# Might be able to remove this version constraint later
# Working versions:
# Python 3.10.12 and 3.10.5 both work
# CUDA version: 12.2
# cuDNN version: 8.8.1
# CUDA/cuDNN installed from the NVIDIA website; nvidia-cuda-toolkit is not installed, but the default Pop!_OS drivers are
absl-py = "^2.1.0"
tensorflow = {version = "^2.13.0", markers = "extra!='cuda'"}
# TODO: Change platform to markers
tensorflow-macos = { version = "^2.13.0", platform = "darwin", markers = "platform_machine=='arm64'" }
tensorflow-intel = { version = "^2.13.0", platform = "win32" }
tensorflow-io-gcs-filesystem = [
{ version = "< 0.32.0", markers = "platform_system == 'Windows'" }
]
deepface = "^0.0.79"
prettytable = "^3.9.0"

[tool.poetry.group.gpu]
optional = true

[tool.poetry.group.gpu.dependencies]
torch = {version = "^2.2.1", source = "pytorch-cu121", markers = "extra=='cuda'"}
tensorflow = {version = "^2.14.0", extras = ["and-cuda"], markers = "extra=='cuda' and platform_system == 'Linux'"}

[tool.poetry.extras]
# Might be better to rename this to nocpu since it's more accurate
cuda = []
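# Note: this extra carries no packages of its own; it exists only to flip the
# `extra=='cuda'` markers on the torch/tensorflow dependencies above and in the gpu group.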

[[tool.poetry.source]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
priority = "explicit"

[[tool.poetry.source]]
name = "pytorch-cu121"
url = "https://download.pytorch.org/whl/cu121"
priority = "explicit"
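# Both sources use `priority = "explicit"`, so Poetry only consults them for dependencies
# that reference them via `source = "..."`; everything else still resolves from PyPI.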

[tool.poetry.group.dev.dependencies]
black = "^23.9.1"
ruff = "^0.0.291"
ipykernel = "^6.25.2"
nbconvert = "^7.9.2"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
# Longer than `black`'s default of 88 to make long comments less of a headache
# Where possible, `black` will still format code to 88 characters
# Setting ruff's limit to 135 allows longer lines that can't be auto-formatted
line-length = 135
extend-select = ["FIX002"]

[tool.poetry.scripts]
wyzely-detect = "wyzely_detect.__main__:main"
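# After `poetry install`, this entry point should be runnable with `poetry run wyzely-detect`
# (or plain `wyzely-detect` inside the activated virtual environment).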