Skip to content

Commit

Permalink
basic types
Browse files Browse the repository at this point in the history
  • Loading branch information
amcc committed Mar 15, 2024
1 parent d223b22 commit 8618c52
Show file tree
Hide file tree
Showing 29 changed files with 470,840 additions and 4 deletions.
Binary file modified .DS_Store
Binary file not shown.
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
# easydetect

A collection of machine learning pose detection examples that make pose detection easy to use.
A collection of machine learning pose detection examples that make pose detection easy to use.

Have a look at the examples to see how to easily use ml5js PoseNet and Google MediaPipe code:

## MediaPipe
[MediaPipe pose landmark](mediaPipe/poseLandmark/)

## Examples
[ml5js posenet](examples/easy-ml5js-posenet)
[MediaPipe Landmark Detection](examples/easy-mediapipe-landmark-detection)
[MediaPipe Handpose](examples/easy-mediapipe-handpose)
[MediaPipe Handpose](examples/easy-mediapipe-handpose)
1 change: 0 additions & 1 deletion examples/easy-ml5js-posenet/easyPoseNet.js
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ function startPoseNet(videoOrImage) {
}

function addProps(obj) {
console.log(obj);
let result = "";
for (const i in obj) {
// Object.hasOwn() is used to exclude properties from the object's
Expand Down
26 changes: 25 additions & 1 deletion examples/easy-ml5js-posenet/sketch.js
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,30 @@ function setup() {
}

function draw() {
// define a list of variables you can use for x/y coordinates
// confused about this syntax ? check out the "destructuring assignment" in the JavaScript documentation
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Destructuring_assignment

let {
nose,
leftEye,
rightEye,
leftEar,
rightEar,
leftShoulder,
rightShoulder,
leftElbow,
rightElbow,
leftWrist,
rightWrist,
leftHip,
rightHip,
leftKnee,
rightKnee,
leftAnkle,
rightAnkle,
} = person;

background(220);

// display a video or image
Expand All @@ -48,7 +72,7 @@ function draw() {
noStroke();
fill("red");
fill(255, 255, 0);
circle(person.nose.x, person.nose.y, 50);
circle(nose.x, nose.y, 50);

fill(255, 0, 0);
keypoints.forEach((keypoint) => {
Expand Down
31 changes: 31 additions & 0 deletions mediaPipe/faceLandmarks/index.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
<!DOCTYPE html>
<html lang="en">
<head>
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.8.0/p5.js"></script>
<link rel="stylesheet" type="text/css" href="style.css" />
<meta charset="utf-8" />
<script type="module">
import { mediaPipe } from "./mediaPipe.js";
// this simple script is used to import the functions from mediaPipe.js
// rather than heavily modify mediaPipe.js to work with p5.js
// this is a method to expose mediaPipe.js functions to the global scope

// A single object called "mediaPipe" is put into global scope

// within the "mediaPipe" object we can access all the predictions as follows:
// mediaPipe.predictWebcam() <- pass this your video
// mediaPipe.faceLandmarks <- per-face lists of face landmark points
// mediaPipe.faceBlendshapes <- face blendshape scores
// mediaPipe.parts <- named landmark index groups (eyes, lips, face oval, ...)

// make mediaPipe available everywhere
window.mediaPipe = mediaPipe;
</script>
</head>
<body>
<main>
<ul id="predictions"></ul>
</main>
<script src="sketch.js"></script>
</body>
</html>
72 changes: 72 additions & 0 deletions mediaPipe/faceLandmarks/mediaPipe.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import {
FaceLandmarker,
FilesetResolver,
} from "https://cdn.skypack.dev/@mediapipe/[email protected]";

// make an object to export
// at the end of the file this has the predictWebcam function added
// it is then exported for use in the sketch.js file
// - faceLandmarks: latest detection results, refreshed each new video frame
// - faceBlendshapes: blendshape scores (outputFaceBlendshapes is enabled below)
// - parts: named landmark index groups (eyes, lips, ...) filled in predictWebcam
const mediaPipe = {
  faceLandmarks: [],
  faceBlendshapes: [],
  parts: [],
};

// Assigned once the model finishes loading in createFaceLandmarker();
// predictWebcam skips detection while this is still undefined.
let faceLandmarker;
// Passed as the runningMode option to FaceLandmarker.createFromOptions below.
let runningMode = "IMAGE";
// let video = null;
// currentTime of the last processed frame — used so detection only runs
// when the video has actually advanced to a new frame.
let lastVideoTime = -1;

// Before we can use the FaceLandmarker class we must wait for it to finish
// loading. Machine Learning models can be large and take a moment to
// get everything needed to run.
const createFaceLandmarker = async () => {
  // Resolve the WASM fileset that backs the MediaPipe vision tasks.
  const vision = await FilesetResolver.forVisionTasks(
    "https://cdn.jsdelivr.net/npm/@mediapipe/[email protected]/wasm"
  );
  faceLandmarker = await FaceLandmarker.createFromOptions(vision, {
    baseOptions: {
      modelAssetPath: `https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task`,
      delegate: "GPU",
    },
    outputFaceBlendshapes: true,
    runningMode,
    numFaces: 1,
  });
};
// Kick off model loading. Report failures instead of leaving the promise
// floating — otherwise a network/model error is silently swallowed and
// predictWebcam simply never produces results.
createFaceLandmarker().catch((err) => {
  console.error("Failed to load FaceLandmarker:", err);
});

const predictWebcam = async (video) => {
// Now let's start detecting the stream.
let startTimeMs = performance.now();

if (lastVideoTime !== video.elt.currentTime && faceLandmarker) {
lastVideoTime = video.elt.currentTime;
let results = faceLandmarker.detect(video.elt, startTimeMs);
mediaPipe.faceLandmarks = results.faceLandmarks;
mediaPipe.faceBlendshapes = results.faceBlendshapes;
mediaPipe.parts = {
tesselation: FaceLandmarker.FACE_LANDMARKS_TESSELATION,
rightEye: FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
leftEye: FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
rightEyebrow: FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
leftEyebrow: FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
faceOval: FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
lips: FaceLandmarker.FACE_LANDMARKS_LIPS,
rightIris: FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
leftIris: FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
};
}

// Call this function again to keep predicting when the browser is ready.
window.requestAnimationFrame(() => {
predictWebcam(video);
});
};

// add the predictWebcam function to the mediaPipe object
mediaPipe.predictWebcam = predictWebcam;

// export for use in sketch.js via an inline import script
// see the html file for more
export { mediaPipe };
Loading

0 comments on commit 8618c52

Please sign in to comment.