Creating streamAndDetect script that streams from an RTSP source and detects persons
scripts/streamAndDetect.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
import { Rtsp } from "rtsp-stream/lib";
import { FaceMatcher, nets } from "@vladmandic/face-api";
import * as faceapi from "@vladmandic/face-api";
import canvas from "canvas";
import fs from "fs";
import * as path from "path";
import dotenv from "dotenv-extended";
import { getFaceDetectorOptions, saveFile } from "../src/common";
require("@tensorflow/tfjs-node");

// face-api.js expects browser globals; patch them with node-canvas equivalents.
const { Canvas, Image, ImageData } = canvas;
//@ts-ignore
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });

const main = async () => {
  // Load .env and fail fast if a required variable is missing.
  dotenv.load({
    silent: false,
    errorOnMissing: true,
  });

  // Directory of serialized FaceMatcher JSON files, one per enrolled person.
  const modelDir = process.env.TRAINED_MODEL_DIR as string;

  // Open the RTSP stream at roughly 10 frames per second.
  const rtsp = new Rtsp("rtsp://brandon:asdf1234@192.168.1.229/live", {
    rate: 10,
  });
  const faceDetectionNet = nets.ssdMobilenetv1;

  // Load detection, landmark, and recognition weights from the local weights directory.
  await faceDetectionNet.loadFromDisk(path.join(__dirname, "../weights"));
  await nets.faceLandmark68Net.loadFromDisk(path.join(__dirname, "../weights"));
  await nets.faceRecognitionNet.loadFromDisk(
    path.join(__dirname, "../weights")
  );

  // Rebuild a FaceMatcher from every serialized model on disk.
  const files = fs.readdirSync(modelDir);
  const matchers: Array<FaceMatcher> = [];
  for (const file of files) {
    const raw = fs.readFileSync(path.join(modelDir, file), "utf-8");
    const content = JSON.parse(raw);
    matchers.push(FaceMatcher.fromJSON(content));
  }

  // Each "data" event delivers one encoded frame as a Buffer.
  rtsp.on("data", async (data: Buffer) => {
    // node-canvas can decode the frame Buffer directly; no base64 round-trip needed.
    const input = await canvas.loadImage(data);
    // Detect every face in the frame and compute its descriptor.
    const resultsQuery = await faceapi
      .detectAllFaces(input, getFaceDetectorOptions(faceDetectionNet))
      .withFaceLandmarks()
      .withFaceDescriptors();

    // Compare each detected face against every known matcher and log the best label.
    for (const res of resultsQuery) {
      for (const matcher of matchers) {
        const bestMatch = matcher.findBestMatch(res.descriptor);
        console.log(bestMatch.label);
      }
    }
  });

  rtsp.on("error", (err) => {
    console.log(err);
  });

  rtsp.start();
};

main();
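The matcher files read from TRAINED_MODEL_DIR above are serialized FaceMatcher objects: the script parses each JSON file and rebuilds it with FaceMatcher.fromJSON. For reference, a file in that format could be produced roughly as follows. This is a minimal sketch; the enrollLabel helper, the reference-photo path, and the one-file-per-person layout are assumptions for illustration, not part of this commit.

import * as faceapi from "@vladmandic/face-api";
import { FaceMatcher, LabeledFaceDescriptors, nets } from "@vladmandic/face-api";
import canvas from "canvas";
import fs from "fs";
import * as path from "path";
require("@tensorflow/tfjs-node");

const { Canvas, Image, ImageData } = canvas;
//@ts-ignore
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });

// Hypothetical helper: enroll one person from a single reference photo and write a
// serialized FaceMatcher that streamAndDetect.ts can reload with FaceMatcher.fromJSON.
const enrollLabel = async (label: string, imagePath: string, outDir: string) => {
  await nets.ssdMobilenetv1.loadFromDisk(path.join(__dirname, "../weights"));
  await nets.faceLandmark68Net.loadFromDisk(path.join(__dirname, "../weights"));
  await nets.faceRecognitionNet.loadFromDisk(path.join(__dirname, "../weights"));

  const img = await canvas.loadImage(imagePath);
  const detection = await faceapi
    .detectSingleFace(img)
    .withFaceLandmarks()
    .withFaceDescriptor();
  if (!detection) throw new Error(`no face found in ${imagePath}`);

  // One matcher per person, written as <label>.json into the trained-model directory.
  const labeled = new LabeledFaceDescriptors(label, [detection.descriptor]);
  const matcher = new FaceMatcher([labeled]);
  fs.writeFileSync(
    path.join(outDir, `${label}.json`),
    JSON.stringify(matcher.toJSON())
  );
};

// Example usage (label and photo path are placeholders):
enrollLabel("alice", "./photos/alice.jpg", process.env.TRAINED_MODEL_DIR as string)
  .catch(console.error);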
@@ -2,9 +2,10 @@ import * as faceapi from "@vladmandic/face-api";
 import canvas from "canvas";
 import fs, { lstatSync } from "fs";
 import * as path from "path";
-import { LabeledFaceDescriptors, TNetInput } from "@vladmandic/face-api";
+import { TNetInput } from "@vladmandic/face-api";
 import * as mime from "mime-types";
 import dotenv from "dotenv-extended";
+import { getFaceDetectorOptions } from "../src/common";
 require("@tensorflow/tfjs-node");
 
 const { Canvas, Image, ImageData } = canvas;
@@ -90,19 +91,4 @@ const main = async () => {
   }
 };
 
-// SsdMobilenetv1Options
-const minConfidence = 0.5;
-
-// TinyFaceDetectorOptions
-const inputSize = 408;
-const scoreThreshold = 0.5;
-
-function getFaceDetectorOptions(net: faceapi.NeuralNetwork<any>) {
-  return net === faceapi.nets.ssdMobilenetv1
-    ? new faceapi.SsdMobilenetv1Options({ minConfidence })
-    : new faceapi.TinyFaceDetectorOptions({ inputSize, scoreThreshold });
-}
-
-const baseDir = path.resolve(__dirname, "../out");
-
 main();
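Both scripts now import getFaceDetectorOptions from ../src/common, and the hunk above deletes the local copy of that function along with its thresholds. A plausible shape for src/common.ts after this refactor is sketched below: the exported getFaceDetectorOptions and its constants come straight from the removed block, while the saveFile body is an assumption, since streamAndDetect.ts imports it but its implementation is not shown in this diff.

// src/common.ts (sketch)
import * as faceapi from "@vladmandic/face-api";
import fs from "fs";
import * as path from "path";

// SsdMobilenetv1Options
const minConfidence = 0.5;

// TinyFaceDetectorOptions
const inputSize = 408;
const scoreThreshold = 0.5;

// Moved here from the script above: pick options matching the selected detector net.
export function getFaceDetectorOptions(net: faceapi.NeuralNetwork<any>) {
  return net === faceapi.nets.ssdMobilenetv1
    ? new faceapi.SsdMobilenetv1Options({ minConfidence })
    : new faceapi.TinyFaceDetectorOptions({ inputSize, scoreThreshold });
}

// Assumed implementation: write a buffer into ../out, creating the directory if needed.
const baseDir = path.resolve(__dirname, "../out");
export function saveFile(fileName: string, buf: Buffer): void {
  if (!fs.existsSync(baseDir)) fs.mkdirSync(baseDir, { recursive: true });
  fs.writeFileSync(path.resolve(baseDir, fileName), buf);
}

Centralizing the detector options in one module keeps the confidence and input-size thresholds consistent between the streaming script and the other script touched in this commit.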