// homebridge-face-location/scripts/streamAndDetect.ts
// rtsp-stream provides the RTSP client; @vladmandic/face-api supplies the
// detection/recognition models; node-canvas stands in for the browser DOM
// primitives (Canvas/Image/ImageData) that face-api expects.
import { Rtsp } from "rtsp-stream/lib";
import { FaceMatcher, nets } from "@vladmandic/face-api";
import * as faceapi from "@vladmandic/face-api";
import canvas from "canvas";
import fs from "fs";
import * as path from "path";
import dotenv from "dotenv-extended";
import { getFaceDetectorOptions, saveFile } from "../src/common";
// Deliberately a CommonJS require, not an ES import: ES imports are hoisted,
// so this runs after the modules above are loaded, registering the tfjs-node
// native backend at that point. Converting to `import` would change the
// module evaluation order.
require("@tensorflow/tfjs-node");
const { Canvas, Image, ImageData } = canvas;
// Patch face-api's environment with node-canvas implementations of the
// browser globals it relies on (no type defs for monkeyPatch's arg shape,
// hence the suppression).
//@ts-ignore
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });
/**
 * Entry point: loads the face-api models and trained matcher files, then
 * streams frames from an RTSP camera and logs the best-match label for every
 * face found in each frame.
 *
 * Environment:
 * - TRAINED_MODEL_DIR (required via dotenv `errorOnMissing`): directory of
 *   serialized FaceMatcher JSON files.
 * - RTSP_STREAM_URL (optional): stream URL; falls back to the previous
 *   hard-coded value for backward compatibility.
 */
const main = async (): Promise<void> => {
  dotenv.load({
    silent: false,
    errorOnMissing: true,
  });
  const modelDir = process.env.TRAINED_MODEL_DIR as string;

  // NOTE(review): credentials were hard-coded in source; allow overriding
  // via env while keeping the original value as a compatible default.
  const streamUrl =
    process.env.RTSP_STREAM_URL ?? "rtsp://brandon:asdf1234@192.168.1.229/live";
  const rtsp = new Rtsp(streamUrl, {
    rate: 10,
  });

  const faceDetectionNet = nets.ssdMobilenetv1;
  const weightsDir = path.join(__dirname, "../weights");
  // The three model loads are independent; load them in parallel.
  await Promise.all([
    faceDetectionNet.loadFromDisk(weightsDir),
    nets.faceLandmark68Net.loadFromDisk(weightsDir),
    nets.faceRecognitionNet.loadFromDisk(weightsDir),
  ]);

  // Each file in modelDir is a serialized FaceMatcher produced at training time.
  const matchers: Array<FaceMatcher> = [];
  for (const file of fs.readdirSync(modelDir)) {
    const raw = fs.readFileSync(path.join(modelDir, file), "utf-8");
    matchers.push(FaceMatcher.fromJSON(JSON.parse(raw)));
  }

  rtsp.on("data", async (data: Buffer) => {
    // Guard the async handler: an exception here would otherwise become an
    // unhandled promise rejection (EventEmitter ignores returned promises).
    try {
      // node-canvas loadImage accepts a Buffer directly; the previous extra
      // "base64" argument matched no signature, and the unused `new Image()`
      // assignment was dead code.
      const input = await canvas.loadImage(data);
      const resultsQuery = await faceapi
        .detectAllFaces(input, getFaceDetectorOptions(faceDetectionNet))
        .withFaceLandmarks()
        .withFaceDescriptors();
      for (const res of resultsQuery) {
        for (const matcher of matchers) {
          const bestMatch = matcher.findBestMatch(res.descriptor);
          console.log(bestMatch.label);
        }
      }
    } catch (err: unknown) {
      console.error("frame processing failed:", err);
    }
  });

  rtsp.on("error", (err) => {
    console.log(err);
  });

  rtsp.start();
};

main();