// homebridge-face-location/scripts/streamAndDetect.ts
// 2020-11-27 01:15:46 -05:00
// 62 lines, 1.8 KiB, TypeScript
import { Rtsp } from "rtsp-stream/lib";
import { nets } from "@vladmandic/face-api";
import * as faceapi from "@vladmandic/face-api";
import canvas from "canvas";
import fs from "fs";
import * as path from "path";
import dotenv from "dotenv-extended";
import { getFaceDetectorOptions, saveFile } from "../src/common";
require("@tensorflow/tfjs-node");
const { Canvas, Image, ImageData } = canvas;
//@ts-ignore
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });
const main = async () => {
dotenv.load({
silent: false,
errorOnMissing: true,
});
const modelDir = process.env.TRAINED_MODEL_DIR as string;
const rtsp = new Rtsp("rtsp://brandon:asdf1234@192.168.1.229/live", {
rate: 10,
});
const faceDetectionNet = nets.ssdMobilenetv1;
await faceDetectionNet.loadFromDisk(path.join(__dirname, "../weights"));
await nets.faceLandmark68Net.loadFromDisk(path.join(__dirname, "../weights"));
await nets.faceRecognitionNet.loadFromDisk(
path.join(__dirname, "../weights")
);
const raw = fs.readFileSync(path.join(modelDir, "data.json"), "utf-8");
const content = JSON.parse(raw);
const matcher = faceapi.FaceMatcher.fromJSON(content);
rtsp.on("data", async (data: Buffer) => {
const input = ((await canvas.loadImage(data)) as unknown) as ImageData;
const out = faceapi.createCanvasFromMedia(input);
// fs.writeFileSync(path.join(__dirname, "image.jpg"), data, "base64");
const resultsQuery = await faceapi
.detectAllFaces(out, getFaceDetectorOptions(faceDetectionNet))
.withFaceLandmarks()
.withFaceDescriptors();
for (const res of resultsQuery) {
const bestMatch = matcher.findBestMatch(res.descriptor);
console.log(bestMatch.label);
}
});
rtsp.on("error", (err) => {
console.log(err);
});
rtsp.start();
};
main();