// Connects to an RTSP camera stream, runs face detection and recognition on each
// frame with @vladmandic/face-api, and logs the best-matching label per detected face.
import { Rtsp, IStreamEventArgs } from "../src/rtsp/rtsp";
import { nets } from "@vladmandic/face-api";
import * as faceapi from "@vladmandic/face-api";
import canvas from "canvas";
import fs from "fs";
import * as path from "path";
import dotenv from "dotenv-extended";
import { delay, getFaceDetectorOptions, saveFile } from "../src/common";

// Registers the native TensorFlow backend so inference runs through tfjs-node.
require("@tensorflow/tfjs-node");

// face-api expects browser DOM classes; patch in the node-canvas equivalents.
const { Canvas, Image, ImageData } = canvas;
//@ts-ignore
faceapi.env.monkeyPatch({ Canvas, Image, ImageData });

const main = async () => {
  // Load environment variables (TRAINED_MODEL_DIR, OUT_DIR) and fail fast if any are missing.
  dotenv.load({
    silent: false,
    errorOnMissing: true,
  });

  const modelDir = process.env.TRAINED_MODEL_DIR as string;

  // Open the camera's RTSP feed and emit individual frames as still images.
  const rtsp = new Rtsp("rtsp://brandon:asdf1234@192.168.1.229/live", {
    rate: 0.5,
    image: true,
    codec: "copy",
  });

  // Load the pre-trained detection, landmark, and recognition weights from disk.
  const faceDetectionNet = nets.ssdMobilenetv1;
  await faceDetectionNet.loadFromDisk(path.join(__dirname, "../weights"));
  await nets.faceLandmark68Net.loadFromDisk(path.join(__dirname, "../weights"));
  await nets.faceRecognitionNet.loadFromDisk(
    path.join(__dirname, "../weights")
  );

  // Restore the FaceMatcher (labeled face descriptors) saved during training.
  const raw = fs.readFileSync(path.join(modelDir, "data.json"), "utf-8");
  const content = JSON.parse(raw);
  const matcher = faceapi.FaceMatcher.fromJSON(content);

  // Run detection and recognition on every frame emitted by the RTSP stream.
  rtsp.dataEvent.push(async (sender: Rtsp, args: IStreamEventArgs) => {
    const input = ((await canvas.loadImage(args.data)) as unknown) as ImageData;
    const out = faceapi.createCanvasFromMedia(input);

    // Keep a copy of the latest frame on disk for debugging.
    await saveFile(process.env.OUT_DIR as string, "image.jpg", args.data);

    const resultsQuery = await faceapi
      .detectAllFaces(out, getFaceDetectorOptions(faceDetectionNet))
      .withFaceLandmarks()
      .withFaceDescriptors();

    // Match each detected face descriptor against the known labels.
    for (const res of resultsQuery) {
      const bestMatch = matcher.matchDescriptor(res.descriptor);
      console.log("Face Detected: " + bestMatch.label);
    }
  });

  rtsp.start();
};

main();