I am following this tutorial to create a TensorFlow.js object detection system.
The full code is also available here. Here is the App.js code:
// Import dependencies
import React, { useRef, useState, useEffect } from "react";
import * as tf from "@tensorflow/tfjs";
// 1. TODO - Import required model here
// e.g. import * as tfmodel from "@tensorflow-models/tfmodel";
import * as cocossd from "@tensorflow-models/coco-ssd";
import Webcam from "react-webcam";
import "./App.css";
// 2. TODO - Import drawing utility here
import { drawRect } from "./utilities";
function App() {
const webcamRef = useRef(null);
const canvasRef = useRef(null);
// Main function
const runCoco = async () => {
// 3. TODO - Load network
const net = await cocossd.load();
// Loop and detect hands
setInterval(() => {
detect(net);
}, 10);
};
const detect = async (net) => {
// Check data is available
if (
typeof webcamRef.current !== "undefined" &&
webcamRef.current !== null &&
webcamRef.current.video.readyState === 4
) {
// Get Video Properties
const video = webcamRef.current.video;
const videoWidth = webcamRef.current.video.videoWidth;
const videoHeight = webcamRef.current.video.videoHeight;
// Set video width
webcamRef.current.video.width = videoWidth;
webcamRef.current.video.height = videoHeight;
// Set canvas height and width
canvasRef.current.width = videoWidth;
canvasRef.current.height = videoHeight;
// 4. TODO - Make Detections
const obj = await net.detect(video);
console.log(obj);
// Draw mesh
const ctx = canvasRef.current.getContext("2d");
// 5. TODO - Update drawing utility
// drawSomething(obj, ctx)
drawRect(obj,ctx);
}
};
useEffect(()=>{runCoco()},[]);
return (
<div className="App">
<header className="App-header">
<Webcam
ref={webcamRef}
muted={true}
style={{
position: "absolute",
marginLeft: "auto",
marginRight: "auto",
left: 0,
right: 0,
textAlign: "center",
zindex: 9,
width: 640,
height: 480,
}}
/>
<canvas
ref={canvasRef}
style={{
position: "absolute",
marginLeft: "auto",
marginRight: "auto",
left: 0,
right: 0,
textAlign: "center",
zindex: 8,
width: 640,
height: 480,
}}
/>
</header>
</div>
);
}
export default App;
And here is the utilities.js:
// Draw a labelled bounding box on `ctx` for each COCO-SSD prediction.
// Each prediction is expected to have `bbox: [x, y, width, height]` and
// a `class` string (the detected label).
export const drawRect = (detections, ctx) => {
  detections.forEach((prediction) => {
    const [x, y, width, height] = prediction["bbox"];
    const text = prediction["class"];

    const color = "white";
    // BUG FIX: was `ctx.strokeSylt` (typo), so the stroke colour was never
    // actually set on the context.
    ctx.strokeStyle = color;
    ctx.font = "18px Arial";
    // BUG FIX: was the string literal 'color' (an invalid CSS colour that the
    // canvas silently ignores) instead of the `color` variable.
    ctx.fillStyle = color;

    ctx.beginPath();
    ctx.fillText(text, x, y);
    if (text === "person") {
      ctx.fillText("A Handsome fellow", x, y + 19);
    }
    ctx.rect(x, y, width, height);
    ctx.stroke();
  });
};
This works as expected in my desktop browser, but when I access the app from my phone via http://<my-local-IP>:3000, the page loads but no video stream appears.
What do I need to change so that I can open the desktop's IP from my phone and get the stream from my phone's camera?