Status code 400 when using Clarifai's FACE_DETECT_MODEL - reactjs

I am using Clarifai's API to detect faces in an image. It was working fine, and I deployed the app to GitHub Pages. After some time it stopped working and started returning status code 400, with status code 10020 showing in the network tab, even though I am sending the image in the format Clarifai expects, which is base64. At the same time, the app's Clarifai apparel detection model keeps working perfectly fine.
Below is the relevant code:
import React from 'react';
import Clarifai from 'clarifai';
import { connect } from 'react-redux';
import { setFaceBoundary, setApparelBoundary, numberOfFaces, setBoundingBox, setApparelsInfo, setWithSpinner } from '../../redux/box/box.actions';
import { setImageDimensions } from '../../redux/image/image.actions.js';
import './models-options.styles.css';

const app = new Clarifai.App({
  apiKey: 'MY_API_KEY'
});

const ModelsOptions = ({ setFaceBoundary, setApparelBoundary, fileProperties, numberOfFaces, setBoundingBox, setApparelsInfo, setWithSpinner, setImageDimensions }) => {
  const calculateApparel = (data) => {
    const conceptsArray = data.outputs[0].data.regions.map(concepts => concepts.data.concepts);
    setApparelsInfo(conceptsArray);
    const outputs = data.outputs[0].data.regions.map(apparels => apparels.region_info.bounding_box);
    console.log(outputs);
    setBoundingBox(outputs);
    const image = document.getElementById("inputImage");
    console.log('image dimensions', image.naturalWidth, image.naturalHeight);
    const width = image.naturalWidth;
    const height = image.naturalHeight;
    const apparelsLoaction = outputs.map(apparel => {
      return {
        leftCol: apparel.left_col * width,
        topRow: apparel.top_row * height,
        rightCol: width - apparel.right_col * width,
        bottomRow: height - apparel.bottom_row * height
      };
    });
    return apparelsLoaction;
  };

  const calculateFace = (data) => {
    const faceNumber = data.outputs[0].data.regions.length;
    numberOfFaces(faceNumber);
    const outputs = data.outputs[0].data.regions.map((faces) => faces.region_info.bounding_box);
    setBoundingBox(outputs);
    const image = document.getElementById("inputImage");
    const width = image.clientWidth;
    const height = image.clientHeight;
    const faceCordinates = outputs.map((face) => {
      return {
        leftCol: face.left_col * width,
        topRow: face.top_row * height,
        rightCol: width - face.right_col * width,
        bottomRow: height - face.bottom_row * height,
      };
    });
    return faceCordinates;
  };

  const detectFace = () => {
    setWithSpinner(true);
    app.models.predict(Clarifai.FACE_DETECT_MODEL, { base64: fileProperties }).then(
      (response) => {
        setFaceBoundary(calculateFace(response));
        setWithSpinner(false);
      },
      (err) => {
        console.log('There was an error', err);
      }
    );
    setApparelsInfo({});
    setApparelBoundary({});
  };

  const detectApparels = () => {
    setWithSpinner(true);
    app.models.predict('72c523807f93e18b431676fb9a58e6ad', { base64: fileProperties }).then(
      (response) => {
        console.log('response at the models', response);
        setApparelBoundary(calculateApparel(response));
        setWithSpinner(false);
      },
      (err) => {
        console.log('There was an error', err);
      }
    );
    setFaceBoundary({});
    numberOfFaces(0);
  };

  return (
    <div className="models-button">
      <button onClick={detectFace}>Detect Face</button>
      <button onClick={detectApparels}>Detect Apparels</button>
    </div>
  );
};

const mapStateToProps = ({ image: { fileProperties } }) => ({
  fileProperties
});

const mapDispatchToProps = dispatch => ({
  setFaceBoundary: (facePostion) => dispatch(setFaceBoundary(facePostion)),
  setApparelBoundary: (apparelPosition) => dispatch(setApparelBoundary(apparelPosition)),
  numberOfFaces: (number) => dispatch(numberOfFaces(number)),
  setApparelsInfo: (number) => dispatch(setApparelsInfo(number)),
  setBoundingBox: (bounding) => dispatch(setBoundingBox(bounding)),
  setWithSpinner: (spinner) => dispatch(setWithSpinner(spinner)),
  setImageDimensions: (dimensions) => dispatch(setImageDimensions(dimensions)),
});

export default connect(mapStateToProps, mapDispatchToProps)(ModelsOptions);
Here is a link to the web app in case it helps: https://abdullahgumi.github.io/smart-box/
Any idea on how to solve this would be much appreciated. Thanks.

Looks like there was an internal issue that should now be resolved. The web app you've linked to is working again.
There is also a status page for the models at the Clarifai Model Status Page, which might be helpful, although in this case it was unfortunately not reflecting the status of that model accurately.
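In the meantime, it can help to surface the error details instead of only logging a generic message, so a 400 with Clarifai's model-level code (like the 10020 you saw) shows up directly in the console. Below is a minimal sketch of the rejection handler; the exact error shape depends on the clarifai client version, so treat the err.data.status fields as an assumption and compare against the object that gets logged:

app.models.predict(Clarifai.FACE_DETECT_MODEL, { base64: fileProperties }).then(
  (response) => {
    setFaceBoundary(calculateFace(response));
    setWithSpinner(false);
  },
  (err) => {
    // Log the whole rejection so the HTTP status (e.g. 400) is not swallowed.
    console.log('Face detection failed', err);
    // Clarifai's response body usually carries a model-level status (e.g. code 10020);
    // guard the access since this shape is an assumption, not a documented contract.
    if (err && err.data && err.data.status) {
      console.log('Clarifai status', err.data.status.code, err.data.status.description);
    }
    setWithSpinner(false);
  }
);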

Related

Tensorflow.js Model Prediction Output Never Changes

I have a custom-trained TensorFlow.js graph model (the link can be found in the getModel method) to recognize images of fly agarics. However, the prediction output (the result of model.executeAsync()) never changes - this is evident from the console.log in the renderPredictions method.
I would greatly appreciate any help.
webcamView.tsx
import { useEffect, useRef, useState } from "react";
import * as tf from '@tensorflow/tfjs-core';
import { loadGraphModel } from "@tensorflow/tfjs-converter";
import '@tensorflow/tfjs-backend-cpu';
export default function WebcamView() {
const videoWidth = 640;
const videoHeight = 500;
const videoRef = useRef<HTMLVideoElement>(null);
const [loading, setLoading] = useState<Boolean>(true);
let model: any = undefined;
const getModel = async () => {
model = await loadGraphModel('https://raw.githubusercontent.com/AlanChen4/FungEye/main/data/web_model_old/model.json');
return model;
};
const startVideo = async () => {
navigator.mediaDevices.getUserMedia({
video: {
facingMode: 'environment',
}
}).then(stream => {
if (videoRef.current !== null) {
const video = videoRef.current;
video.srcObject = stream;
video.onloadedmetadata = () => {
video.play();
video.addEventListener('loadeddata', processVideoInput);
}
}
})
};
const renderPredictions = (predictions: any) => {
const predictionBoxes = predictions[6].dataSync();
const predictionClasses = predictions[2].dataSync();
const predictionScores = predictions[4].dataSync();
// this always prints the same scores
console.log(predictionScores);
};
const processVideoInput = () => {
// classify the frame in the video stream and then repeat to process the next frame
if (videoRef.current !== null) {
classifyVideoInput(videoRef.current).then(() => {
window.requestAnimationFrame(processVideoInput);
})
}
}
const classifyVideoInput = async (videoImage: HTMLVideoElement) => {
// get next video frame
await tf.nextFrame();
// convert tensor to image
const tfImage = tf.browser.fromPixels(videoImage);
// convert image to smaller image in order to match detection size
const smallerImage = tf.image.resizeBilinear(tfImage, [videoHeight, videoWidth]);
// convert smaller image to format usable by model
const resizedImage = tf.cast(smallerImage, 'int32');
let tf4d_ = tf.tensor4d(Array.from(resizedImage.dataSync()), [1, videoHeight, videoWidth, 3]);
const tf4d = tf.cast(tf4d_, 'int32');
// generate predictions from model
let predictions = await model.executeAsync(tf4d);
renderPredictions(predictions);
tfImage.dispose();
smallerImage.dispose();
resizedImage.dispose();
tf4d.dispose();
};
useEffect(() => {
const start = async () => {
await getModel();
await startVideo();
setLoading(false);
};
start();
}, []);
return (
<div className="p-8">
<div className="flex justify-center p-5">
<div id="videoView" style={{ cursor: 'pointer' }}>
{loading
? <p className="text-center text-semibold p-5">Loading Model...</p>
: <video ref={videoRef} playsInline autoPlay muted width={videoWidth} height={videoHeight}></video>
}
</div>
</div>
</div>
);
};
I have checked to make sure that video processing only occurs after the video has loaded. I've checked numerous threads and posts but cannot seem to find the cause of this issue. I am using TensorFlow.js 3.1.0.

Loading a GLTF or GLB into React Native GL

I have been attempting to load a simple model into my React Native app. I created a model in Blender and exported it as GLTF and GLB. In my React Native app I'm using expo-gl and three.js to load and render the scene. This is my page code:
import React, { useEffect } from 'react'
import { GLView } from 'expo-gl'
import { Renderer } from 'expo-three'
import * as THREE from 'three'
import OrbitControlsView from 'expo-three-orbit-controls'
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader'
import { storage } from '../services/firebase'
import { ref, getDownloadURL } from 'firebase/storage'
class SphereMesh extends THREE.Mesh {
constructor() {
super(
new THREE.BoxGeometry(1, 1, 1),
new THREE.MeshBasicMaterial({ color: 'red' })
)
}
}
export function ThreeComponent() {
// Animation clock
const clock = new THREE.Clock()
const [camera, setCamera] = React.useState<THREE.Camera | null>(null)
async function loadModel() {
getDownloadURL(ref(storage, 'niltontest/mymodel.gltf'))
.then(async (url) => {
// let blob = await fetch(url).then((r) => r.blob())
// GLTF loader
const gltfLoader = new GLTFLoader()
// await gltfLoader.parse(
// 'file:///Users/niltonsf/Desktop/react-native-3d/assets/mymodel.gltf',
// '',
// (gltf) => {
// console.log('here', gltf)
// },
// () => {
// console.log('fail')
// }
// )
gltfLoader.load(
// resource URL
url,
// called when the resource is loaded
function (gltf) {
console.log('sc')
},
// called while loading is progressing
function (xhr) {
console.log((xhr.loaded / xhr.total) * 100 + '% loaded')
},
// called when loading has errors
function (error) {
console.log('An error happened', error)
}
)
})
.catch((error) => {
// Handle any errors
})
}
// Mesh
const cube = new SphereMesh()
const tick = () => {
window.requestAnimationFrame(tick)
}
useEffect(() => {
loadModel()
}, [])
return (
<OrbitControlsView style={{ flex: 1 }} camera={camera}>
<GLView
style={{ flex: 1 }}
onContextCreate={async (gl) => {
// GL parameter destructuring
const {
drawingBufferWidth: width,
drawingBufferHeight: height
} = gl
// Scene
const scene = new THREE.Scene()
// Geometry
scene.add(cube)
// Camera
const camera = new THREE.PerspectiveCamera(
75,
width / height,
1,
1000
)
camera.position.z = 2
camera.position.x = 2
camera.position.y = 2
camera.lookAt(cube.position)
setCamera(camera)
scene.add(camera)
// Renderer
const renderer = new Renderer({ gl })
renderer.setSize(width, height)
renderer.setClearColor('#fff')
// Render function
const render = () => {
requestAnimationFrame(render)
renderer.render(scene, camera)
gl.endFrameEXP()
}
render()
}}
/>
</OrbitControlsView>
)
}
I have attempted to load a model from Firebase Storage as well as a local one, but both fail with the same error message:
An error happened [Error: FileReader.readAsArrayBuffer is not implemented]

React Native custom hook returns null the first time using react-native-geolocation-service

I am trying to create a custom hook to get the user's current location, using react-native-geolocation-service.
It returns null the first time.
However, when I re-run the app, the geo data shows up again.
Is this an issue with async data?
Or have I implemented useState incorrectly, so that the data doesn't show the first time?
Map component
import {useCurrentLocation} from '../queries/getCurrentLocation';

const Map = () => {
  const {coordinate, watchError} = useCurrentLocation();
  console.log('data', coordinate);
  return <View style={styles.container}><MapView /></View>;
};
Custom hook
import React, {useRef, useState, useEffect } from 'react';
import Geolocation, {watchPosition} from 'react-native-geolocation-service';
import useLocationPermission from '../hooks/useLocationPermission';
export const useCurrentLocation = () => {
const [coordinate, setCoordinate] = useState(null);
const [watchError, setWatchError] = useState(null);
const watchId = useRef(null);
const {hasPermission, hasPermissionError} = useLocationPermission();
const startWatch = () => {
if (!hasPermission) return;
watchId.current = Geolocation.watchPosition(
position => {
const latitude = position.coords.latitude;
const longitude = position.coords.longitude;
const speed = position.coords.speed;
setCoordinate({latitude, longitude, speed});
},
error => {
setWatchError(error);
},
{
accuracy: {
android: 'high',
//TODO config to ios
//ios: 'best',
},
enableHighAccuracy: true,
distanceFilter: 0,
interval: 20000,
fastestInterval: 2000,
},
);
};
const stopWatch = () => {
if (watchId.current == null) return;
Geolocation.clearWatch(watchId.current);
watchId.current = null;
};
useEffect(() => {
if (hasPermission) {
getCurrentCoordinate(coordinate);
}
startWatch();
return () => {
stopWatch();
};
}, [coordinate]);
return {coordinate, watchError};
};
const getCurrentCoordinate = coordinate => {
Geolocation.getCurrentPosition(position => {
coordinate = position;
});
return coordinate;
};

Removing d3-flame-graph when loading new data

Recently I picked up a project that uses d3-flame-graph, and the graph is displayed according to the filters defined in another component.
My issue is that when searching with new parameters I can't seem to clear the previous chart, and I was wondering if someone could help me. Basically, what I have right now is: when I first enter the page I get the loading component, then my graph; and when I search for a new date I get the loading component, but on top of that I still have the previous graph.
I figured I could call flamegraph().destroy() in updateGraph, but nothing happens.
import React, { FC, useEffect, useRef, useState, useCallback } from 'react'
import { useParams } from 'react-router-dom'
import moment from 'moment'
import * as d3 from 'd3'
import { flamegraph } from 'd3-flame-graph'
import Filters, { Filter } from '../../../../../../components/Filters'
import { getFlamegraph } from '../../../../../../services/flamegraph'
import { useQueryFilter } from '../../../../../../hooks/filters'
import FlamegraphPlaceholder from '../../../../../../components/Placeholders/Flamegraph'
import css from './flamegraph.module.css'
import ToastContainer, {
useToastContainerMessage,
} from '../../../../../../components/ToastContainer'
const defaultFilters = {
startDate: moment().subtract(1, 'month'),
endDate: moment(),
text: '',
limit: 10,
}
const getOffSet = (divElement: HTMLDivElement | null) => {
if (divElement !== null) {
const padding = 100
const minGraphHeight = 450
// ensure that the graph has a min height
return Math.max(
window.innerHeight - divElement.offsetTop - padding,
minGraphHeight
)
} else {
const fallBackNavigationHeight = 300
return window.innerHeight - fallBackNavigationHeight
}
}
const Flamegraph: FC = () => {
const [queryFilters, setQueryFilters] = useQueryFilter(defaultFilters)
const [fetching, setFetching] = useState(false)
const [graphData, setGraphData] = useState()
const {
messages: toastMessages,
addMessage: addMessageToContainer,
removeMessage: removeMessageFromContainer,
} = useToastContainerMessage()
const flameContainerRef = useRef<HTMLDivElement | null>(null)
const flameRef = useRef<HTMLDivElement | null>(null)
const graphRef = useRef<any>()
const graphDataRef = useRef<any>()
const timerRef = useRef<any>()
const { projectId, functionId } = useParams()
let [sourceId, sourceLine] = ['', '']
if (functionId) {
;[sourceId, sourceLine] = functionId.split(':')
}
const createGraph = () => {
if (flameContainerRef.current && flameRef.current) {
graphRef.current = flamegraph()
.width(flameContainerRef.current.offsetWidth)
.height(getOffSet(flameRef.current))
.cellHeight(30)
.tooltip(false)
.setColorMapper(function(d, originalColor) {
// Scale green component proportionally to box width (=> the wider the redder)
let greenHex = (192 - Math.round((d.x1 - d.x0) * 128)).toString(16)
return '#FF' + ('0' + greenHex).slice(-2) + '00'
})
}
}
const updateGraph = (newData: any) => {
setGraphData(newData)
graphDataRef.current = newData
if (graphRef.current) {
if (newData === null) {
graphRef.current.destroy()
graphRef.current = null
} else {
d3.select(flameRef.current)
.datum(newData)
.call(graphRef.current)
}
}
}
const fetchGraph = (filters: Filter) => {
setFetching(true)
getFlamegraph(
Number(projectId),
filters.startDate ? filters.startDate.unix() : 0,
filters.endDate ? filters.endDate.unix() : 0,
sourceId,
sourceLine
)
.then(graphData => {
if (!graphRef.current) {
createGraph()
}
updateGraph(graphData)
})
.catch(({ response }) => {
updateGraph(null)
if (response.data) {
addMessageToContainer(response.data.message, true)
}
})
.finally(() => {
setFetching(false)
})
}
const onResize = useCallback(() => {
clearTimeout(timerRef.current)
timerRef.current = setTimeout(() => {
if (graphRef.current && flameContainerRef.current) {
graphRef.current.width(flameContainerRef.current.offsetWidth)
d3.select(flameRef.current)
.datum(graphDataRef.current)
.call(graphRef.current)
}
}, 500)
}, [])
useEffect(() => {
fetchGraph(queryFilters)
window.addEventListener('resize', onResize)
return () => {
window.removeEventListener('resize', onResize)
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const onChangeFilters = (filters: Filter) => {
setQueryFilters(filters)
fetchGraph(filters)
}
return (
<div className={css.host}>
<Filters
defaultValues={queryFilters}
searching={fetching}
onSearch={onChangeFilters}
/>
<div className={css.flameBox}>
<div className={css.flameContainer} ref={flameContainerRef}>
<div ref={flameRef} />
</div>
{fetching || !graphData ? (
<FlamegraphPlaceholder loading={fetching} />
) : null}
</div>
<ToastContainer
messages={toastMessages}
toastDismissed={removeMessageFromContainer}
/>
</div>
)
}
export default Flamegraph
Firstly, flamegraph() creates a new instance of the flame graph; you'd need to call graphRef.current.destroy() instead. Secondly, you'd want to destroy it not when the new data has already been loaded, but just as it starts to load, right? Because that's the operation that takes time.
Consider the following:
const cleanGraph = () => {
  if (graphRef.current !== undefined) {
    graphRef.current.destroy()
  }
}

const fetchGraph = (filters: Filter) => {
  setFetching(true)
  cleanGraph()
  getFlamegraph(
    Number(projectId),
    filters.startDate ? filters.startDate.unix() : 0,
    filters.endDate ? filters.endDate.unix() : 0,
    sourceId,
    sourceLine
  )
  ...
}

How do I clear out an Image.getSize request when unmounting a React Native component?

I have a React Native component that makes an Image.getSize request for each image in the component. Then within the callback of the Image.getSize requests, I set some state for my component. That all works fine, but the problem is that it's possible for a user to transition away from the screen the component is used on before one or more Image.getSize requests respond, which then causes the "no-op memory leak" error to pop up because I'm trying to change state after the component has been unmounted.
So my question is: How can I stop the Image.getSize request from trying to modify state after the component is unmounted? Here's a simplified version of my component code. Thank you.
const imgWidth = 300; // Not actually static in the component, but doesn't matter.
const SomeComponent = (props) => {
const [arr, setArr] = useState(props.someData);
const setImgDimens = (arr) => {
arr.forEach((arrItem, i) => {
if (arrItem.imgPath) {
const uri = `/path/to/${arrItem.imgPath}`;
Image.getSize(uri, (width, height) => {
setArr((currArr) => {
const newWidth = imgWidth;
const ratio = newWidth / width;
const newHeight = ratio * height;
currArr = currArr.map((arrItem, idx) => {
if (idx === i) {
arrItem.width = newWidth;
arrItem.height = newHeight;
}
return arrItem;
});
return currArr;
});
});
}
});
};
useEffect(() => {
setImgDimens(arr);
return () => {
// Do I need to do something here?!
};
}, []);
return (
<FlatList
data={arr}
keyExtractor={(arrItem) => arrItem.id.toString()}
renderItem={({ item }) => {
return (
<View>
{ item.imgPath ?
<Image
source={{ uri: `/path/to/${item.imgPath}` }}
/>
:
null
}
</View>
);
}}
/>
);
};
export default SomeComponent;
I had to implement something similar. I start by initialising a variable called isMounted.
It is set to true when the component mounts and to false when the component unmounts.
Before calling setImgDimens there's a check to see if the component is mounted. If not, it won't call the function and thus will not update state.
const SomeComponent = (props) => {
  const isMounted = React.createRef(null);

  useEffect(() => {
    // Component has mounted
    isMounted.current = true;
    if (isMounted.current) {
      setImgDimens(arr);
    }
    return () => {
      // Component will unmount
      isMounted.current = false;
    }
  }, []);
}
Edit: This is the answer that worked for me but, for what it's worth, I had to move the isMounted variable outside the SomeComponent function for it to work. Also, you can just use a regular variable instead of createRef to create a reference, etc.
Basically, the following worked for me:
let isMounted;

const SomeComponent = (props) => {
  const setImgDimens = (arr) => {
    arr.forEach((arrItem, i) => {
      if (arrItem.imgPath) {
        const uri = `/path/to/${arrItem.imgPath}`;
        Image.getSize(uri, (width, height) => {
          if (isMounted) { // Added this check.
            setArr((currArr) => {
              const newWidth = imgWidth;
              const ratio = newWidth / width;
              const newHeight = ratio * height;
              currArr = currArr.map((arrItem, idx) => {
                if (idx === i) {
                  arrItem.width = newWidth;
                  arrItem.height = newHeight;
                }
                return arrItem;
              });
              return currArr;
            });
          }
        });
      }
    });
  };

  useEffect(() => {
    isMounted = true;
    setImgDimens(arr);
    return () => {
      isMounted = false;
    }
  }, []);
};
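For comparison, the same guard can also live inside the component as a useRef flag, which keeps it per instance rather than in a module-level variable shared by every mounted copy of SomeComponent. This is only a minimal sketch under that assumption; it reuses the setImgDimens and arr from above, with the callback checking isMounted.current instead of the plain variable:

import React, { useEffect, useRef, useState } from 'react';

const SomeComponent = (props) => {
  const [arr, setArr] = useState(props.someData);
  // Per-instance mounted flag; read isMounted.current inside the Image.getSize callback.
  const isMounted = useRef(false);

  useEffect(() => {
    isMounted.current = true;
    setImgDimens(arr); // setImgDimens would check isMounted.current before calling setArr
    return () => {
      isMounted.current = false;
    };
  }, []);
  // ...rest of the component as above
};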
