I have been attempting to load a simple model into my React Native app. I created the model in Blender and exported it as both glTF and GLB. In the app I'm using expo-gl and three.js to load and render the scene. This is my page code:
import React, { useEffect } from 'react'
import { GLView } from 'expo-gl'
import { Renderer } from 'expo-three'
import * as THREE from 'three'
import OrbitControlsView from 'expo-three-orbit-controls'
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader'
import { storage } from '../services/firebase'
import { ref, getDownloadURL } from 'firebase/storage'

class SphereMesh extends THREE.Mesh {
  constructor() {
    super(
      new THREE.BoxGeometry(1, 1, 1),
      new THREE.MeshBasicMaterial({ color: 'red' })
    )
  }
}

export function ThreeComponent() {
  // Animation clock
  const clock = new THREE.Clock()
  const [camera, setCamera] = React.useState<THREE.Camera | null>(null)

  async function loadModel() {
    getDownloadURL(ref(storage, 'niltontest/mymodel.gltf'))
      .then(async (url) => {
        // let blob = await fetch(url).then((r) => r.blob())
        // GLTF loader
        const gltfLoader = new GLTFLoader()
        // await gltfLoader.parse(
        //   'file:///Users/niltonsf/Desktop/react-native-3d/assets/mymodel.gltf',
        //   '',
        //   (gltf) => {
        //     console.log('here', gltf)
        //   },
        //   () => {
        //     console.log('fail')
        //   }
        // )
        gltfLoader.load(
          // resource URL
          url,
          // called when the resource is loaded
          function (gltf) {
            console.log('sc')
          },
          // called while loading is progressing
          function (xhr) {
            console.log((xhr.loaded / xhr.total) * 100 + '% loaded')
          },
          // called when loading has errors
          function (error) {
            console.log('An error happened', error)
          }
        )
      })
      .catch((error) => {
        // Handle any errors
      })
  }

  // Mesh
  const cube = new SphereMesh()

  const tick = () => {
    window.requestAnimationFrame(tick)
  }

  useEffect(() => {
    loadModel()
  }, [])

  return (
    <OrbitControlsView style={{ flex: 1 }} camera={camera}>
      <GLView
        style={{ flex: 1 }}
        onContextCreate={async (gl) => {
          // GL parameters
          const {
            drawingBufferWidth: width,
            drawingBufferHeight: height
          } = gl

          // Scene
          const scene = new THREE.Scene()

          // Geometry
          scene.add(cube)

          // Camera
          const camera = new THREE.PerspectiveCamera(
            75,
            width / height,
            1,
            1000
          )
          camera.position.z = 2
          camera.position.x = 2
          camera.position.y = 2
          camera.lookAt(cube.position)
          setCamera(camera)
          scene.add(camera)

          // Renderer
          const renderer = new Renderer({ gl })
          renderer.setSize(width, height)
          renderer.setClearColor('#fff')

          // Render function
          const render = () => {
            requestAnimationFrame(render)
            renderer.render(scene, camera)
            gl.endFrameEXP()
          }
          render()
        }}
      />
    </OrbitControlsView>
  )
}
I have attempted to load a model from Firebase Storage as well as a local one, but both fail with the same error message:
An error happened [Error: FileReader.readAsArrayBuffer is not implemented]
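For what it's worth, GLTFLoader.load goes through three's FileLoader, whose fetch/Blob path ends up needing FileReader.readAsArrayBuffer, which React Native's polyfill does not implement (a plain fetch(url).arrayBuffer() hits the same wall). One commonly suggested workaround, sketched here and not verified against this exact setup, is to download the file with expo-file-system, read it back as base64, decode that to an ArrayBuffer, and hand it to GLTFLoader.parse() so FileLoader is never involved:

import * as FileSystem from 'expo-file-system'
import { decode } from 'base64-arraybuffer' // small helper package, assumed installed

async function loadModelFromUrl(url: string, onLoad: (gltf: any) => void) {
  // Download to a local file, sidestepping the Blob/FileReader polyfill
  const localUri = FileSystem.cacheDirectory + 'mymodel.glb'
  await FileSystem.downloadAsync(url, localUri)

  // Read the bytes back as base64 and decode them to an ArrayBuffer
  const base64 = await FileSystem.readAsStringAsync(localUri, {
    encoding: FileSystem.EncodingType.Base64
  })
  const arrayBuffer = decode(base64)

  // parse() takes the raw data directly, so no FileLoader/FileReader is used
  new GLTFLoader().parse(
    arrayBuffer,
    '', // base path for external resources
    onLoad,
    (error) => console.log('parse failed', error)
  )
}

Note that a plain .gltf references a separate .bin and textures that the empty base path above cannot resolve, so the self-contained .glb export is the easier target here.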
I am trying to create a custom hook to get the user's current location, using react-native-geolocation-service.
It returns null the first time the app runs; when I re-run the app, the geo data shows up.
Is this an issue with async data? Have I implemented useState incorrectly, so that the data doesn't show the first time?
Map component
import {useCurrentLocation} from '../queries/getCurrentLocation';

const Map = () => {
  const {coordinate, watchError} = useCurrentLocation();
  console.log('data', coordinate);
  return <View style={styles.container}><MapView /></View>;
};
Custom hook
import React, {useRef, useState, useEffect} from 'react';
import Geolocation, {watchPosition} from 'react-native-geolocation-service';
import useLocationPermission from '../hooks/useLocationPermission';

export const useCurrentLocation = () => {
  const [coordinate, setCoordinate] = useState(null);
  const [watchError, setWatchError] = useState(null);
  const watchId = useRef(null);
  const {hasPermission, hasPermissionError} = useLocationPermission();

  const startWatch = () => {
    if (!hasPermission) return;
    watchId.current = Geolocation.watchPosition(
      position => {
        const latitude = position.coords.latitude;
        const longitude = position.coords.longitude;
        const speed = position.coords.speed;
        setCoordinate({latitude, longitude, speed});
      },
      error => {
        setWatchError(error);
      },
      {
        accuracy: {
          android: 'high',
          //TODO config to ios
          //ios: 'best',
        },
        enableHighAccuracy: true,
        distanceFilter: 0,
        interval: 20000,
        fastestInterval: 2000,
      },
    );
  };

  const stopWatch = () => {
    if (watchId.current == null) return;
    Geolocation.clearWatch(watchId.current);
    watchId.current = null;
  };

  useEffect(() => {
    if (hasPermission) {
      getCurrentCoordinate(coordinate);
    }
    startWatch();
    return () => {
      stopWatch();
    };
  }, [coordinate]);

  return {coordinate, watchError};
};

const getCurrentCoordinate = coordinate => {
  Geolocation.getCurrentPosition(position => {
    coordinate = position;
  });
  return coordinate;
};
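For reference: Geolocation.getCurrentPosition is asynchronous, so reassigning the coordinate parameter inside its callback and returning synchronously (as getCurrentCoordinate does) can never surface the position, and the first render will always see the initial null until some callback calls setCoordinate. A minimal sketch of seeding the first fix into state once on mount instead, using the same react-native-geolocation-service API (the option values here are just placeholders):

useEffect(() => {
  if (!hasPermission) return;
  // One-shot fix to seed state; the watch keeps it fresh afterwards.
  Geolocation.getCurrentPosition(
    position => {
      const {latitude, longitude, speed} = position.coords;
      setCoordinate({latitude, longitude, speed});
    },
    error => setWatchError(error),
    {enableHighAccuracy: true, timeout: 15000, maximumAge: 10000},
  );
  startWatch();
  return () => stopWatch();
  // Depend on the permission flag, not on coordinate, so the watch
  // isn't torn down and restarted on every position update.
}, [hasPermission]);

Even then, the very first render happens before any callback can fire, so the consuming component still has to handle a null coordinate (e.g. with a loading state).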
I am trying to turn off the camera and flashlight when the component unmounts. I am using React hooks and have two functions, startCamera() and stopCamera(); I call startCamera() when the component mounts and stopCamera() when it unmounts.
But it shows an error when stopCamera() is called during unmounting.
I created a separate button to test whether stopCamera() works, and it does, but I want to call the function when the component is being unmounted.
my code:
CameraScreen.js
import "./styles.css";
import { useState, useEffect, useRef } from "react";
export default function CameraScreen() {
const videoElement = useRef(null);
const [facingMode, setFacingMode] = useState("environment");
const handleFacingModeToggle = () => {
stopCamera();
facingMode === "environment"
? setFacingMode("user")
: setFacingMode("environment");
};
useEffect(() => {
// const getUserMedia = async () => {
// try {
// const stream = await navigator.mediaDevices.getUserMedia({
// video: true
// });
// videoElement.current.srcObject = stream;
// } catch (err) {
// console.log(err);
// }
// };
// getUserMedia();
startCamera();
return function cleanup() {
stopCamera();
};
}, []);
const stopCamera = () =>
videoElement.current.srcObject &&
videoElement.current.srcObject.getTracks().forEach((t) => t.stop());
function startCamera() {
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices
.getUserMedia({
video: { facingMode: facingMode },
width: { ideal: 1280 },
height: { ideal: 720 }
})
.then(function (stream) {
if (videoElement.current) videoElement.current.srcObject = stream;
const track = stream.getVideoTracks()[0];
//Create image capture object and get camera capabilities
const imageCapture = new ImageCapture(track);
const photoCapabilities = imageCapture
.getPhotoCapabilities()
.then(() => {
//todo: check if camera has a torch
//let there be light!
track.applyConstraints({
advanced: [{ torch: true }]
});
});
})
.catch(function (error) {
alert("Please check your device permissions");
console.log("Something went wrong!");
console.log(error);
});
if (videoElement.current)
videoElement.current.onloadeddata = function () {
if (window.NativeDevice)
window.NativeDevice.htmlCameraReadyToRecord(true);
};
}
}
return (
<>
<video
autoPlay={true}
ref={videoElement}
style={{
minHeight: "67.82vh",
maxHeight: "67.82vh",
maxWidth: "100%",
minWidth: "100%"
}}
className="border-3rem bg-[#666]"
></video>
<button onClick={stopCamera}> stopCamera</button>
</>
);
}
App.js
import "./styles.css";
import { useState } from "react";
import CameraScreen from "./cameraScreen";
export default function App() {
const [switchS, setSwitchS] = useState(false);
return (
<div>
<button className="" onClick={() => setSwitchS(!switchS)} value="switch">
switch
</button>
{switchS && <CameraScreen />}
{!switchS && "Blank Screen"}
</div>
);
}
PS: the above code is running at https://5t2to.csb.app/
CodeSandbox link: https://codesandbox.io/s/practical-fast-5t2to?file=/src/cameraScreen.js
You can use the useLayoutEffect hook. Its cleanup runs just before unmounting, like componentWillUnmount.
Here is an example of that:
https://codesandbox.io/s/happy-swartz-ikqdn?file=/src/random.js
You can go to https://ikqdn.csb.app/rand in the sandbox browser and watch the console when clicking the home button; you can see the difference between useEffect and useLayoutEffect during unmounting.
At that point ref.current is still set, so you can pass ref.current into the function you call just before unmounting and keep your reference to the DOM element.
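Applied to the question's component, a minimal sketch of that idea (assuming the videoElement ref from the question), replacing the useEffect in CameraScreen:

useLayoutEffect(() => {
  // Capture the DOM node while the component is still mounted.
  const video = videoElement.current;
  startCamera();
  return () => {
    // The captured node is still valid here, even after the ref is cleared.
    if (video && video.srcObject) {
      video.srcObject.getTracks().forEach((t) => t.stop());
    }
  };
}, []);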
It took a bit of debugging/sleuthing to find the issue: even though you have a ref attached to the video element, when the component is unmounted the ref is still mutated and becomes null. The solution is to save the current videoElement ref value and use that in the cleanup function.
useEffect(() => {
  startCamera();
  const ref = videoElement.current;
  return () => {
    ref.srcObject.getTracks().forEach((t) => t.stop());
  };
}, []);
Simply add a useLayoutEffect to stop the camera:
useEffect(() => {
  // const getUserMedia = async () => {
  //   try {
  //     const stream = await navigator.mediaDevices.getUserMedia({
  //       video: true
  //     });
  //     videoElement.current.srcObject = stream;
  //   } catch (err) {
  //     console.log(err);
  //   }
  // };
  // getUserMedia();
  startCamera();
}, []);

useLayoutEffect(() => () => {
  stopCamera();
}, []);
You just need to change useEffect() to useLayoutEffect() and it will work like a charm.
useLayoutEffect(() => {
  const ref = videoElement;
  console.log(ref);
  startCamera();
  return function cleanup() {
    stopCamera();
  };
}, []);
Sandbox link: https://codesandbox.io/s/naughty-murdock-by5tc?file=/src/cameraScreen.js:415-731
I am trying to load a .gltf model file in a CRA TypeScript project, but it fails.
I created a .tsx file named SceneComponent, which sets up the engine, scene, and canvas:
/* eslint-disable */
import { Engine, Scene } from "@babylonjs/core";
import React, { useEffect, useRef } from "react";
/* eslint-disable */

export default (props: any) => {
  const reactCanvas = useRef(null);
  const { antialias, engineOptions, adaptToDeviceRatio, sceneOptions, onRender, onSceneReady, ...rest } = props;

  useEffect(() => {
    if (reactCanvas.current) {
      const engine = new Engine(reactCanvas.current, antialias, engineOptions, adaptToDeviceRatio);
      const scene = new Scene(engine, sceneOptions);
      if (scene.isReady()) {
        props.onSceneReady(scene);
      } else {
        scene.onReadyObservable.addOnce((scene) => props.onSceneReady(scene));
      }
      engine.runRenderLoop(() => {
        if (typeof onRender === "function") {
          onRender(scene);
        }
        scene.render();
      });
      const resize = () => {
        scene.getEngine().resize();
      };
      if (window) {
        window.addEventListener("resize", resize);
      }
      return () => {
        scene.getEngine().dispose();
        if (window) {
          window.removeEventListener("resize", resize);
        }
      };
    }
  }, [reactCanvas]);

  return <canvas ref={reactCanvas} {...rest} />;
};
Then I use this component in my App.tsx file:
/* eslint-disable */
import React, { useEffect, useRef } from 'react';
import './App.css';
import { FreeCamera, Vector3, HemisphericLight, MeshBuilder, SceneLoader, Scene, Engine, AssetsManager } from '@babylonjs/core';
import "@babylonjs/loaders/glTF";
import { Nullable } from '@babylonjs/core/types';
import SceneComponent from './components/SceneComponent';
/* eslint-disable */
import model from "./assets/model2/scene.gltf";

const App: React.FC = () => {
  const onSceneReady = (scene: any) => {
    const canvas = scene.getEngine().getRenderingCanvas();
    const camera = new FreeCamera("camera1", new Vector3(0, 5, -10), scene);
    camera.setTarget(Vector3.Zero());
    camera.attachControl(canvas, true);
    const light = new HemisphericLight("light", new Vector3(0, 1, 0), scene);
    light.intensity = 0.7;
    // MeshBuilder.CreateBox("box", {}, scene);
    // console.log(scene)
    // SceneLoader.Append("./assets/model2/", "scene.gltf", scene, function (scene) {
    //   console.log(scene);
    // });
    SceneLoader.ImportMeshAsync(
      "",
      "./assets/model2/",
      "scene.gltf",
      scene,
    );
    console.log(scene)
  };

  const onRender = () => {
  };

  return (
    <div>
      <SceneComponent antialias onSceneReady={onSceneReady} onRender={onRender} id="my-canvas" />
    </div>
  );
}

export default App;
But when I try to load the glTF model in App.tsx, neither the SceneLoader.Append() method nor the SceneLoader.ImportMeshAsync() method loads it successfully; the scene shows nothing:
(picture 1: empty scene)
If I just create a box in the scene with MeshBuilder.CreateBox("box", {}, scene);, the box can be seen:
(picture 2: scene with the box visible)
So how can I load the glTF model file successfully? Any advice would be much appreciated!
This is an old question, but I see it asked a lot in the main forum. You need to make sure that your model path is relative to your /public folder (and that your assets are in that folder as well). The public folder is ./ on a running CRA.
If you are unsure, have a look at the Network tab in Developer Tools: it's possible that your model is being served as an index.html file, and in that case there is often an error logged in the console about failing to parse the model file.
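Concretely, a minimal sketch of that setup, assuming the model files are moved to public/assets/model2/ in the question's project (the "@babylonjs/loaders/glTF" import the question already has is still needed to register the .gltf plugin):

// Files live at public/assets/model2/scene.gltf (plus its .bin and textures).
// CRA serves public/ from the site root, so no import of the .gltf is needed.
SceneLoader.ImportMeshAsync(
  "",                // empty string = import all meshes
  "assets/model2/",  // resolved against the public folder at runtime
  "scene.gltf",
  scene,
).then((result) => {
  console.log("loaded meshes:", result.meshes.length);
});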
I am testing a React hook which gets the window dimensions from window.innerHeight and window.innerWidth. It uses the following code to handle server-side rendering:
...
typeof window=="undefined" ? {width: 1024, height: 768} : functionToGetDimensions()
...
To test this line with react-testing-library I need to delete the window object:
...
const TestComponent = ({}) => {
  const { width, height } = useWindowDimensions();
  return (
    <div>
      <p data-testid="width">{width}</p>
      <p data-testid="height">{height}</p>
    </div>
  );
};

describe('when window is undefined', () => {
  // @ts-ignore
  const testWidth = 1000;
  const testHeight = 800;
  const fallbackWidth = 1024; // when useWindowDimensions detects window = undefined
  const { window } = global;

  beforeAll(() => {
    // @ts-ignore
    delete global.window;
  });

  afterAll(() => {
    global.window = window;
  });

  it('runs without error', () => {
    render(<TestComponent />);
    expect(screen.queryByTestId("height")).toHaveTextContent(fallbackWidth);
  });
});
I get the following error:
ReferenceError: window is not defined
I've tried using node as the test environment, but then document is not defined and render does not work.
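One way around that dilemma, not from the question but a sketch of a common pattern: render the component to a string with react-dom/server in a test file that opts into the node environment, so window really is undefined yet no DOM is required (this assumes the hook reads the dimensions during render; the import path is hypothetical):

/**
 * @jest-environment node
 */
import React from 'react';
import { renderToString } from 'react-dom/server';
import useWindowDimensions from '../hooks/useWindowDimensions'; // hypothetical path

const TestComponent = () => {
  const { width, height } = useWindowDimensions();
  return <p>{`${width}x${height}`}</p>;
};

it('falls back to 1024x768 when window is undefined', () => {
  // In the node environment `window` is genuinely undefined, and
  // renderToString needs no DOM, so the SSR fallback branch runs.
  const html = renderToString(<TestComponent />);
  expect(html).toContain('1024x768');
});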
I am using Clarifai's API to detect faces in an image. It was working fine and I deployed it to GitHub Pages, but after some time it stopped working and started giving me status code 400 (and status code 10020 in the Network tab), although I am using the image format Clarifai wants, which is base64. At the same time my app uses Clarifai's apparel detection model, which works perfectly fine.
below is the relevant code:
import React from 'react';
import Clarifai from 'clarifai';
import { connect } from 'react-redux';
import { setFaceBoundary, setApparelBoundary, numberOfFaces, setBoundingBox, setApparelsInfo, setWithSpinner } from '../../redux/box/box.actions';
import { setImageDimensions } from '../../redux/image/image.actions.js';
import './models-options.styles.css';

const app = new Clarifai.App({
  apiKey: 'MY_API_KEY'
});

const ModelsOptions = ({ setFaceBoundary, setApparelBoundary, fileProperties, numberOfFaces, setBoundingBox, setApparelsInfo, setWithSpinner, setImageDimensions }) => {
  const calculateApparel = (data) => {
    const conceptsArray = data.outputs[0].data.regions.map(concepts => concepts.data.concepts);
    setApparelsInfo(conceptsArray);
    const outputs = data.outputs[0].data.regions.map(apparels => apparels.region_info.bounding_box);
    console.log(outputs);
    setBoundingBox(outputs);
    const image = document.getElementById("inputImage");
    console.log('image dimensions', image.naturalWidth, image.naturalHeight);
    const width = image.naturalWidth;
    const height = image.naturalHeight;
    const apparelsLoaction = outputs.map(apparel => {
      return {
        leftCol: apparel.left_col * width,
        topRow: apparel.top_row * height,
        rightCol: width - apparel.right_col * width,
        bottomRow: height - apparel.bottom_row * height
      };
    });
    return apparelsLoaction;
  };

  const calculateFace = (data) => {
    const faceNumber = data.outputs[0].data.regions.length;
    numberOfFaces(faceNumber);
    const outputs = data.outputs[0].data.regions.map((faces) => faces.region_info.bounding_box);
    setBoundingBox(outputs);
    const image = document.getElementById("inputImage");
    const width = image.clientWidth;
    const height = image.clientHeight;
    const faceCordinates = outputs.map((face) => {
      return {
        leftCol: face.left_col * width,
        topRow: face.top_row * height,
        rightCol: width - face.right_col * width,
        bottomRow: height - face.bottom_row * height,
      };
    });
    return faceCordinates;
  };

  const detectFace = () => {
    setWithSpinner(true);
    app.models.predict(Clarifai.FACE_DETECT_MODEL, {base64: fileProperties}).then(
      (response) => {
        setFaceBoundary(calculateFace(response));
        setWithSpinner(false);
      },
      (err) => {
        console.log('There was an error', err);
      }
    );
    setApparelsInfo({});
    setApparelBoundary({});
  };

  const detectApparels = () => {
    setWithSpinner(true);
    app.models.predict('72c523807f93e18b431676fb9a58e6ad', {base64: fileProperties}).then(
      (response) => {
        console.log('response at the models', response);
        setApparelBoundary(calculateApparel(response));
        setWithSpinner(false);
      },
      (err) => {
        console.log('There was an error', err);
      }
    );
    setFaceBoundary({});
    numberOfFaces(0);
  };

  return (
    <div className="models-button">
      <button onClick={detectFace}>Detect Face</button>
      <button onClick={detectApparels}>Detect Apparels</button>
    </div>
  );
};

const mapStateToProps = ({image: {fileProperties}}) => ({
  fileProperties
});

const mapDispatchToProps = dispatch => ({
  setFaceBoundary: (facePostion) => dispatch(setFaceBoundary(facePostion)),
  setApparelBoundary: (apparelPosition) => dispatch(setApparelBoundary(apparelPosition)),
  numberOfFaces: (number) => dispatch(numberOfFaces(number)),
  setApparelsInfo: (number) => dispatch(setApparelsInfo(number)),
  setBoundingBox: (bounding) => dispatch(setBoundingBox(bounding)),
  setWithSpinner: (spinner) => dispatch(setWithSpinner(spinner)),
  setImageDimensions: (dimensions) => dispatch(setImageDimensions(dimensions)),
});

export default connect(mapStateToProps, mapDispatchToProps)(ModelsOptions);
Here is a link to the web app in case it helps: https://abdullahgumi.github.io/smart-box/
Any idea on how to solve this would be much appreciated. Thanks!
It looks like there was an internal issue that should now be resolved; the web app you've linked to is working again.
There is also a status page for the models (the Clarifai Model Status Page) which might be helpful, although in this case it unfortunately was not reflecting the status of that model accurately.