Using a separate AudioContext / scriptProcessor Node in Wavesurfer - reactjs

I'm attempting to use a separate AudioContext/script processor instead of Wavesurfer's default so I can manipulate the pitch of the audio independently of the playback rate. When I pass the context and script processor as parameters and play back the audio, I don't get any sound.
My Waveform component:
const playbackEngine = new PlaybackEngine({
    emitter: emitter,
    pitch: pitch,
});

const Waveform = WaveSurfer.create({
    audioContext: playbackEngine.context,
    audioScriptProcessor: playbackEngine.scriptProcessor,
    barWidth: 1,
    cursorWidth: 1,
    pixelRatio: 1,
    container: '#audio-spectrum',
    progressColor: '#03a9f4',
    height: 100,
    normalize: true,
    responsive: true,
    waveColor: '#ccc',
    cursorColor: '#4a74a5'
});
// called in ComponentDidMount()
function loadMediaUrl(url) {
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    // Decode asynchronously
    request.onload = async function() {
        let buffer = request.response;
        // sets arrayBuffer for Playback Engine
        const audioBuff = await playbackEngine.decodeAudioData(buffer, (error) => {
            console.error(`Error decoding audio:`, error);
        });
        // sets audioBuffer for Wavesurfer to render Waveform (where I believe the problem begins)
        Waveform.loadDecodedBuffer(audioBuff);
        // sets audioBuffer for Playback Engine to playback audio
        playbackEngine.setBuffer(audioBuff);
    };
    request.send();
}
Playback.js
const {SimpleFilter, SoundTouch} = require('./soundtouch');
const BUFFER_SIZE = 4096;
class PlaybackEngine {
    constructor({emitter, pitch}) {
        this.emitter = emitter;
        this.context = new (window.AudioContext || window.webkitAudioContext);
        this.scriptProcessor = this.context.createScriptProcessor(BUFFER_SIZE, 2, 2);
        this.scriptProcessor.onaudioprocess = e => {
            const l = e.outputBuffer.getChannelData(0);
            const r = e.outputBuffer.getChannelData(1);
            const framesExtracted = this.simpleFilter.extract(this.samples, BUFFER_SIZE);
            if (framesExtracted === 0) {
                this.emitter.emit('stop');
            }
            for (let i = 0; i < framesExtracted; i++) {
                l[i] = this.samples[i * 2];
                r[i] = this.samples[i * 2 + 1];
            }
        };
        this.soundTouch = new SoundTouch();
        this.soundTouch.pitch = pitch;
        this.duration = undefined;
    }

    get pitch() {
        return this.soundTouch.pitch;
    }
    set pitch(pitch) {
        this.soundTouch.pitch = pitch;
    }

    decodeAudioData(data) {
        return this.context.decodeAudioData(data);
    }

    setBuffer(buffer) {
        const bufferSource = this.context.createBufferSource();
        bufferSource.buffer = buffer;
        this.samples = new Float32Array(BUFFER_SIZE * 2);
        this.source = {
            extract: (target, numFrames, position) => {
                this.emitter.emit('time', (position / this.context.sampleRate));
                const l = buffer.getChannelData(0);
                const r = buffer.getChannelData(1);
                for (let i = 0; i < numFrames; i++) {
                    target[i * 2] = l[i + position];
                    target[i * 2 + 1] = r[i + position];
                }
                return Math.min(numFrames, l.length - position);
            },
        };
        this.simpleFilter = new SimpleFilter(this.source, this.soundTouch);
        this.duration = buffer.duration;
        this.emitter.emit('duration', buffer.duration);
    }

    play() {
        this.scriptProcessor.connect(this.context.destination);
    }

    pause() {
        this.scriptProcessor.disconnect(this.context.destination);
    }

    seekPercent(percent) {
        if (this.simpleFilter !== undefined) {
            this.simpleFilter.sourcePosition = Math.round(
                percent / 100 * this.duration * this.context.sampleRate
            );
        }
    }
}

export default PlaybackEngine;
In this setup, Waveform.play() plays audio from the Wavesurfer instance, but I cannot manipulate the pitch. Similarly, playbackEngine.play() lets me manipulate the pitch, but I lose all Wavesurfer functionality.
Though I'm pretty sure the problem stems from Wavesurfer and my Playback Engine using two separate AudioBuffers, I need both to set up the buffer in my playback context and to render the waveform with Wavesurfer.
I'd like to know how to use the Playback Engine's context, script processor, and AudioBuffer to control the Wavesurfer instance (i.e. have Waveform.play() play audio through the Playback Engine while still updating the Wavesurfer UI).
All help is appreciated.

So I ended up removing
audioScriptProcessor: playbackEngine.scriptProcessor,
from the Wavesurfer initialization and then attaching playbackEngine's script processor to the destination node manually. I had previously attempted a setup like this and heard annoying popping sounds during playback. What I thought were sample/buffer errors was actually coming from an EventEmitter instance I had constantly broadcasting the time between the files. Removing that solved my noise issue (ツ)
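For reference, a minimal sketch of that arrangement; the option values are placeholders, and only playbackEngine, its context, and its scriptProcessor are taken from the code above:
// Sketch only: Wavesurfer shares the Playback Engine's AudioContext,
// but no audioScriptProcessor option is passed; the processor is wired up by hand.
const playbackEngine = new PlaybackEngine({ emitter: emitter, pitch: 1.0 });

const Waveform = WaveSurfer.create({
    container: '#audio-spectrum',
    audioContext: playbackEngine.context,
    waveColor: '#ccc',
    progressColor: '#03a9f4'
});

// Route the pitch-shifted output to the speakers manually
// (playbackEngine.play() performs the same connect call internally).
playbackEngine.scriptProcessor.connect(playbackEngine.context.destination);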

Related

Web media recorder event too late to trigger

I am working on a web app in React.js + Flask that performs speech recognition and needs both the transcribed message and the recorded audio to be passed to a handler.
My problem is that this.parse(transcript, audio) always passes a null object for audio. Debugging in Chrome indicates that mediaRecorder.onstop and mediaRecorder.ondataavailable are only triggered after this.parse(transcript, audio) has run, so nothing is ever assigned to audio.
Is there a way to capture the audio from mediaRecorder before this.parse(transcript, audio) is executed?
Also, which variable should I pass (audio, audioURL, blob...) for the actual audio file?
class MessageParser {
constructor(actionProvider, state) {
var audio = null, blob = null, audioURL = null, mediaRecorder = null;
let chunks = [];
this.actionProvider = actionProvider;
this.state = state;
this.recognition = new SpeechRecognition()
this.recognition.continuous = false
this.recognition.interimResults = false
this.recognition.lang = 'en-US'
this.recognition.maxAlternatives = 1;
this.recognition.start()
this.recognition.onstart = function(){
if (navigator.mediaDevices.getUserMedia) {
//console.log('getUserMedia supported.');
let chunks = [];
var options = {
audioBitsPerSecond: 128000,
mimeType: 'audio/webm'
}
navigator.mediaDevices.getUserMedia({ audio: true, })
.then(function (stream) {
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.start();
})
} else {
console.log('getUserMedia Unsupported.');
}
};
this.recognition.onresult = (e) => {
const transcript = e.results[0][0].transcript;
var audioURL = null;
this.recognition.abort()
mediaRecorder.stop();
mediaRecorder.onstop = function (e) {
console.log("data available after MediaRecorder.stop() called.");
audio = document.createElement('audio');
blob = new Blob(chunks, { 'type': 'audio/webm; codecs=opus' });
audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
console.log("recorder stopped");
const recording = new Audio(audioURL)
recording.play()
}
mediaRecorder.ondataavailable = function (e) {
chunks.push(e.data);
}
this.parse(transcript, audio)
}
}
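One way to avoid the race described above (a sketch only, not code from the question) is to wrap the recorder's stop in a Promise, so parse() only runs after ondataavailable and onstop have fired:
// Sketch: wait for MediaRecorder to flush its data before parsing.
// mediaRecorder and chunks are assumed to be the variables from the question.
function stopAndCollect(mediaRecorder, chunks) {
    return new Promise((resolve) => {
        mediaRecorder.ondataavailable = (e) => chunks.push(e.data);
        mediaRecorder.onstop = () => {
            resolve(new Blob(chunks, { type: 'audio/webm; codecs=opus' }));
        };
        mediaRecorder.stop();
    });
}

// inside an async onresult handler:
// const blob = await stopAndCollect(mediaRecorder, chunks);
// this.parse(transcript, blob);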

React threejs merge two tubes to create a tee-piece with CSG not working as expected

I am trying to create a tee-piece, which is a fitting in the plumbing domain. It consists of two tubes that are merged together and has three openings, as shown in this picture.
I have written some code in three.js where I create a tube mesh1 and another tube mesh2 and then try to union them into mesh3 with the library @enable3d/three-graphics/jsm/csg (thanks to @Marquizzo). After using the CSG.union function and adding the mesh to the scene I do get one tee-piece, but it has also created a hole in geometry 1, which was not expected. You can see a picture of the correct holes (green) and the wrongly created hole (red) here:
It should instead look like this and be one single geometry.
Can anyone tell me how CSG works and why I am getting an extra hole on the backside of the first geometry?
import React, { Component } from 'react';
import * as THREE from 'three';
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls';
import { CSG } from '@enable3d/three-graphics/jsm/csg';
export default class TubeViewer extends Component {
componentDidMount() {
//Add Scene
this.scene = new THREE.Scene();
//Add Renderer
this.renderer = new THREE.WebGLRenderer({ antialias: true });
this.renderer.setClearColor('#808080');
this.renderer.shadowMap.enabled = true;
this.renderer.shadowMap.type = THREE.PCFSoftShadowMap;
this.renderer.setPixelRatio(window.devicePixelRatio);
this.renderer.setSize(window.innerWidth, window.innerHeight);
this.mount.appendChild(this.renderer.domElement);
//Add Camera
const fov = 60;
const aspect = window.innerWidth / window.innerHeight;
const near = 1.0;
const far = 1000.0;
this.camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
this.camera.position.set(1, aspect, 1, 1000);
//Tee-piece
const curve1 = new THREE.LineCurve(new THREE.Vector3(2, 0, 0), new THREE.Vector3(2, 0, 0.1));
const curve11 = new THREE.LineCurve(new THREE.Vector3(2.0, 0, 0.05), new THREE.Vector3(2.05, 0, 0.05));
const geometry1 = new THREE.TubeGeometry(curve1, 20, 0.025, 8, false);
const geometry2 = new THREE.TubeGeometry(curve2, 20, 0.025, 8, false);
const material = new THREE.MeshBasicMaterial({ color: '#C0C0C0' });
const mesh1 = new THREE.Mesh(geometry1, material);
const mesh2 = new THREE.Mesh(geometry2, material);
const mesh3 = CSG.union(mesh1, mesh2);
this.scene.add(mesh3);
//Add raycaster to for interactivity
this.raycaster = new THREE.Raycaster();
this.mouse = new THREE.Vector2();
this.renderer.domElement.addEventListener('click', onClick.bind(this), false);
function onClick(event) {
event.preventDefault();
this.mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
this.mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;
this.raycaster.setFromCamera(this.mouse, this.camera);
var intersects = this.raycaster.intersectObjects(this.scene.children, true);
if (intersects.length > 0) {
console.log('Intersection:', intersects[0]);
//console.log(intersects[0].object.uuid);
// console.log(`GUID: ${intersects[0]}`);
let object = intersects[0].object;
object.material.color.set(Math.random() * 0xffffff);
}
}
//Settings
//Add Camera Controls
const controls = new OrbitControls(this.camera, this.renderer.domElement);
controls.addEventListener('change', this.render); // use if there is no animation loop
controls.minDistance = 2;
controls.maxDistance = 10;
controls.target.set(0, 0, -0.2);
controls.update();
///Add AMBIENT LIGHT
let light = new THREE.DirectionalLight(0xffffff, 1.0);
light.position.set(20, 100, 10);
light.target.position.set(0, 0, 0);
light.castShadow = true;
light.shadow.bias = -0.001;
light.shadow.mapSize.width = 2048;
light.shadow.mapSize.height = 2048;
light.shadow.camera.near = 0.1;
light.shadow.camera.far = 500.0;
light.shadow.camera.near = 0.5;
light.shadow.camera.far = 500.0;
light.shadow.camera.left = 100;
light.shadow.camera.right = -100;
light.shadow.camera.top = 100;
light.shadow.camera.bottom = -100;
this.scene.add(light);
light = new THREE.AmbientLight(0xffffff, 0.7);
this.scene.add(light);
//Start animation
this.start();
}
//Unmount when animation has stopped
componentWillUnmount() {
this.stop();
this.mount.removeChild(this.renderer.domElement);
}
//Function to start animation
start = () => {
//Rotate Models
if (!this.frameId) {
this.frameId = requestAnimationFrame(this.animate);
}
};
//Function to stop animation
stop = () => {
cancelAnimationFrame(this.frameId);
};
//Animate models here
animate = () => {
//ReDraw scene with camera and scene object
if (this.cubeMesh) this.cubeMesh.rotation.y += 0.01;
this.renderScene();
this.frameId = window.requestAnimationFrame(this.animate);
};
//Render the scene
renderScene = () => {
if (this.renderer) this.renderer.render(this.scene, this.camera);
};
render() {
return (
<div
style={{ width: '800px', height: '800px' }}
ref={(mount) => {
this.mount = mount;
}}
/>
);
}
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/react/16.6.3/umd/react.production.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/react-dom/16.6.3/umd/react-dom.production.min.js"></script>
For CSG you'll need solid bodies. These tubes are open.
I created an example using cylinders (tubes are more involved to cap) so you can test it.
These cylinders are open ended, so they fail in the same way as your tubes.
https://codepen.io/flatworldstudio/pen/bGBjmrP
const geometry1 = new THREE.CylinderGeometry(0.1, 0.1, 0.5, 20, 1, true);
These are closed, and CSG works as expected.
https://codepen.io/flatworldstudio/pen/VwmBRoL
const geometry1 = new THREE.CylinderGeometry(0.1, 0.1, 0.5, 20, 1, false);
(I'm using a different version of CSG, but they all seem to be built on the same code)
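For a tee-piece, a minimal sketch along those lines might look like this; the dimensions, offsets, and the updateMatrix() call are placeholders/assumptions, not values from the question:
// Sketch: union of two closed-ended cylinders (openEnded = false) into a tee.
const material = new THREE.MeshBasicMaterial({ color: '#C0C0C0' });

const runGeometry = new THREE.CylinderGeometry(0.1, 0.1, 0.5, 20, 1, false);
const run = new THREE.Mesh(runGeometry, material);

const branchGeometry = new THREE.CylinderGeometry(0.1, 0.1, 0.25, 20, 1, false);
const branch = new THREE.Mesh(branchGeometry, material);
branch.rotation.z = Math.PI / 2;   // lay the branch across the run
branch.position.x = 0.125;         // let the two solids overlap
branch.updateMatrix();             // bake the transform before the boolean op

const teePiece = CSG.union(run, branch);
this.scene.add(teePiece);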

Tensorflow.js prediction result doesn't change

I trained my model with Google Teachable Machine (image project) and included the model in my Ionic Angular app. I loaded the model successfully and use the camera preview to predict which class is shown in the image from the camera.
The picture displayed in the canvas changes properly, but the predict() method returns the same result on every call.
import * as tmImage from '@teachablemachine/image';
...
async startPrediction() {
this.model = await tmImage.load(this.modelURL, this.metadataURL);
this.maxPredictions = this.model.getTotalClasses();
console.log('classes: ' + this.maxPredictions); //works properly
requestAnimationFrame(() => {
this.loop();
});
}
async loop() {
const imageAsBase64 = await this.cameraPreview.takeSnapshot({ quality: 60 });
const canvas = document.getElementById('output') as HTMLImageElement;
//image changes properly, I checked it with a canvas output
canvas.src = 'data:image/jpeg;base64,' + imageAsBase64;
const prediction = await this.model.predict(canvas);
for (let i = 0; i < this.maxPredictions; i++) {
const classPrediction =
prediction[i].className + ': ' + prediction[i].probability.toFixed(2);
//probability doesn't change, even if I hold the camera close over a trained image
}
requestAnimationFrame(() => {
this.loop();
});
}
The prediction result is, for example, class1 = 0.34, class2 = 0.66, but it doesn't change.
I hope you could help me to find my bug, thanks in advance!
The image has probably not finished loading by the time you call the prediction model. It has been discussed here and there
function load(url){
    return new Promise((resolve, reject) => {
        canvas.src = url
        canvas.onload = () => {
            resolve(canvas)
        }
    })
}
await load(base64Data)
// then the image can be used for prediction
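Applied to the loop from the question, that could look roughly like this (a sketch only, reusing the question's 'output' element and model):
// Sketch: only call predict() once the <img> element has decoded the new frame.
async loop() {
    const imageAsBase64 = await this.cameraPreview.takeSnapshot({ quality: 60 });
    const img = document.getElementById('output');

    await new Promise((resolve) => {
        img.onload = () => resolve(img);
        img.src = 'data:image/jpeg;base64,' + imageAsBase64;
    });

    const prediction = await this.model.predict(img);
    // prediction[i].className / prediction[i].probability should now change per frame

    requestAnimationFrame(() => this.loop());
}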

Camera streaming service works only with localhost but not with IP address

I have implemented a service which streams camera output to an HTML5 page. But it works only if I use localhost:8080; if I use the IP address or the machine name, it does not even detect the camera.
/*global logger*/
/*
VisualInspection
========================
#file : VisualInspection.js
#version : 1.0.0
#author :
#date : 7/28/2019
#copyright :
#license : Apache 2
Documentation
========================
Describe your widget here.
*/
// Required module list. Remove unnecessary modules, you can always get them back from the boilerplate.
define([
"dojo/_base/declare",
"mxui/widget/_WidgetBase",
"dijit/_TemplatedMixin",
"mxui/dom",
"dojo/dom",
"dojo/dom-prop",
"dojo/dom-geometry",
"dojo/dom-class",
"dojo/dom-style",
"dojo/dom-construct",
"dojo/_base/array",
"dojo/_base/lang",
"dojo/text",
"dojo/html",
"dojo/_base/event",
"VisualInspection/lib/jquery-1.11.2",
"dojo/text!VisualInspection/widget/template/VisualInspection.html",
"VisualInspection/widget/template/tf.min",
// "dojo/text!VisualInspection/widget/template/labels.json",
// "dojo/text!VisualInspection/widget/template/model.json"
], function (declare, _WidgetBase, _TemplatedMixin, dom, dojoDom, dojoProp, dojoGeometry, dojoClass, dojoStyle, dojoConstruct, dojoArray, lang, dojoText, dojoHtml, dojoEvent, _jQuery, widgetTemplate, tf) {
"use strict";
var $ = _jQuery.noConflict(true);
var LABELS_URL = "http://pni6w2465:7777/EasyPlan/model_web/labels.json"
var MODEL_JSON = "http://pni6w2465:7777/EasyPlan/model_web/model.json"
// var tf = require(['../../VisualInspection/node_modules/@tensorflow/tfjs']);
//////////////
const TFWrapper = model => {
const calculateMaxScores = (scores, numBoxes, numClasses) => {
const maxes = []
const classes = []
for (let i = 0; i < numBoxes; i++) {
let max = Number.MIN_VALUE
let index = -1
for (let j = 0; j < numClasses; j++) {
if (scores[i * numClasses + j] > max) {
max = scores[i * numClasses + j]
index = j
}
}
maxes[i] = max
classes[i] = index
}
return [maxes, classes]
}
const buildDetectedObjects = (
width,
height,
boxes,
scores,
indexes,
classes
) => {
const count = indexes.length
const objects = []
for (let i = 0; i < count; i++) {
const bbox = []
for (let j = 0; j < 4; j++) {
bbox[j] = boxes[indexes[i] * 4 + j]
}
const minY = bbox[0] * height
const minX = bbox[1] * width
const maxY = bbox[2] * height
const maxX = bbox[3] * width
bbox[0] = minX
bbox[1] = minY
bbox[2] = maxX - minX
bbox[3] = maxY - minY
objects.push({
bbox: bbox,
class: classes[indexes[i]],
score: scores[indexes[i]]
})
}
return objects
}
var img = null;
const detect = input => {
const batched = tf.tidy(() => {
const img = tf.browser.fromPixels(input)
// Reshape to a single-element batch so we can pass it to executeAsync.
// var img = null;
// //sid
// var canvas = document.querySelector("#canvasElement");
// if (canvas.getContext) {
// var ctx = canvas.getContext("2d");
// img = canvas.toDataURL("image/png");
// }
return img.expandDims(0)
})
const height = batched.shape[1]
const width = batched.shape[2]
// const height = img.height
// const width = img.width
return model.executeAsync(batched).then(result => {
const scores = result[0].dataSync()
const boxes = result[1].dataSync()
// clean the webgl tensors
batched.dispose()
tf.dispose(result)
const [maxScores, classes] = calculateMaxScores(
scores,
result[0].shape[1],
result[0].shape[2]
)
const prevBackend = tf.getBackend()
// run post process in cpu
tf.setBackend('cpu')
const indexTensor = tf.tidy(() => {
const boxes2 = tf.tensor2d(boxes, [
result[1].shape[1],
result[1].shape[3]
])
return tf.image.nonMaxSuppression(
boxes2,
maxScores,
20, // maxNumBoxes
0.5, // iou_threshold
0.5 // score_threshold
)
})
const indexes = indexTensor.dataSync()
indexTensor.dispose()
// restore previous backend
tf.setBackend(prevBackend)
return buildDetectedObjects(
width,
height,
boxes,
maxScores,
indexes,
classes
)
})
}
return {
detect: detect
}
}
//////////////////////
// Declare widget's prototype.
return declare("VisualInspection.widget.VisualInspection", [_WidgetBase, _TemplatedMixin], {
// _TemplatedMixin will create our dom node using this HTML template.
templateString: widgetTemplate,
// DOM elements
inputNodes: null,
colorSelectNode: null,
colorInputNode: null,
infoTextNode: null,
// Parameters configured in the Modeler.
mfToExecute: "",
messageString: "",
backgroundColor: "",
// Internal variables. Non-primitives created in the prototype are shared between all widget instances.
_handles: null,
_contextObj: null,
_alertDiv: null,
_readOnly: false,
// dojo.declare.constructor is called to construct the widget instance. Implement to initialize non-primitive properties.
constructor: function () {
logger.debug(this.id + ".constructor");
this._handles = [];
},
// dijit._WidgetBase.postCreate is called after constructing the widget. Implement to do extra setup work.
postCreate: function () {
logger.debug(this.id + ".postCreate");
if (this.readOnly || this.get("disabled") || this.readonly) {
this._readOnly = true;
}
this._updateRendering();
this._setupEvents();
var video = document.querySelector("#videoElement");
var canvas = document.querySelector("#canvasElement");
// if (navigator.mediaDevices.getUserMedia) {
// navigator.mediaDevices.getUserMedia({ video: true })
// .then(function (stream) {
// video.srcObject = stream;
// })
// .catch(function (err0r) {
// console.log("Something went wrong!");
// });
// }
this.componentDidMount();
},
////////////////////////////////////////////////////////
componentDidMount: function () {
var video = document.querySelector("#videoElement");
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
const webCamPromise = navigator.mediaDevices
.getUserMedia({
audio: false,
video: {
facingMode: 'user'
}
})
.then(stream => {
window.stream = stream
video.srcObject = stream
return new Promise((resolve, _) => {
video.onloadedmetadata = () => {
resolve()
}
})
})
const modelPromise = tf.loadGraphModel(MODEL_JSON)
const labelsPromise = fetch(LABELS_URL).then(data => data.json())
Promise.all([modelPromise, labelsPromise, webCamPromise])
.then(values => {
const [model, labels] = values
this.detectFrame(video, model, labels)
})
.catch(error => {
console.error(error)
})
}
},
detectFrame: function (video, model, labels) {
TFWrapper(model)
.detect(video)
.then(predictions => {
this.renderPredictions(predictions, labels)
requestAnimationFrame(() => {
this.detectFrame(video, model, labels)
})
})
},
renderPredictions: function (predictions, labels) {
var canvas = document.querySelector("#canvasElement");
const ctx = canvas.getContext('2d')
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height)
// Font options.
const font = '16px sans-serif'
ctx.font = font
ctx.textBaseline = 'top'
predictions.forEach(prediction => {
const x = prediction.bbox[0]
const y = prediction.bbox[1]
const width = prediction.bbox[2]
const height = prediction.bbox[3]
const label = labels[parseInt(prediction.class)]
// Draw the bounding box.
ctx.strokeStyle = '#00FFFF'
ctx.lineWidth = 4
ctx.strokeRect(x, y, width, height)
// Draw the label background.
ctx.fillStyle = '#00FFFF'
const textWidth = ctx.measureText(label).width
const textHeight = parseInt(font, 10) // base 10
ctx.fillRect(x, y, textWidth + 4, textHeight + 4)
})
predictions.forEach(prediction => {
const x = prediction.bbox[0]
const y = prediction.bbox[1]
const label = labels[parseInt(prediction.class)]
// Draw the text last to ensure it's on top.
ctx.fillStyle = '#000000'
ctx.fillText(label, x, y)
})
},
///////////////////////////////////////////////////////////
// mxui.widget._WidgetBase.update is called when context is changed or initialized. Implement to re-render and / or fetch data.
update: function (obj, callback) {
logger.debug(this.id + ".update");
this._contextObj = obj;
this._resetSubscriptions();
this._updateRendering(callback); // We're passing the callback to updateRendering to be called after DOM-manipulation
},
// mxui.widget._WidgetBase.enable is called when the widget should enable editing. Implement to enable editing if widget is input widget.
enable: function () {
logger.debug(this.id + ".enable");
},
// mxui.widget._WidgetBase.enable is called when the widget should disable editing. Implement to disable editing if widget is input widget.
disable: function () {
logger.debug(this.id + ".disable");
},
// mxui.widget._WidgetBase.resize is called when the page's layout is recalculated. Implement to do sizing calculations. Prefer using CSS instead.
resize: function (box) {
logger.debug(this.id + ".resize");
},
// mxui.widget._WidgetBase.uninitialize is called when the widget is destroyed. Implement to do special tear-down work.
uninitialize: function () {
logger.debug(this.id + ".uninitialize");
// Clean up listeners, helper objects, etc. There is no need to remove listeners added with this.connect / this.subscribe / this.own.
},
// We want to stop events on a mobile device
_stopBubblingEventOnMobile: function (e) {
logger.debug(this.id + "._stopBubblingEventOnMobile");
if (typeof document.ontouchstart !== "undefined") {
dojoEvent.stop(e);
}
},
// Attach events to HTML dom elements
_setupEvents: function () {
logger.debug(this.id + "._setupEvents");
this.connect(this.colorSelectNode, "change", function (e) {
// Function from mendix object to set an attribute.
this._contextObj.set(this.backgroundColor, this.colorSelectNode.value);
});
this.connect(this.infoTextNode, "click", function (e) {
// Only on mobile stop event bubbling!
this._stopBubblingEventOnMobile(e);
// If a microflow has been set execute the microflow on a click.
if (this.mfToExecute !== "") {
this._execMf(this.mfToExecute, this._contextObj.getGuid());
}
});
},
_execMf: function (mf, guid, cb) {
logger.debug(this.id + "._execMf");
if (mf && guid) {
mx.ui.action(mf, {
params: {
applyto: "selection",
guids: [guid]
},
callback: lang.hitch(this, function (objs) {
if (cb && typeof cb === "function") {
cb(objs);
}
}),
error: function (error) {
console.debug(error.description);
}
}, this);
}
},
// Rerender the interface.
_updateRendering: function (callback) {
logger.debug(this.id + "._updateRendering");
// Important to clear all validations!
this._clearValidations();
// The callback, coming from update, needs to be executed, to let the page know it finished rendering
this._executeCallback(callback, "_updateRendering");
},
// Handle validations.
_handleValidation: function (validations) {
logger.debug(this.id + "._handleValidation");
this._clearValidations();
var validation = validations[0],
message = validation.getReasonByAttribute(this.backgroundColor);
if (this._readOnly) {
validation.removeAttribute(this.backgroundColor);
} else if (message) {
this._addValidation(message);
validation.removeAttribute(this.backgroundColor);
}
},
// Clear validations.
_clearValidations: function () {
logger.debug(this.id + "._clearValidations");
dojoConstruct.destroy(this._alertDiv);
this._alertDiv = null;
},
// Show an error message.
_showError: function (message) {
logger.debug(this.id + "._showError");
if (this._alertDiv !== null) {
dojoHtml.set(this._alertDiv, message);
return true;
}
this._alertDiv = dojoConstruct.create("div", {
"class": "alert alert-danger",
"innerHTML": message
});
dojoConstruct.place(this._alertDiv, this.domNode);
},
// Add a validation.
_addValidation: function (message) {
logger.debug(this.id + "._addValidation");
this._showError(message);
},
// Reset subscriptions.
_resetSubscriptions: function () {
logger.debug(this.id + "._resetSubscriptions");
// Release handles on previous object, if any.
this.unsubscribeAll();
// When a mendix object exists create subscribtions.
if (this._contextObj) {
this.subscribe({
guid: this._contextObj.getGuid(),
callback: lang.hitch(this, function (guid) {
this._updateRendering();
})
});
this.subscribe({
guid: this._contextObj.getGuid(),
attr: this.backgroundColor,
callback: lang.hitch(this, function (guid, attr, attrValue) {
this._updateRendering();
})
});
this.subscribe({
guid: this._contextObj.getGuid(),
val: true,
callback: lang.hitch(this, this._handleValidation)
});
}
},
_executeCallback: function (cb, from) {
logger.debug(this.id + "._executeCallback" + (from ? " from " + from : ""));
if (cb && typeof cb === "function") {
cb();
}
}
});
});
require(["VisualInspection/widget/VisualInspection"]);
<div id="container">
<video autoplay="true" playsInline="true" width="600" height="500" id="videoElement" style="position: fixed;" >
</video>
<canvas id= "canvasElement" width="600" height="500" style="position: absolute;">
</canvas>
</div>
With the above code, when I run it using localhost, the getUserMedia code executes.
When I run it using the machine name or the IP address, this code is not even considered in Chrome's developer tools, due to security or some other reason.
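One thing worth checking: in Chrome, navigator.mediaDevices (and therefore getUserMedia) is only exposed in secure contexts, i.e. HTTPS or localhost. A quick diagnostic sketch (not part of the widget) along these lines would show whether that is what is blocking the camera:
// Sketch: check whether the page origin is allowed to use the camera.
if (!window.isSecureContext) {
    console.warn('Not a secure context; serve the page over HTTPS (or use localhost).');
} else if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.warn('getUserMedia is not available in this browser.');
} else {
    navigator.mediaDevices.getUserMedia({ video: true })
        .then((stream) => console.log('Camera OK', stream))
        .catch((err) => console.error('Camera blocked:', err.name, err.message));
}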

How to fix 'HTML5 currentTime always sets to zero'

I am building a video player in React.js. I have my own custom slider bar. When I click on the slider bar, I get its value, calculate the current time of the video, and set it using document.getElementById('video').currentTime = value.
But each time I give an input to the slider bar, the video's current time is set to 0.
This works fine in Firefox, apart from a warning message "The operation was aborted", but it is not working in Chrome.
const pos = this.seek_bar.value;
this.video = document.getElementById("video");
let new_time = (pos / 100) * this.video.duration;
if (!isNaN(new_time)) this.video.currentTime = new_time;

this.video.addEventListener("timeupdate", () => {
    let value = (this.video.currentTime / this.video.duration) * 100;
    if (!isNaN(value)) {
        this.seek_bar.value = value;
    }
});
I want Chrome to set the current time I provide. Please help me solve this.
Got it.
The problem was with how the video was streamed from the server: for Chrome to be able to seek, the server has to honour HTTP Range requests and answer with 206 Partial Content.
I changed the existing code to:
video(req, res) {
    const videoPath = req.query.videoPath;
    const stat = fs.statSync(videoPath);
    const fileSize = stat.size;
    const range = req.headers.range;

    if (range) {
        const parts = range.replace(/bytes=/, "").split("-");
        const start = parseInt(parts[0], 10);
        const end = parts[1] ? parseInt(parts[1], 10) : fileSize - 1;
        const chunksize = end - start + 1;
        const file = fs.createReadStream(videoPath, { start, end });
        const head = {
            "Content-Range": `bytes ${start}-${end}/${fileSize}`,
            "Accept-Ranges": "bytes",
            "Content-Length": chunksize,
            "Content-Type": "video/mp4"
        };
        res.writeHead(206, head);
        file.pipe(res);
    } else {
        const head = {
            "Content-Length": fileSize,
            "Content-Type": "video/mp4"
        };
        res.writeHead(200, head);
        fs.createReadStream(videoPath).pipe(res);
    }
}
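For completeness, a sketch of how such a handler could be wired up, assuming an Express server and that the video() handler above is available as a plain function; the route path and port are arbitrary:
// Sketch: registering the range-aware handler on an Express route.
const express = require('express');
const fs = require('fs');

const app = express();
app.get('/video', (req, res) => video(req, res));
app.listen(8080);

// On the client, seeking works because the server now answers range
// requests with 206 Partial Content, e.g.:
// <video id="video" src="/video?videoPath=/path/to/file.mp4" controls></video>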
