I am trying to record a video and then save it to the server.
My issue is the file is not being saved to the server and I am not sure why. The issue seems to be that it is not creating the blob or maybe not able to get the file as a blob?
I say that because in the Console I see this error
stopRecording failure TypeError: Failed to execute 'createObjectURL' on 'URL': Overload resolution failed.
Here is my file
<html>
<head>
<script src="https://cdn.webrtc-experiment.com/RecordRTC.js"></script>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
</head>
<body>
<button id="btn-start-recording">Start Recording</button>
<button id="btn-stop-recording" disabled="disabled">Stop Recording</button>
<button id="btn-save-recording" disabled="disabled">Stop& Save Recording</button>
<!--
2. Include a video element that will display the current video stream
and as well to show the recorded video at the end.
-->
<hr>
<video id="my-preview" controls autoplay></video>
<!-- 4. Initialize and prepare the video recorder logic -->
<script>
// Store a reference of the preview video element and a global reference to the recorder instance
var video = document.getElementById('my-preview');
var recorder;
// When the user clicks on start video recording
document.getElementById('btn-start-recording').addEventListener("click", function(){
// Disable start recording button
this.disabled = true;
// Request access to the media devices
navigator.mediaDevices.getUserMedia({
audio: true,
video: true
}).then(function(stream) {
// Display a live preview on the video element of the page
setSrcObject(stream, video);
// Start to display the preview on the video element
// and mute the video to disable the echo issue !
video.play();
video.muted = true;
// Initialize the recorder
recorder = new RecordRTCPromisesHandler(stream, {
mimeType: 'video/webm',
bitsPerSecond: 128000
});
// Start recording the video
recorder.startRecording().then(function() {
console.info('Recording video ...');
}).catch(function(error) {
console.error('Cannot start video recording: ', error);
});
// release stream on stopRecording
recorder.stream = stream;
// Enable stop recording button
document.getElementById('btn-stop-recording').disabled = false;
document.getElementById('btn-save-recording').disabled = false;
}).catch(function(error) {
console.error("Cannot access media devices: ", error);
});
}, false);
// When the user clicks on Stop video recording
document.getElementById('btn-stop-recording').addEventListener("click", function(){
this.disabled = true;
recorder.stopRecording().then(function() {
console.info('stopRecording success');
// Retrieve recorded video as blob and display in the preview element
var videoBlob = recorder.getBlob();
video.src = URL.createObjectURL(videoBlob);
video.play();
// Unmute video on preview
video.muted = false;
// Stop the device streaming
recorder.stream.stop();
// Enable record button again !
document.getElementById('btn-start-recording').disabled = false;
}).catch(function(error) {
console.error('stopRecording failure', error);
});
}, false);
//lets save the video
document.getElementById('btn-save-recording').addEventListener("click", function(){
// Retrieve recorded video as blob and display in the preview element
var videoBlob = recorder.getBlob();
video.src = URL.createObjectURL(videoBlob);
video.play();
// Unmute video on preview
video.muted = false;
// Stop the device streaming
recorder.stream.stop();
var formData = new FormData();
formData.append('video', player.recordedData.video);
// Execute the ajax request, in this case we have a very simple PHP script
// that accepts and save the uploaded "video" file
xhr('upload-videoclaim.php', formData, function (fName) {
console.log("Video succesfully uploaded !");
})
// Helper function to send
function xhr(url, data, callback) {
var request = new XMLHttpRequest();
request.onreadystatechange = function () {
if (request.readyState == 4 && request.status == 200) {
callback(location.href + request.responseText);
}
};
request.open('POST', url);
request.send(data);
}
});
</script>
</body>
</html>
See the MDN docs on parameters for URL.createObjectURL:
A File, Blob, or MediaSource object to create an object URL for.
Just try passing something else to URL.createObjectURL that it doesn't expect like a number. You'll get a similar / same error message.
Now see the implementation of RecordRTCPromisesHandler.getBlob(). It returns a Promise:
/**
* This method returns the recorded blob.
* #method
* #memberof RecordRTCPromisesHandler
* #example
* recorder.stopRecording().then(function() {
* recorder.getBlob().then(function(blob) {})
* }).catch(errorCB);
*/
this.getBlob = function() {
return new Promise(function(resolve, reject) {
try {
resolve(self.recordRTC.getBlob());
} catch (e) {
reject(e);
}
});
};
Look at the #example given in the JSDoc comment. It does recorder.getBlob().then(<callback>). Follow the example.
Option 1 (nested Promise.then()):
recorder.stopRecording().then(function() {
console.info('stopRecording success');
// do whatever else you want here.
recorder.getBlob().then(videoBlob => {
video.src = URL.createObjectURL(videoBlob);
video.play();
video.muted = false;
// ...
});
}).catch(function(error) {
// ...
Option 2 (chained Promise.then()):
recorder.stopRecording().then(() => recorder.getBlob()).then(videoBlob => {
video.src = URL.createObjectURL(videoBlob);
video.play();
video.muted = false;
// ...
}).catch(function(error) {
// ...
Option 3 (async-await):
try {
await recorder.stopRecording();
const videoBlob = await recorder.getBlob();
video.src = URL.createObjectURL(videoBlob);
video.play();
video.muted = false;
// ...
} catch(error) {
// ...
I'm not sure, but part of why you didn't understand the problem might have been due to limitations of intellisense when writing JS in HTML, and particularly when using a library without downloading the source locally so that it's visible to the intellisense facilities. I wouldn't really fault you for that. If you think it's appropriate, you could consider politely asking the maintainer of the library to add to their library's website's docuentation to also document the Promise-based interfaces of the library, or even writing up PRs to do that.
Related
My code looks something like this (using angularjs):
angular.module('myApp').factory('photoService', function(...){
var camera = navigator.camera;
var cameraOpts = { destinationType: camera.DestinationType.DATA_URL };
cameraService.takePhoto = function(successCallback, failureCallback){
console.log(JSON.stringify(cameraOpts));
camera.getPicture(function(img){
console.log(img);
//handle success
}, function(e){
console.log(e);
//handle failure
}, cameraOptions);
}
});
My cameraOpts looks correctly populated, since its log shows {"destinationType":0} (i.e. destinationType != undefined, so it was correctly pulled from the camera object). My issue is that when I do console.log(img), I am still getting a file URI instead of the image as a base64 string. Why is this?
I am a developer/creator of a Video Streaming Service, in order to server the movies I am using
createObjectURL() (BLOB URL).
The Blob Url is created and served to the html5 video element
after the video is rendered the blob URL is revoked in order to prevent the user from accessing the private movie file, however is result of this when I try to seek through the video I get an error file not found.
Would i need to recreate the blob every time I seek the video or am I declaring my blob wrong?
async renderBlob(url){
const myPlayer = this.player.current;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'movie.mp4', true);
xhr.responseType = 'blob';
xhr.onload = () => {
this.setState({src: URL.createObjectURL(xhr.response)})
myPlayer.src = this.state.src
myPlayer.play()
};
xhr.onerror = function(e) {
alert("Error " + e.target.status + " occurred while receiving the document.");
};
xhr.send();
}
after the video is playing:
URL.revokeObjectURL(this.state.src)
Short videos SEEK okay where as Longer Videos 1) to longer to load and 2 do not seek
GET blob:http://localhost:3000/e5fd2c07-3f8a-407e-815f-7b9314d9156d net::ERR_FILE_NOT_FOUND
Hello I'm trying to use cheerio in a Alexa Skill to get data from website and add in skill.
The code of intent
const HelloWorldIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'all_titles';
},
handle(handlerInput) {
//Lógica para speak output
var options = {
uri: 'https://es.pagetest.com/',
transform: function (body) {
return cheerio.load(body);
}
};
rp(options)
.then(function ($) {
var arr_response = []
var titles = $('.ms-short-title');
titles.each((i, a) =>{
if(a.parent.attribs.title !== undefined)arr_response.push(a.parent.attribs.title);
});
const speakOutput = insert_in_string(arr_response);
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
function insert_in_string (arr_titulars){
var string_text = '';
for(var titular of arr_titulars){
string_text += titular + ' Siguiente titular. ';
}
return string_text;
}
})
.catch(function (err) {
return err;
});
}
};
I have tested the logic locally and it works ok, by putting it in alexa code editor, in test, return error message, but not a trace
¿Any idea? Thanks
If you are using Alexa-hosted Skills, you will have CloudWatch integration already built-in. Simply go to your Amazon Developer Console, navigate to your Skill's Code tab, scroll to the bottom and click the Logs: Amazon CloudWatch link on the bottom left.
Now, every time that you console.log, it will be sent to CloudWatch. So, in your catch handler, add console.log(err) and you should be able to see what's going wrong.
This blog post might also help: https://developer.amazon.com/blogs/alexa/post/71ac4c05-9e33-41d2-abbf-472ba66126cb/3-tips-to-troubleshoot-your-custom-alexa-skill-s-back-end
I want to make an option to "Select Image From Gallery or Camera". I have tried many modules but they are only providing access to the gallery directly. I am using expo tool for creating a react native application. First I want a popup to open then then the user has to pick an option then the user is redirected according to that option. If you have any suggestion, please help me.
I´ve seen it done with React Native Image Picker, look for it in github:
https://github.com/react-community/react-native-image-picker
Add dependencies:
dependencies {
compile project(':react-native-image-picker')
}
Add permissions:
<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
Usage:
var ImagePicker = require('react-native-image-picker');
// More info on all the options is below in the README...just some common use cases shown here
var options = {
title: 'Select Avatar',
customButtons: [
{name: 'fb', title: 'Choose Photo from Facebook'},
],
storageOptions: {
skipBackup: true,
path: 'images'
}
};
/**
* The first arg is the options object for customization (it can also be null or omitted for default options),
* The second arg is the callback which sends object: response (more info below in README)
*/
ImagePicker.showImagePicker(options, (response) => {
console.log('Response = ', response);
if (response.didCancel) {
console.log('User cancelled image picker');
}
else if (response.error) {
console.log('ImagePicker Error: ', response.error);
}
else if (response.customButton) {
console.log('User tapped custom button: ', response.customButton);
}
else {
let source = { uri: response.uri };
// You can also display the image using data:
// let source = { uri: 'data:image/jpeg;base64,' + response.data };
this.setState({
avatarSource: source
});
}
});
If you would like to directly start just the camera or the gallery, use it like this:
// Launch Camera:
ImagePicker.launchCamera(options, (response) => {
// Same code as in above section!
});
// Open Image Library:
ImagePicker.launchImageLibrary(options, (response) => {
// Same code as in above section!
});
Hope it helps.
I have an external webpage that contains only the following:-
{"date":"25 December 2017"}
Using node.js, how can I get Alexa to read (and say) the date from the webpage.
You can use "http" or "https" package in Node to do this. JSON.parse(responsestring) could easily parse the content you have shown above.
Your external webpage link would replace "yourendpoint" in below code.
var http = require("http");
http.get(yourendpoint, function (response) {
// console.log("response:" + response);
// data is streamed in chunks from the server
// so we have to handle the "data" event
var buffer = "", data;
response.on("data", function (chunk) {
buffer += chunk;
});
response.on("end", function (err) {
if(err) {
speechOutput = "I am sorry, I could not get the data from webpage ."
} else {
console.log("response:" + buffer);
// Parse your response the way you want
speechOutput = "<<Your desired output>>"
}
}
this.emit(':tell', speechOutput);
});
});