We'd like to pipe the microphone directly to the Watson Speech to Text service, but it seems that we have to go through a .wav file first? Please take a look at the following code. In particular, I was trying to get the microphone streamed directly to the Speech to Text service. I believe this is the most common way of using a mic, rather than piping it into a .wav file and then streaming the .wav file to STT:
var SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');
var fs = require('fs');
var watson = require('watson-developer-cloud');
var cp = require('child_process');

var mic = cp.spawn('arecord', ['--device=plughw:1,0', '--format=S16_LE', '--rate=44100', '--channels=1']); //, '--duration=10'
mic.stderr.pipe(process.stderr);

stt();

function stt() {
    console.log("openCMDS");
    var speech_to_text = new SpeechToTextV1({
        username: '',
        password: ''
    });
    var params = {
        content_type: 'audio/wav',
        model: 'zh-CN_BroadbandModel',
        continuous: true,
        inactivity_timeout: -1
    };
    var recognizeStream = speech_to_text.createRecognizeStream(params);
    mic.stdout.pipe(recognizeStream);
    //mic.stdout.pipe(require('fs').createWriteStream('test.wav'));

    // Pipe in the audio.
    fs.createReadStream('test.wav').pipe(recognizeStream);
    recognizeStream.pipe(fs.createWriteStream('transcription.txt'));
    recognizeStream.setEncoding('utf8');
    console.log("start record");

    recognizeStream.on('data', function(event) { onEvent('Data:', event); });
    recognizeStream.on('error', function(event) { onEvent('Error:', event); });
    recognizeStream.on('close', function(event) { onEvent('Close:', event); });

    // Display events on the console.
    function onEvent(name, event) {
        console.log(name, JSON.stringify(event, null, 2));
    }
}
The Speech to Text service needs to know the format of the audio you are sending. 99% of the issues I've seen happen because the service is expecting a different audio format than the one actually being sent.
'--format=S16_LE', '--rate=44100', '--channels=1'
That looks like a 44.1kHz PCM format.
In your code you are specifying:
content_type: 'audio/wav'
Take a look at the supported audio formats.
Maybe try audio/l16; rate=44100. You can also record the audio in a different format.
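For the original arecord pipeline, a minimal sketch of the corrected params (only the content type changes, so that it describes the raw PCM that arecord emits with those flags):

var params = {
    // raw 16-bit PCM at 44.1 kHz, mono - matching arecord's S16_LE/44100/1-channel flags
    content_type: 'audio/l16; rate=44100; channels=1',
    model: 'zh-CN_BroadbandModel',
    continuous: true,
    inactivity_timeout: -1
};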
Finally, take a look at the javascript-speech-sdk. We have examples of how to stream the microphone from the browser.
Update
const mic = require('mic');
const SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');

const speechToText = new SpeechToTextV1({
    username: 'YOUR USERNAME',
    password: 'YOUR PASSWORD',
    url: 'YOUR SERVICE URL',
    version: 'v1'
});

// 1. Microphone settings
const micInstance = mic({
    rate: 44100,
    channels: 2,
    debug: false,
    exitOnSilence: 6
});

// 2. Service recognize settings
const recognizeStream = speechToText.createRecognizeStream({
    content_type: 'audio/l16; rate=44100; channels=2',
    model: 'zh-CN_BroadbandModel',
    interim_results: true,
});

// 3. Start recording
const micInputStream = micInstance.getAudioStream();
micInstance.start();
console.log('Watson is listening, you may speak now.');

// 4. Pipe audio to service
const textStream = micInputStream.pipe(recognizeStream).setEncoding('utf8');
textStream.on('data', user_speech_text => console.log('Watson hears:', user_speech_text));
textStream.on('error', e => console.log(`error: ${e}`));
textStream.on('close', e => console.log(`close: ${e}`));
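One thing to keep in mind: the mic package shells out to a recording utility under the hood (arecord on Linux, sox on macOS/Windows), so one of those must be installed for the example above to capture audio.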
Related
I'm studying WebRTC with a video chat project.
Now I'm really exhausted... I don't know why it doesn't work.
My own video stream works pretty well in the browser.
But when I try to join with a new browser, there are no error messages,
and while I expect to see 'two video' views, there is only one.
Actually there are two video views in the script, but the peer's video view doesn't work.
I can see the peer's stream data in console.log(peerStream), and it looks similar to my myStream data,
but it still doesn't work.
Here is the code:
async function getMedia(deviceId) {
    const initialConstraints = {
        audio: true,
        video: { facingMode: "user" },
    };
    const cameraConstraints = {
        audio: true,
        video: { deviceId: { exact: deviceId } },
    };
    try {
        myStream = await navigator.mediaDevices.getUserMedia(
            deviceId ? cameraConstraints : initialConstraints
        );
        // Mute the HTML video element, not the stream itself.
        console.log("myVideo : ", myStream, myVideo);
        addVideoStream(myVideo.current, myStream);
        //videoGrid.current.append(myVideo.current);
        if (!deviceId) {
            // mute by default
            myStream //
                .getAudioTracks()
                .forEach((track) => (track.enabled = false));
            await getCameras();
        }
    } catch (error) {
        console.log(error);
    }
}
function paintPeerFace(peerStream, id, remoteNickname) {
    console.log("peerStream : ", peerStream, id, remoteNickname);
    const peerVideo = document.createElement("video");
    console.log("const peerVideo : ", peerVideo);
    peerVideo.setAttribute("autoplay", "playsinline");
    // peerVideo.autoplay = true;
    // peerVideo.playsInline = true;
    peerVideo.width = "400";
    peerVideo.height = "400";
    peerVideo.className = id;
    console.log("const peerVideo : ", peerVideoTemp);
    addVideoStream(peerVideoTemp.current, peerStream);
    videoGrid.current.append(peerVideo);
    setUsers(videoGrid.current.childElementCount);
    //sortStreams();
}
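addVideoStream isn't shown in the post, so for reference, a minimal sketch of what such a helper typically looks like (the name and signature come from the calls above; the body is an assumption):

// Hypothetical helper - not from the original post.
function addVideoStream(videoElement, stream) {
    videoElement.srcObject = stream; // attach the MediaStream directly
    videoElement.addEventListener("loadedmetadata", () => {
        videoElement.play(); // start playback once dimensions are known
    });
}

Note that paintPeerFace creates peerVideo but attaches the stream to peerVideoTemp.current, while appending peerVideo to the grid - that mismatch may be worth double-checking.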
And the Git repository address:
https://github.com/jsw4215/webRTC_prac.git
server: Express
client: React
With the README, you can see it in the browser.
Thank you for your help, I really appreciate it!
Please help me!!
This is the last post, which explains where my goal is in developing my project: RecordRTC with sending video chunks to server and record as webm or mp4 on server side. After recording the screen-sharing stream, I decided to move forward and add my media's tracks to the screen sharing. There are two ways for me to do it: using the addTrack function, or creating a new stream containing the video from the screen sharing and the audio from my media. However, both of them result in the same error as in the aforementioned link: a corrupted video.
FYI: here is the link for anyone who wants to read more about MediaRecorder: https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder
P.S.: If you encourage me to switch to WebRTC again, I would appreciate it if you could help me solve the issue from the aforementioned link - the file ends up corrupted when using WebRTC.
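As an aside on those two approaches: addTrack lives on MediaStream, not on MediaStreamTrack, so a minimal sketch of both options (reusing the variable names from the client code below) would be:

// Option 1: add the mic's audio track to the existing screen-sharing stream
screenSharingStream.addTrack(mediaStream.getAudioTracks()[0]);

// Option 2: build a fresh stream out of the two tracks
const combined = new MediaStream([
    screenSharingStream.getVideoTracks()[0],
    mediaStream.getAudioTracks()[0],
]);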
Here is the code from my backend and frontend:
Client code:
startMedia = () => {
    this.setState({ mediaState: "pending" });
    navigator.mediaDevices.getDisplayMedia({ video: true }).then(async (screenSharingStream) => {
        console.log(MediaRecorder.isTypeSupported('video/webm; codecs=vp8,opus'));
        const socketIO = io.connect(baseURL, { query: { candidateID: this.candidateID, roundTest: this.roundTest } });
        const mediaStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true }).catch(e => { throw e });
        const mediaReCorderoptions = {
            videoBitsPerSecond: 128000,
            audioBitsPerSecond: 128000,
            mimeType: 'video/webm; codecs=vp8,opus'
        };
        const [videoTrack] = screenSharingStream.getVideoTracks();
        const [audioTrack] = mediaStream.getAudioTracks();
        if (audioTrack && videoTrack)
            videoTrack.addTrack(audioTrack);
        const stream = new MediaStream([videoTrack, audioTrack]);
        this.socketRef.current = socketIO;
        this.mediaStream = mediaStream;
        this.screenSharingStream = stream;
        this.candidateVideoRef.current.srcObject = this.mediaStream;
        this.mediaRecorder = new MediaRecorder(this.screenSharingStream, mediaReCorderoptions);
        this.mediaRecorder.ondataavailable = function (event) {
            if (event && event.data.size > 0) {
                const reader = new FileReader();
                reader.onload = function () {
                    const dataURL = reader.result;
                    console.log('van chay'); // Vietnamese: "still running"
                    const base64EncodedData = dataURL.split(',')[1];
                    //console.log(buffer)
                    socketIO.emit('SEND BLOB', base64EncodedData);
                };
                reader.readAsDataURL(event.data);
            }
        };
        this.mediaRecorder.start(1000);
        this.setState({ mediaState: this.mediaRecorder.state });
    }).catch(err => {
        console.log(err.name);
        switch (err.name) {
            case 'NotAllowedError':
                message.error('Candidate does not allow!!');
                this.setState({ mediaState: "Aborting" });
                break;
            default:
                message.error('System Error. Please contact us!');
                this.setState({ mediaState: "Aborting" });
                break;
        }
    });
}

stopMedia = () => {
    if (this.mediaStream) {
        this.mediaStream.getTracks().forEach((track) => {
            if (track.readyState === 'live') {
                track.stop();
                this.candidateVideoRef.current.style.display = 'none';
            }
        });
    }
    if (this.screenSharingStream) {
        this.mediaRecorder.stop();
        this.setState({ mediaState: this.mediaRecorder.state });
    }
}
Server code:
socket.on("SEND BLOB",chunk=>{
try {
//if (chunk instanceof Buffer){
const fileExtension = '.webm'
const dataBuffer = new Buffer(chunk, 'base64');
const fileStream = fs.createWriteStream(path.join(__dirname,'./videos/candidate/',candidateID + '-' + roundTest + fileExtension), {flags: 'a'});
fileStream.write(dataBuffer);
}
catch(e){
console.log(e)
}
})
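A side note on the server snippet: it opens a fresh write stream for every chunk and relies on the 'a' flag to append. A sketch of an alternative that keeps one stream open per recording (the cache variable and scoping are assumptions, not from the original post):

// Hypothetical: cache one append stream per candidate instead of reopening per chunk.
const streams = {};
socket.on("SEND BLOB", chunk => {
    const key = candidateID + '-' + roundTest;
    if (!streams[key]) {
        streams[key] = fs.createWriteStream(
            path.join(__dirname, './videos/candidate/', key + '.webm'),
            { flags: 'a' }
        );
    }
    streams[key].write(Buffer.from(chunk, 'base64'));
});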
I am currently trying to store recorded data in my database. I use the base64 npm module to translate my blob into base64 before storing it in the database.
But when I send my records to my database, the GET /* method returns exactly the same raw text for every record. Note that I made the records in different sessions, not in the same sent stream.
Here is the return from my database:
[{"_id":"5b09c5a9db6839382475442b","sound":{"type":"Buffer","data":[87,50,57,105,97,109,86,106,100,67,66,67,98,71,57,105,88,81,61,61]},"__v":0},{"_id":"5b09c5b7db6839382475442c","sound":{"type":"Buffer","data":[87,50,57,105,97,109,86,106,100,67,66,67,98,71,57,105,88,81,61,61]},"__v":0},{"_id":"5b09c5c4db6839382475442d","sound":{"type":"Buffer","data":[87,50,57,105,97,109,86,106,100,67,66,67,98,71,57,105,88,81,61,61]},"__v":0},{"_id":"5b09c69bdb6839382475442e","sound":{"type":"Buffer","data":[87,50,57,105,97,109,86,106,100,67,66,67,98,71,57,105,88,81,61,61]},"__v":0},
(... and so on)
I wonder what can produce this behavior.
I am currently trying to retrieve the data URL directly and read the binary text from it - pseudo code:
blob:http://localhost:[path]
-> then retrieve the data at this path
-> then store the retrieved data in the database
Meanwhile, here is my App.js:
class RecordingAPI extends React.Component {
    constructor(props) {
        super(props);
        this.deleteAudio = this.deleteAudio.bind(this);
        this.handleSubmit = this.handleSubmit.bind(this);
        this.state = {
            recording: false,
            audios: [],
            blob: {}
        };
    }

    handleSubmit(e) {
        e.preventDefault();
        Axios.post("/api/words", {
            "sound": this.state.blob
        })
        //.then((res) => res.json())
        .then((data) => console.log(data));
        // set the submitted value to true in order to trigger the deleteAll function
        alert("Message sent, congratulation =)");
        //this.state.deleteAll();
        this.deleteAll();
    }

    async componentDidMount() {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        // show it to the user
        this.audio.src = window.URL.createObjectURL(stream);
        this.audio.play();
        // init recording
        this.mediaRecorder = new MediaRecorder(stream);
        // init data storage for audio chunks
        this.chunks = [];
        // listen for data from the media recorder
        this.deleteAll = this.deleteAll.bind(this);
        this.mediaRecorder.ondataavailable = e => {
            if (e.data && e.data.size > 0) {
                this.chunks.push(e.data);
            }
        };
    }

    startRecording(e) {
        e.preventDefault();
        // wipe old data chunks
        this.chunks = [];
        // start recorder with a 10ms buffer
        this.mediaRecorder.start(10);
        // say that we're recording
        this.setState({ recording: true });
    }

    stopRecording(e) {
        e.preventDefault();
        // stop the recorder
        this.mediaRecorder.stop();
        // say that we're not recording
        this.setState({ recording: false });
        // save the audio to memory
        this.saveAudio();
    }

    saveAudio() {
        // convert saved chunks to a blob (audioType is defined elsewhere, e.g. 'audio/webm')
        const blob = new Blob(this.chunks, { type: audioType });
        // generate an audio url from the blob
        const audioURL = window.URL.createObjectURL(blob);
        console.log(audioURL);
        // append audioURL to the list of saved audios for rendering
        const audios = this.state.audios.concat([audioURL]);
        this.setState({ audios });
        var blob64 = Base64.encode(blob);
        this.setState({ blob: blob64 });
    }
}
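For what it's worth, the bytes in the records above decode to the base64 string W29iamVjdCBCbG9iXQ==, which is the text "[object Blob]" - consistent with the Blob being stringified rather than having its contents read. A minimal sketch of reading the Blob's bytes first (the FileReader approach here is an assumption, not from the original post):

// Hypothetical replacement for Base64.encode(blob) in saveAudio():
saveAudio() {
    const blob = new Blob(this.chunks, { type: audioType });
    const audioURL = window.URL.createObjectURL(blob);
    this.setState({ audios: this.state.audios.concat([audioURL]) });
    const reader = new FileReader();
    reader.onload = () => {
        // reader.result is a data URL; keep only the base64 payload
        const blob64 = reader.result.split(',')[1];
        this.setState({ blob: blob64 });
    };
    reader.readAsDataURL(blob); // actually reads the Blob's bytes
}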
Thanks.
I'm trying to encrypt a video using the CryptoJS library. The goal is to encrypt it and store it in Firebase Storage; later, when you want to view it, it is decrypted and added as a URL to the HTML video tag. I can't get the video to display after decrypting it - any idea what the problem is? I have the feeling that the problem is treating the output of the cipher as a string, and I don't know how I should treat it so that it keeps the properties of a video file. Thanks in advance.
The current code is:
//Encrypt and upload function
function almacenarFicheroGrabacionVideo(file) {
    let storageRef = firebase.storage().ref('videos/' + file.name);
    let reader = new FileReader();
    reader.onload = function () {
        let read = reader.result;
        let task = storageRef.putString(CryptoJS.AES.encrypt(read, getCookie('key')).toString());
        task.on('state_changed', function progress(snapshot) {
        }, function error(err) {
            console.log(err);
        }, function complete() {
            console.log("fichero subido"); // "file uploaded"
        });
    };
    reader.readAsText(file);
}
//Decrypt and visualize video
$scope.verVideo = function () {
    firebase.storage().ref('videos/').child('Blurred Bokeh Video 2.mp4').getDownloadURL().then(function (url) {
        fetch(url)
            .then(res => res.blob()) // Gets the response and returns it as a blob
            .then(blob => {
                let reader = new FileReader();
                reader.onload = function () {
                    let fileDown = CryptoJS.AES.decrypt(reader.result, getCookie('clave')).toString(CryptoJS.enc.Utf8);
                    var videoNode = document.getElementsByTagName('video')[0];
                    let blob = new Blob([fileDown], { type: "video/mp4" });
                    let url = URL.createObjectURL(blob);
                    let element = document.createElement('a');
                    element.setAttribute('href', url);
                    element.setAttribute('download', 'Blurred Bokeh Video 2.mp4');
                    element.style.display = 'none';
                    document.body.appendChild(element);
                    element.click();
                    document.body.removeChild(element);
                };
                reader.readAsText(blob);
            });
    }).catch(function (error) {
        // Handle any errors
        console.log(error);
    });
};
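One pattern that sidesteps the binary-as-text problem is to read the file as a base64 data URL before encrypting, so the plaintext is plain ASCII and survives the string round-trip. A minimal sketch under that assumption (not the original code; mostrarVideo is a hypothetical name, and note the original encrypts with the 'key' cookie but decrypts with 'clave'):

// Encrypt: read the video as a data URL (ASCII) instead of raw text
function almacenarFicheroGrabacionVideo(file) {
    let storageRef = firebase.storage().ref('videos/' + file.name);
    let reader = new FileReader();
    reader.onload = function () {
        // reader.result is "data:video/mp4;base64,...."
        storageRef.putString(CryptoJS.AES.encrypt(reader.result, getCookie('key')).toString());
    };
    reader.readAsDataURL(file); // lossless, unlike readAsText on binary data
}

// Decrypt: the UTF-8 plaintext is the original data URL, usable directly as the video src
function mostrarVideo(ciphertext) {
    let dataURL = CryptoJS.AES.decrypt(ciphertext, getCookie('key')).toString(CryptoJS.enc.Utf8);
    document.getElementsByTagName('video')[0].src = dataURL;
}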
I am making use of the pdfmake library for generating PDF documents in my Node Express application, and I want these to be sent straight back to the client to trigger the browser to automatically download the file.
As a reference point I have been using the following examples for my express middleware:
https://gist.github.com/w33ble/38c5e0220d491148de1c
https://github.com/bpampuch/pdfmake/issues/489
I have opted for sending a buffered response back, so the key part of my middleware looks like this:
function createPDFDocument(docDefinition, callback) {
    var fontDescriptors = {
        Roboto: {
            normal: './src/server/fonts/Roboto-Regular.ttf',
            bold: './src/server/fonts/Roboto-Medium.ttf',
            italics: './src/server/fonts/Roboto-Italic.ttf',
            bolditalics: './src/server/fonts/Roboto-MediumItalic.ttf'
        }
    };
    var printer = new Printer(fontDescriptors);
    var pdfDoc = printer.createPdfKitDocument(docDefinition);

    // buffer the output
    var chunks = [];
    pdfDoc.on('data', function (chunk) {
        chunks.push(chunk);
    });
    pdfDoc.on('end', function () {
        var result = Buffer.concat(chunks);
        callback(result);
    });
    pdfDoc.on('error', callback);

    // close the stream
    pdfDoc.end();
}
In my angular application I am using the $resource service and have an endpoint defined like so:
this.resource = $resource('api/document-requests/',
    null,
    <any>{
        'save': {
            method: 'POST',
            responseType: 'arraybuffer'
        }
    });
When I try this out, I don't get any browser download kicking in. The response I receive and the response headers are shown in screenshots (not reproduced here).
So it seems I'm not a million miles off. I have searched around and found solutions that mention converting to a Blob, but I think that's only relevant if I were serving back a Base64-encoded string of the document.
Can anyone suggest what may be my issue here?
Thanks
Here's a router:
router.get('/get-pdf-doc', async (req, res, next)=>{ try {
var binaryResult = await createPdf();
res.contentType('application/pdf').send(binaryResult);
} catch(err){
saveError(err);
res.send('<h2>There was an error displaying the PDF document.
'</h2>Error message: ' + err.message);
}});
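If the goal is a download rather than inline display, you may also want to set a Content-Disposition header before sending (the filename here is just an example):

res.set('Content-Disposition', 'attachment; filename="document.pdf"');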
And here's a function that returns the PDF:
const PdfPrinter = require('pdfmake');
const Promise = require("bluebird");

createPdf = async () => {
    var fonts = {
        Helvetica: {
            normal: 'Helvetica',
            bold: 'Helvetica-Bold',
            italics: 'Helvetica-Oblique',
            bolditalics: 'Helvetica-BoldOblique'
        }
    };
    var printer = new PdfPrinter(fonts);
    var docDefinition = {
        content: [
            'First paragraph',
            'Another paragraph, this time a little bit longer to make sure,' +
            ' this line will be divided into at least two lines'
        ],
        defaultStyle: {
            font: 'Helvetica'
        }
    };
    var pdfDoc = printer.createPdfKitDocument(docDefinition);
    return new Promise((resolve, reject) => {
        try {
            var chunks = [];
            pdfDoc.on('data', chunk => chunks.push(chunk));
            pdfDoc.on('end', () => resolve(Buffer.concat(chunks)));
            pdfDoc.end();
        } catch (err) {
            reject(err);
        }
    });
};
Everything seems fine to me; the only thing missing is the logic to trigger the download.
Check out this CodePen as an example.
Here I'm using base64-encoded data, but you can just as well use binary data; just don't forget to change the href where I mention scope.dataURL = base64....
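For reference, a minimal sketch of that trigger logic on the client, assuming the response body is the arraybuffer from the $resource call above (variable names are illustrative):

// Hypothetical download trigger - adapt names to your controller.
var blob = new Blob([response.data], { type: 'application/pdf' });
var url = window.URL.createObjectURL(blob);
var link = document.createElement('a');
link.href = url;
link.download = 'document.pdf'; // example filename
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
window.URL.revokeObjectURL(url);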
I had issues serving PDF files from Node.js as well, so I made use of phantomjs. You can check out this repository for the full codebase and implementation.
console.log('Loading web page');
const page = require('webpage').create();
const args = require('system').args;
const url = 'http://www.google.com';

page.viewportSize = { width: 1024, height: 768 };
page.clipRect = { top: 0, left: 0 };

page.open(url, function (status) {
    console.log('Page loaded');
    setTimeout(function () {
        page.render('docs/' + args[1] + '.pdf'); // args[1] is the output file name
        console.log('Page rendered');
        phantom.exit();
    }, 10000);
});
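Assuming the script is saved as render.js, it would be invoked as phantomjs render.js myfile, since args[0] is the script path and args[1] becomes the PDF name.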