Cordova camera plugin: Why aren't my CameraOptions being picked up? - angularjs

My code looks something like this (using angularjs):
angular.module('myApp').factory('photoService', function (/* dependencies */) {
var camera = navigator.camera;
// DATA_URL asks the Cordova camera plugin for a base64-encoded image
// instead of a file URI.
var cameraOpts = { destinationType: camera.DestinationType.DATA_URL };
// BUG FIX: the service object must be declared (and returned) by the factory;
// the original referenced an undeclared `cameraService`.
var cameraService = {};
cameraService.takePhoto = function (successCallback, failureCallback) {
console.log(JSON.stringify(cameraOpts));
camera.getPicture(function (img) {
console.log(img);
//handle success
}, function (e) {
console.log(e);
//handle failure
}, cameraOpts); // BUG FIX: was `cameraOptions` (undefined) — so getPicture fell
// back to its defaults (FILE_URI), which is why a file URI was
// logged instead of a base64 string.
};
return cameraService;
});
My cameraOpts looks correctly populated, since its log shows {"destinationType":0} (i.e. destinationType != undefined, so it was correctly pulled from the camera object). My issue is that when I do console.log(img), I am still getting a file URI instead of the image as a base64 string. Why is this?

Related

record and save video to server w/RecordRTC

I am trying to record a video and then save it to the server.
My issue is the file is not being saved to the server and I am not sure why. The issue seems to be that it is not creating the blob or maybe not able to get the file as a blob?
I say that because in the Console I see this error
stopRecording failure TypeError: Failed to execute 'createObjectURL' on 'URL': Overload resolution failed.
Here is my file
<html>
<head>
<script src="https://cdn.webrtc-experiment.com/RecordRTC.js"></script>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
</head>
<body>
<button id="btn-start-recording">Start Recording</button>
<button id="btn-stop-recording" disabled="disabled">Stop Recording</button>
<button id="btn-save-recording" disabled="disabled">Stop& Save Recording</button>
<!--
2. Include a video element that will display the current video stream
and as well to show the recorded video at the end.
-->
<hr>
<video id="my-preview" controls autoplay></video>
<!-- 4. Initialize and prepare the video recorder logic -->
<script>
// Store a reference of the preview video element and a global reference to the recorder instance
var video = document.getElementById('my-preview');
var recorder;
// When the user clicks on start video recording
document.getElementById('btn-start-recording').addEventListener("click", function(){
// Disable start recording button
this.disabled = true;
// Request access to the media devices
navigator.mediaDevices.getUserMedia({
audio: true,
video: true
}).then(function(stream) {
// Display a live preview on the video element of the page
setSrcObject(stream, video);
// Start to display the preview on the video element
// and mute the video to disable the echo issue !
video.play();
video.muted = true;
// Initialize the recorder
recorder = new RecordRTCPromisesHandler(stream, {
mimeType: 'video/webm',
bitsPerSecond: 128000
});
// Start recording the video
recorder.startRecording().then(function() {
console.info('Recording video ...');
}).catch(function(error) {
console.error('Cannot start video recording: ', error);
});
// release stream on stopRecording
recorder.stream = stream;
// Enable stop recording button
document.getElementById('btn-stop-recording').disabled = false;
document.getElementById('btn-save-recording').disabled = false;
}).catch(function(error) {
console.error("Cannot access media devices: ", error);
});
}, false);
// When the user clicks on Stop video recording
document.getElementById('btn-stop-recording').addEventListener("click", function(){
this.disabled = true;
recorder.stopRecording().then(function() {
console.info('stopRecording success');
// Retrieve recorded video as blob and display in the preview element
// NOTE(review): RecordRTCPromisesHandler.getBlob() returns a Promise, not a
// Blob (see the quoted library source below) — passing it straight to
// URL.createObjectURL is what triggers the "Overload resolution failed"
// TypeError reported above. Use recorder.getBlob().then(...) instead.
var videoBlob = recorder.getBlob();
video.src = URL.createObjectURL(videoBlob);
video.play();
// Unmute video on preview
video.muted = false;
// Stop the device streaming
// NOTE(review): MediaStream.stop() is deprecated — confirm it still exists in
// the target browsers; tracks are normally stopped one by one instead.
recorder.stream.stop();
// Enable record button again !
document.getElementById('btn-start-recording').disabled = false;
}).catch(function(error) {
console.error('stopRecording failure', error);
});
}, false);
//lets save the video
document.getElementById('btn-save-recording').addEventListener("click", function(){
// Retrieve recorded video as blob and display in the preview element
// NOTE(review): same Promise-vs-Blob issue as in the stop handler above.
var videoBlob = recorder.getBlob();
video.src = URL.createObjectURL(videoBlob);
video.play();
// Unmute video on preview
video.muted = false;
// Stop the device streaming
recorder.stream.stop();
var formData = new FormData();
// NOTE(review): 'player' is never defined anywhere in this page, so this line
// throws a ReferenceError; presumably the recorded blob was meant to be
// appended here instead.
formData.append('video', player.recordedData.video);
// Execute the ajax request, in this case we have a very simple PHP script
// that accepts and save the uploaded "video" file
xhr('upload-videoclaim.php', formData, function (fName) {
console.log("Video succesfully uploaded !");
})
// Helper function to send
function xhr(url, data, callback) {
var request = new XMLHttpRequest();
request.onreadystatechange = function () {
if (request.readyState == 4 && request.status == 200) {
callback(location.href + request.responseText);
}
};
request.open('POST', url);
request.send(data);
}
});
</script>
</body>
</html>
See the MDN docs on parameters for URL.createObjectURL:
A File, Blob, or MediaSource object to create an object URL for.
Just try passing something else to URL.createObjectURL that it doesn't expect like a number. You'll get a similar / same error message.
Now see the implementation of RecordRTCPromisesHandler.getBlob(). It returns a Promise:
/**
* This method returns the recorded blob.
* @method
* @memberof RecordRTCPromisesHandler
* @example
* recorder.stopRecording().then(function() {
* recorder.getBlob().then(function(blob) {})
* }).catch(errorCB);
*/
this.getBlob = function() {
return new Promise(function(resolve, reject) {
try {
resolve(self.recordRTC.getBlob());
} catch (e) {
reject(e);
}
});
};
Look at the @example given in the JSDoc comment. It does recorder.getBlob().then(<callback>). Follow the example.
Option 1 (nested Promise.then()):
recorder.stopRecording().then(function() {
console.info('stopRecording success');
// do whatever else you want here.
recorder.getBlob().then(videoBlob => {
video.src = URL.createObjectURL(videoBlob);
video.play();
video.muted = false;
// ...
});
}).catch(function(error) {
// ...
Option 2 (chained Promise.then()):
recorder.stopRecording().then(() => recorder.getBlob()).then(videoBlob => {
video.src = URL.createObjectURL(videoBlob);
video.play();
video.muted = false;
// ...
}).catch(function(error) {
// ...
Option 3 (async-await):
try {
await recorder.stopRecording();
const videoBlob = await recorder.getBlob();
video.src = URL.createObjectURL(videoBlob);
video.play();
video.muted = false;
// ...
} catch(error) {
// ...
I'm not sure, but part of why the problem wasn't obvious may be the limitations of IntelliSense when writing JS inside HTML, particularly when using a library without downloading its source locally so that it's visible to the IntelliSense facilities. I wouldn't really fault you for that. If you think it's appropriate, you could consider politely asking the library's maintainer to also document the Promise-based interfaces in the library's website documentation, or even writing up a PR to do that.

convert PDF data into octet-stream for printer autodetection

I have a printer that only accepts application/octet-stream over IPP. My program downloads binary PDF data and I need to convert that into application/octet-stream, which would (supposedly) let the printer decide what to print. However, when I send the data, it just prints binary data as text and not as formatted PDF. I'm using node with npm package 'ipp'.
I had a similar problem, described in this link. There I found a working example, which I modified slightly to get it working (mixed in some of the pdfkit example, but shortened).
Here is my working version (node v16.17.0 | npm 8.15.0 | windows 11)
var ipp = require("ipp");
var concat = require("concat-stream");
var PDFDocument = require('pdfkit');

// Compose a small test document with pdfkit: a line of text plus a
// centered, size-constrained image.
const doc = new PDFDocument();
doc
    .fontSize(25)
    .text('Some text with an embedded font!', 100, 100);
doc.image('./my-image.png', {
    fit: [250, 300],
    align: 'center',
    valign: 'center'
});

// Collect the rendered PDF into a single Buffer, then submit it to the
// printer as an IPP Print-Job with document-format application/octet-stream
// so the printer auto-detects the content type.
doc.pipe(concat(function (pdfBuffer) {
    // Port 80 is used here because port 631 had some weird problem with
    // this Brother printer.
    var printer = ipp.Printer("http://<ip address>:80/ipp", { version: '2.0' });
    var job = {
        "operation-attributes-tag": {
            "requesting-user-name": "User",
            "job-name": "Print Job",
            "document-format": "application/octet-stream"
        },
        data: pdfBuffer
    };
    printer.execute("Print-Job", job, function (err, res) {
        // Logged unconditionally; err is null on success.
        console.log("Error: ", err);
        console.log('res', res);
    });
}));

// Finalize the document — without this the piped stream never ends.
doc.end();
Note that you have to check whether your printer supports the IPP version you specify.
I checked that with this code (I lost the link where I found it, which is why there is no reference to it):
var ipp = require('ipp');

// Ask the printer for its attributes (this reveals, among other things,
// which IPP versions it supports).
var uri = "http://<ip address>:80/ipp";
var payload = ipp.serialize({
    "operation": "Get-Printer-Attributes",
    "operation-attributes-tag": {
        "attributes-charset": "utf-8",
        "attributes-natural-language": "en",
        "printer-uri": uri
    }
});

ipp.request(uri, payload, function (err, res) {
    if (err) {
        return console.log(err);
    }
    // Pretty-print the full attribute set.
    console.log(JSON.stringify(res, null, 2));
})

Retrieve details on Bus Line or Subway in Google Map API

I'm looking for doing something like this website : WEBSITE GMAPS
For the moment I can get everything within around 500 meters — such as whether a place is a bus stop or something else — but I can't retrieve the bus line itself.
// Grab the underlying Google Map from the angular wrapper and wire up the
// Places, Transit and Directions services against it.
var mapControl = $scope.map.control.getGMap();
service = new google.maps.places.PlacesService(mapControl); // kept as an implicit global, as before
var transitLayer = new google.maps.TransitLayer();
transitLayer.setMap(mapControl);
var directionsService = new google.maps.DirectionsService();

// For every nearby place returned by the Places search, compute the walking
// time from the map centre and publish the enriched result on the scope.
function callback(results, status) {
    if (status !== google.maps.places.PlacesServiceStatus.OK) {
        return;
    }
    $scope.placesTransit = [];
    $scope.timeTotal = [];
    angular.forEach(results, function (result) {
        var routeRequest = {
            origin: new google.maps.LatLng($scope.map.center.latitude, $scope.map.center.longitude),
            destination: result.geometry.location,
            travelMode: google.maps.DirectionsTravelMode.WALKING,
            unitSystem: google.maps.UnitSystem.METRIC
        };
        directionsService.route(routeRequest, function (response, routeStatus) {
            if (routeStatus !== google.maps.DirectionsStatus.OK) {
                return;
            }
            // Walking duration of the first leg of the first route.
            result.timeTotal = response.routes[0].legs[0].duration.text;
            if (result.types[0] === "subway_station") {
                result.typeTransport = "Métro";
            }
            console.log(result);
            $scope.placesTransit.push(result);
        });
    });
}

service.search(request, callback);
Could someone help me figure out how to do that?
Apparently I can retrieve this information on this page, but there's no way to get it with the Google Maps API v3. Any ideas? Thanks.
Yes, Google has a place where you can get the information about public transportation.
First, you need to check if your city is supported:
https://maps.google.com/landing/transit/cities/
Then, if it is supported you can get more information here:
https://maps.google.com/landing/transit/index.html
To access it programmatically, you can use both of the following APIs:
https://developers.google.com/transit/
https://developers.google.com/maps/documentation/directions/
I would go with the second one.
With this in mind, if the company you are trying to check is registered, you will be able to easily access all of its bus routes (and more).
If the company is not listed however, it means that they coded the path on the map that you see, and you must do the same using overlays:
https://developers.google.com/maps/documentation/javascript/examples/overlay-simple
Hope it helps!

Ionic Photo Capture & Crop

I have a basic ionic application, i'd like the app to take a photo of the user, and the user can then crop the taken photo to a passport sized photo.
Does anybody know how I can achieve such a thing? I've tried jrcrop but for the life of me I can't get it working.
For my Ionic app, I used a combination of ng-flow for the upload (on the flow-file-added, validate the file is OK (met file extension/upload requirements etc.)) then initiated an instance of ngCropper to perform the cropping. Once cropping is complete, initiated the flow.upload() on the flowjs object to perform the upload.
It's not possible to provide all my code to this solution, but the real stitching to make this happen occurs after cropping is:
First, retrieve the data URL of the cropped canvas, via a command like var dataUrl = this.$cropCanvas.cropper('getCroppedCanvas').toDataURL();
Create a blob from it, something like this JS function works well.
Remove the original queued upload file (the full image)
Replace it with the cropped blob
Upload.
The replace and upload technique looks like this:
// Convert the cropped canvas data URL into a Blob suitable for upload.
var theBlob = that.dataURLToBlob(dataUrl);
theBlob.name = Utility.generateGuid() + '.jpg'; // give it a new name if you like
// Remove existing image which was added to flow files cache on image dialog select
$scope.flowTileObj.flow.removeFile($scope.flowTileObj.flow.files[0]);
// Queue the cropped blob in place of the original full image.
$scope.flowTileObj.flow.addFile(theBlob);
// Perform upload
$scope.flowTileObj.flow.upload();
Best of luck.
You need to add these plugins:
bower install --save ngCordova
cordova plugin add cordova-plugin-camera
cordova plugin add cordova-plugin-file
ionic platform add ios
And use this code
<img ng-repeat="image in images" ng-src="{{urlForImage(image)}}" height="200px"/>
$scope.addImage = function() {
// Camera configuration: JPEG from the device camera, delivered as a
// file URI (not base64); editing disabled.
var options = {
destinationType : Camera.DestinationType.FILE_URI,
sourceType : Camera.PictureSourceType.CAMERA, // Camera.PictureSourceType.PHOTOLIBRARY
allowEdit : false,
encodingType: Camera.EncodingType.JPEG,
popoverOptions: CameraPopoverOptions,
};
$cordovaCamera.getPicture(options).then(function(imageData) {
// Build a random 5-character prefix so copied files never collide by name.
function makeid() {
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
var generated = "";
for (var i = 0; i < 5; i++) {
generated += possible.charAt(Math.floor(Math.random() * possible.length));
}
return generated;
}
// Shared error handler for all file-system operations below.
function fail(error) {
console.log("fail: " + error.code);
}
// Once the copy lands in the app's data directory, publish its URL
// on the scope (inside $apply so the view refreshes).
function onCopySuccess(entry) {
$scope.$apply(function () {
$scope.images.push(entry.nativeURL);
});
}
// Copy the captured photo into persistent app storage under a fresh name.
function copyFile(fileEntry) {
var originalName = fileEntry.fullPath.substr(fileEntry.fullPath.lastIndexOf('/') + 1);
var newName = makeid() + originalName;
window.resolveLocalFileSystemURL(cordova.file.dataDirectory, function(fileSystem2) {
fileEntry.copyTo(
fileSystem2,
newName,
onCopySuccess,
fail
);
}, fail);
}
// Resolve the camera's file URI into a FileEntry, then copy it.
function createFileEntry(fileURI) {
window.resolveLocalFileSystemURL(fileURI, copyFile, fail);
}
function onImageSuccess(fileURI) {
createFileEntry(fileURI);
}
onImageSuccess(imageData);
}, function(err) {
console.log(err);
});
}

CasperJs not capturing screenshot for some sites

I have written some test case using CasperJS.
// Base URL of the internal site under test.
var baseUrl = "http://cng20018241:9090";
var require = patchRequire(require);
// Fixed viewport so captures have a predictable size.
casper.options.viewportSize = {
width: 1024,
height: 800
}
casper.test.begin('Connecting to site', function suite(test) {
casper.start(baseUrl, function() {
// Give the page 5 seconds to finish rendering before capturing.
this.wait(5000, function() {
this.echo("I've waited for 5 seconds.");
});
})
.then(function(){
// Capture the whole <html> element to disk.
// NOTE(review): as described below, on the ExtJS site behind Windows
// authentication this step only yields a black image — the ResourceError
// ("Operation canceled") suggests the page never actually loads.
this.captureSelector('c:\\temp\\screenshot.png', 'html')}
)
.run(function() {
casper.echo("TESTS COMPLETED", "GREEN_BAR");
casper.exit();
});
});
// Partially apply `func`: pre-bind any arguments given after it, and return a
// wrapper that forwards its own `this` plus any later arguments appended
// after the pre-bound ones.
var partial = function(func /*, 0..n args */) {
    var bound = Array.prototype.slice.call(arguments, 1);
    return function() {
        var later = Array.prototype.slice.call(arguments);
        return func.apply(this, bound.concat(later));
    };
};
The above code is capturing the screen shot for a site developed using meteor. But if I change the url to a site developed using ExtJs, casperjs only produce screenshot which is black. it does not capture the actual screen.
Both the sites are internal sites. The site developed using ExtJs (not working) uses windows authentication.
What options do I have now?
After registering various events, I am getting following error:
ResourceError: {
"errorCode": 5,
"errorString": "Operation canceled",
"id": 0,
"url": "http://cng20018241:9090/"
}

Resources