How can I solve the html2canvas image cross origin issue? - reactjs

I'm attempting to convert a DOM element to a PNG image. Everything works OK except for the cross-origin images, which appear as empty spaces in the exported image. I tried adding the extra option useCORS: true, but that doesn't work.
Here's the code:
const exportAsPicture = () => {
  var html = document.getElementsByTagName("HTML")[0];
  var body = document.getElementsByTagName("BODY")[0];
  var htmlWidth = html.clientWidth;
  var bodyWidth = body.clientWidth;
  var data = document.getElementById(`${idea.slug}`);
  var newWidth = data.scrollWidth - data.clientWidth;
  if (newWidth > data.clientWidth) {
    htmlWidth += newWidth;
    bodyWidth += newWidth;
  }
  html.style.width = htmlWidth + "px";
  body.style.width = bodyWidth + "px";
  data.style.boxShadow = "none";
  console.log(data);
  html2canvas(data, {
    logging: true,
    letterRendering: 1,
    proxy: "https://notepd.s3.amazonaws.com/",
    useCORS: true,
  })
    .then((canvas) => {
      var image = canvas.toDataURL("image/png", 0.1);
      data.style.boxShadow = "0px 4px 4px rgba(0, 0, 0, 0.08)";
      return image;
    })
    .then((image) => {
      saveAs(image, `NotePD | ${idea.title}.png`);
      html.style.width = null;
      body.style.width = null;
    });
};
const saveAs = (blob, fileName) => {
  var elem = window.document.createElement("a");
  elem.href = blob;
  elem.download = fileName;
  elem.style = "display:none;";
  (document.body || document.documentElement).appendChild(elem);
  if (typeof elem.click === "function") {
    elem.click();
  } else {
    elem.target = "_blank";
    elem.dispatchEvent(
      new MouseEvent("click", {
        view: window,
        bubbles: true,
        cancelable: true,
      })
    );
  }
  URL.revokeObjectURL(elem.href);
  elem.remove();
};
The pictures come from the S3 bucket; the same-origin images render fine.

Just add crossOrigin="anonymous" to your image tag; it will solve the issue.
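For example, a minimal sketch of the image tag in JSX, combined with useCORS: true in the html2canvas options; note that the S3 bucket must also allow your origin in its CORS configuration for the anonymous request to succeed:
// Sketch: request the S3 image with CORS enabled so html2canvas
// (with useCORS: true) is allowed to draw it onto the canvas.
// The src path is hypothetical, for illustration only.
<img
  src="https://notepd.s3.amazonaws.com/some-image.png"
  crossOrigin="anonymous"
  alt="idea"
/>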

Related

I wish I could loop this array of videos and images; how can I do it?

In this demo http://jsfiddle.net/mdz82oLn/ I have inserted videos (IDs) and images (png, jpeg, etc.), which are also displayed by means of customized buttons. The demo plays fine, but once it reaches the end of the last video it gets stuck instead of restarting from the first video in the list. How can I add this behavior?
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/iframe_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
const playerElement = document.querySelector('#player');
const imageElement = document.querySelector('#slide');
const videos = {
'RGpr3Y6Q-1M': 'http://nothingbutgeek.com/wp-content/uploads/2018/06/automata_16x9.png',
'btxdcqLOGuc': 'https://live.staticflickr.com/2400/2078946248_d063d5a563_b.jpg',
'CIx0a1vcYPc': 'https://i.ytimg.com/vi/CIx0a1vcYPc/maxresdefault.jpg',
};
const videoIds = Object.keys(videos);
function onYouTubeIframeAPIReady() {
function onPlayerReady({ target }) {
var playButton = document.getElementById("play-button");
playButton.addEventListener("click", function() {
target.playVideo();
});
var pauseButton = document.getElementById("pause-button");
pauseButton.addEventListener("click", function() {
target.pauseVideo();
});
var next = document.getElementById("next");
next.addEventListener("click", function() {
target.nextVideo();
});
var pre = document.getElementById("previous");
pre.addEventListener("click", function() {
target.previousVideo();
});
target.loadPlaylist({
playlist: videoIds
});
}
function onPlayerStateChange({ data, target }) {
switch(data) {
case YT.PlayerState.ENDED:
target.nextVideo();
break;
case YT.PlayerState.BUFFERING:
const playlist = target.getPlaylist();
const playlistIndex = target.getPlaylistIndex();
const currentId = playlist[playlistIndex];
const image = videos[currentId];
if (imageElement.src !== image) {
imageElement.src = image;
}
break;
}
}
const player = new YT.Player(playerElement, {
height: '405',
width: '720',
playerVars: {
controls: 1,
},
events: {
'onReady': onPlayerReady,
'onStateChange': onPlayerStateChange
}
});
}
If I understand correctly, you are stuck on the last video and want the loop to start again from the first video.
In that case a change like this should work:
var next = document.getElementById("next");
next.addEventListener("click", function() {
  target.nextVideo();
  if (target.getPlaylistIndex() == videoIds.length - 1) {
    target.loadPlaylist({
      playlist: videoIds
    });
  }
});
I updated your example. You can also update the switch case like this:
case YT.PlayerState.ENDED: {
  if (target.getPlaylistIndex() == videoIds.length - 1) {
    target.loadPlaylist({
      playlist: videoIds
    });
  }
  break;
}
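As an alternative, the IFrame Player API also provides setLoop, which makes a playlist loaded with loadPlaylist wrap around from the last video back to the first automatically; a minimal sketch, called once from onPlayerReady:
// Sketch: let the player itself loop the playlist instead of reloading it.
target.loadPlaylist({
  playlist: videoIds
});
target.setLoop(true); // after the last video, playback continues with the first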

video recording issue in reactjs

I have implemented video recording functionality in React using MediaDevices.getUserMedia(), but the audio quality is not good: the recording picks up a lot of annoying background noise, which is very irritating.
Link: https://devionashell.azurewebsites.net/uco
startVideos = async () => {
stopCountdown = false;
this.setState({
open: true
});
var constraints = {
audio: {
sampleRate: 44800,
channelCount: 2,
volume: 0.2,
autoGainControl: false,
echoCancellation: false,
noiseSuppression: false,
googleAutoGainControl: false,
sampleSize: 16
},
video: {
facingMode: "environment"
}
}
navigator.mediaDevices.getUserMedia = navigator.mediaDevices.getUserMedia ||
navigator.mediaDevices.webkitGetUserMedia ||
navigator.mediaDevices.mozGetUserMedia;
if (navigator.mediaDevices.getUserMedia) {
const stream = await navigator.mediaDevices.getUserMedia(constraints);
// show it to user
vid = stream;
this.video.srcObject = stream;
this.video.volume = 0.1;
// var vid_volume = document.getElementById("myVideo");
console.log('vid_volume')
// console.log(vid_volume)
console.log('vid_volume')
//this.video.volumeObject = 0.2;
this.video.play();
// init recording
this.mediaRecorder = new MediaRecorder(stream, {
mimeType: videoType,
});
// init data storage for video chunks
this.chunks = [];
// listen for data from media recorder
this.mediaRecorder.ondataavailable = e => {
if (e.data && e.data.size > 0) {
this.chunks.push(e.data);
}
};
this.render();
} else {
console.log("getUserMedia not supported");
}
}
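One thing that stands out in these constraints is that echoCancellation, noiseSuppression and autoGainControl are all explicitly disabled, so the raw microphone signal is recorded. A minimal sketch of audio constraints that leave the browser's built-in processing enabled (standard MediaTrackConstraints properties) that may be worth comparing against:
// Sketch: keep the browser's built-in audio processing enabled
// instead of turning it off in the constraints.
var constraints = {
  audio: {
    echoCancellation: true,   // cancel speaker/mic feedback
    noiseSuppression: true,   // suppress steady background noise
    autoGainControl: true,    // keep the input level consistent
    channelCount: 2
  },
  video: {
    facingMode: "environment"
  }
};
// then, inside the async method: const stream = await navigator.mediaDevices.getUserMedia(constraints);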

Camera streaming service works only with localhost but not with IP address

I have implemented a service which streams camera output to HTML5. But it works only if I use localhost:8080; if I use the IP address or machine name, then it does not even detect the camera.
/*global logger*/
/*
VisualInspection
========================
#file : VisualInspection.js
#version : 1.0.0
#author :
#date : 7/28/2019
#copyright :
#license : Apache 2
Documentation
========================
Describe your widget here.
*/
// Required module list. Remove unnecessary modules, you can always get them back from the boilerplate.
define([
"dojo/_base/declare",
"mxui/widget/_WidgetBase",
"dijit/_TemplatedMixin",
"mxui/dom",
"dojo/dom",
"dojo/dom-prop",
"dojo/dom-geometry",
"dojo/dom-class",
"dojo/dom-style",
"dojo/dom-construct",
"dojo/_base/array",
"dojo/_base/lang",
"dojo/text",
"dojo/html",
"dojo/_base/event",
"VisualInspection/lib/jquery-1.11.2",
"dojo/text!VisualInspection/widget/template/VisualInspection.html",
"VisualInspection/widget/template/tf.min",
// "dojo/text!VisualInspection/widget/template/labels.json",
// "dojo/text!VisualInspection/widget/template/model.json"
], function (declare, _WidgetBase, _TemplatedMixin, dom, dojoDom, dojoProp, dojoGeometry, dojoClass, dojoStyle, dojoConstruct, dojoArray, lang, dojoText, dojoHtml, dojoEvent, _jQuery, widgetTemplate, tf) {
"use strict";
var $ = _jQuery.noConflict(true);
var LABELS_URL = "http://pni6w2465:7777/EasyPlan/model_web/labels.json"
var MODEL_JSON = "http://pni6w2465:7777/EasyPlan/model_web/model.json"
// var tf = require(['../../VisualInspection/node_modules/@tensorflow/tfjs']);
//////////////
const TFWrapper = model => {
const calculateMaxScores = (scores, numBoxes, numClasses) => {
const maxes = []
const classes = []
for (let i = 0; i < numBoxes; i++) {
let max = Number.MIN_VALUE
let index = -1
for (let j = 0; j < numClasses; j++) {
if (scores[i * numClasses + j] > max) {
max = scores[i * numClasses + j]
index = j
}
}
maxes[i] = max
classes[i] = index
}
return [maxes, classes]
}
const buildDetectedObjects = (
width,
height,
boxes,
scores,
indexes,
classes
) => {
const count = indexes.length
const objects = []
for (let i = 0; i < count; i++) {
const bbox = []
for (let j = 0; j < 4; j++) {
bbox[j] = boxes[indexes[i] * 4 + j]
}
const minY = bbox[0] * height
const minX = bbox[1] * width
const maxY = bbox[2] * height
const maxX = bbox[3] * width
bbox[0] = minX
bbox[1] = minY
bbox[2] = maxX - minX
bbox[3] = maxY - minY
objects.push({
bbox: bbox,
class: classes[indexes[i]],
score: scores[indexes[i]]
})
}
return objects
}
var img = null;
const detect = input => {
const batched = tf.tidy(() => {
const img = tf.browser.fromPixels(input)
// Reshape to a single-element batch so we can pass it to executeAsync.
// var img = null;
// //sid
// var canvas = document.querySelector("#canvasElement");
// if (canvas.getContext) {
// var ctx = canvas.getContext("2d");
// img = canvas.toDataURL("image/png");
// }
return img.expandDims(0)
})
const height = batched.shape[1]
const width = batched.shape[2]
// const height = img.height
// const width = img.width
return model.executeAsync(batched).then(result => {
const scores = result[0].dataSync()
const boxes = result[1].dataSync()
// clean the webgl tensors
batched.dispose()
tf.dispose(result)
const [maxScores, classes] = calculateMaxScores(
scores,
result[0].shape[1],
result[0].shape[2]
)
const prevBackend = tf.getBackend()
// run post process in cpu
tf.setBackend('cpu')
const indexTensor = tf.tidy(() => {
const boxes2 = tf.tensor2d(boxes, [
result[1].shape[1],
result[1].shape[3]
])
return tf.image.nonMaxSuppression(
boxes2,
maxScores,
20, // maxNumBoxes
0.5, // iou_threshold
0.5 // score_threshold
)
})
const indexes = indexTensor.dataSync()
indexTensor.dispose()
// restore previous backend
tf.setBackend(prevBackend)
return buildDetectedObjects(
width,
height,
boxes,
maxScores,
indexes,
classes
)
})
}
return {
detect: detect
}
}
//////////////////////
// Declare widget's prototype.
return declare("VisualInspection.widget.VisualInspection", [_WidgetBase, _TemplatedMixin], {
// _TemplatedMixin will create our dom node using this HTML template.
templateString: widgetTemplate,
// DOM elements
inputNodes: null,
colorSelectNode: null,
colorInputNode: null,
infoTextNode: null,
// Parameters configured in the Modeler.
mfToExecute: "",
messageString: "",
backgroundColor: "",
// Internal variables. Non-primitives created in the prototype are shared between all widget instances.
_handles: null,
_contextObj: null,
_alertDiv: null,
_readOnly: false,
// dojo.declare.constructor is called to construct the widget instance. Implement to initialize non-primitive properties.
constructor: function () {
logger.debug(this.id + ".constructor");
this._handles = [];
},
// dijit._WidgetBase.postCreate is called after constructing the widget. Implement to do extra setup work.
postCreate: function () {
logger.debug(this.id + ".postCreate");
if (this.readOnly || this.get("disabled") || this.readonly) {
this._readOnly = true;
}
this._updateRendering();
this._setupEvents();
var video = document.querySelector("#videoElement");
var canvas = document.querySelector("#canvasElement");
// if (navigator.mediaDevices.getUserMedia) {
// navigator.mediaDevices.getUserMedia({ video: true })
// .then(function (stream) {
// video.srcObject = stream;
// })
// .catch(function (err0r) {
// console.log("Something went wrong!");
// });
// }
this.componentDidMount();
},
////////////////////////////////////////////////////////
componentDidMount: function () {
var video = document.querySelector("#videoElement");
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
const webCamPromise = navigator.mediaDevices
.getUserMedia({
audio: false,
video: {
facingMode: 'user'
}
})
.then(stream => {
window.stream = stream
video.srcObject = stream
return new Promise((resolve, _) => {
video.onloadedmetadata = () => {
resolve()
}
})
})
const modelPromise = tf.loadGraphModel(MODEL_JSON)
const labelsPromise = fetch(LABELS_URL).then(data => data.json())
Promise.all([modelPromise, labelsPromise, webCamPromise])
.then(values => {
const [model, labels] = values
this.detectFrame(video, model, labels)
})
.catch(error => {
console.error(error)
})
}
},
detectFrame: function (video, model, labels) {
TFWrapper(model)
.detect(video)
.then(predictions => {
this.renderPredictions(predictions, labels)
requestAnimationFrame(() => {
this.detectFrame(video, model, labels)
})
})
},
renderPredictions: function (predictions, labels) {
var canvas = document.querySelector("#canvasElement");
const ctx = canvas.getContext('2d')
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height)
// Font options.
const font = '16px sans-serif'
ctx.font = font
ctx.textBaseline = 'top'
predictions.forEach(prediction => {
const x = prediction.bbox[0]
const y = prediction.bbox[1]
const width = prediction.bbox[2]
const height = prediction.bbox[3]
const label = labels[parseInt(prediction.class)]
// Draw the bounding box.
ctx.strokeStyle = '#00FFFF'
ctx.lineWidth = 4
ctx.strokeRect(x, y, width, height)
// Draw the label background.
ctx.fillStyle = '#00FFFF'
const textWidth = ctx.measureText(label).width
const textHeight = parseInt(font, 10) // base 10
ctx.fillRect(x, y, textWidth + 4, textHeight + 4)
})
predictions.forEach(prediction => {
const x = prediction.bbox[0]
const y = prediction.bbox[1]
const label = labels[parseInt(prediction.class)]
// Draw the text last to ensure it's on top.
ctx.fillStyle = '#000000'
ctx.fillText(label, x, y)
})
},
///////////////////////////////////////////////////////////
// mxui.widget._WidgetBase.update is called when context is changed or initialized. Implement to re-render and / or fetch data.
update: function (obj, callback) {
logger.debug(this.id + ".update");
this._contextObj = obj;
this._resetSubscriptions();
this._updateRendering(callback); // We're passing the callback to updateRendering to be called after DOM-manipulation
},
// mxui.widget._WidgetBase.enable is called when the widget should enable editing. Implement to enable editing if widget is input widget.
enable: function () {
logger.debug(this.id + ".enable");
},
// mxui.widget._WidgetBase.enable is called when the widget should disable editing. Implement to disable editing if widget is input widget.
disable: function () {
logger.debug(this.id + ".disable");
},
// mxui.widget._WidgetBase.resize is called when the page's layout is recalculated. Implement to do sizing calculations. Prefer using CSS instead.
resize: function (box) {
logger.debug(this.id + ".resize");
},
// mxui.widget._WidgetBase.uninitialize is called when the widget is destroyed. Implement to do special tear-down work.
uninitialize: function () {
logger.debug(this.id + ".uninitialize");
// Clean up listeners, helper objects, etc. There is no need to remove listeners added with this.connect / this.subscribe / this.own.
},
// We want to stop events on a mobile device
_stopBubblingEventOnMobile: function (e) {
logger.debug(this.id + "._stopBubblingEventOnMobile");
if (typeof document.ontouchstart !== "undefined") {
dojoEvent.stop(e);
}
},
// Attach events to HTML dom elements
_setupEvents: function () {
logger.debug(this.id + "._setupEvents");
this.connect(this.colorSelectNode, "change", function (e) {
// Function from mendix object to set an attribute.
this._contextObj.set(this.backgroundColor, this.colorSelectNode.value);
});
this.connect(this.infoTextNode, "click", function (e) {
// Only on mobile stop event bubbling!
this._stopBubblingEventOnMobile(e);
// If a microflow has been set execute the microflow on a click.
if (this.mfToExecute !== "") {
this._execMf(this.mfToExecute, this._contextObj.getGuid());
}
});
},
_execMf: function (mf, guid, cb) {
logger.debug(this.id + "._execMf");
if (mf && guid) {
mx.ui.action(mf, {
params: {
applyto: "selection",
guids: [guid]
},
callback: lang.hitch(this, function (objs) {
if (cb && typeof cb === "function") {
cb(objs);
}
}),
error: function (error) {
console.debug(error.description);
}
}, this);
}
},
// Rerender the interface.
_updateRendering: function (callback) {
logger.debug(this.id + "._updateRendering");
// Important to clear all validations!
this._clearValidations();
// The callback, coming from update, needs to be executed, to let the page know it finished rendering
this._executeCallback(callback, "_updateRendering");
},
// Handle validations.
_handleValidation: function (validations) {
logger.debug(this.id + "._handleValidation");
this._clearValidations();
var validation = validations[0],
message = validation.getReasonByAttribute(this.backgroundColor);
if (this._readOnly) {
validation.removeAttribute(this.backgroundColor);
} else if (message) {
this._addValidation(message);
validation.removeAttribute(this.backgroundColor);
}
},
// Clear validations.
_clearValidations: function () {
logger.debug(this.id + "._clearValidations");
dojoConstruct.destroy(this._alertDiv);
this._alertDiv = null;
},
// Show an error message.
_showError: function (message) {
logger.debug(this.id + "._showError");
if (this._alertDiv !== null) {
dojoHtml.set(this._alertDiv, message);
return true;
}
this._alertDiv = dojoConstruct.create("div", {
"class": "alert alert-danger",
"innerHTML": message
});
dojoConstruct.place(this._alertDiv, this.domNode);
},
// Add a validation.
_addValidation: function (message) {
logger.debug(this.id + "._addValidation");
this._showError(message);
},
// Reset subscriptions.
_resetSubscriptions: function () {
logger.debug(this.id + "._resetSubscriptions");
// Release handles on previous object, if any.
this.unsubscribeAll();
// When a mendix object exists create subscribtions.
if (this._contextObj) {
this.subscribe({
guid: this._contextObj.getGuid(),
callback: lang.hitch(this, function (guid) {
this._updateRendering();
})
});
this.subscribe({
guid: this._contextObj.getGuid(),
attr: this.backgroundColor,
callback: lang.hitch(this, function (guid, attr, attrValue) {
this._updateRendering();
})
});
this.subscribe({
guid: this._contextObj.getGuid(),
val: true,
callback: lang.hitch(this, this._handleValidation)
});
}
},
_executeCallback: function (cb, from) {
logger.debug(this.id + "._executeCallback" + (from ? " from " + from : ""));
if (cb && typeof cb === "function") {
cb();
}
}
});
});
require(["VisualInspection/widget/VisualInspection"]);
<div id="container">
<video autoplay="true" playsInline="true" width="600" height="500" id="videoElement" style="position: fixed;" >
</video>
<canvas id= "canvasElement" width="600" height="500" style="position: absolute;">
</canvas>
</div>
With the above code, when I run it using localhost, the camera code above executes and the stream is shown.
When I run it using the machine name or IP address, this code is not executed; Chrome's developer tools show it being skipped, due to security or some other reason.
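Browsers only expose navigator.mediaDevices.getUserMedia in a secure context, i.e. over https:// or on http://localhost, which matches the behaviour described above. A small guard like the following sketch (the element ID is taken from the widget template above) makes that visible:
// Sketch: log why the camera code is skipped when the page is served
// over plain http from an IP address or machine name.
if (!window.isSecureContext) {
  console.warn("Not a secure context: getUserMedia only works over https:// or on localhost");
} else if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
  navigator.mediaDevices.getUserMedia({ video: { facingMode: "user" } })
    .then(function (stream) {
      document.querySelector("#videoElement").srcObject = stream;
    })
    .catch(function (err) {
      console.error("getUserMedia failed:", err);
    });
} else {
  console.warn("navigator.mediaDevices.getUserMedia is not available in this context");
}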

migrate angular1 directive to angular4

I have a directive written in Angular 1 which supports drag and drop of images into the web app.
Below is the code:
'use strict';
angular.module('abc').directive("imageFileRead", ['$document', '$q', '$window', function ($document, $q, $window) {
var URL = $window.webkitURL || $window.URL;
//allowed extensions
var fileExtension = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/bmp'];
var isFileTypeAllowed = function (uploadedFile) {
try{
return $.inArray(uploadedFile.type, fileExtension) == -1 ? false : true;
}
catch(Ex){
}
};
var getResizeArea = function () {
var resizeArea = document.createElement('canvas');
resizeArea.id = 'result_image';
resizeArea.style.visibility = 'hidden';
document.body.appendChild(resizeArea);
return resizeArea;
};
var resizeImage = function (origImage, options) {
var maxHeight = options.resizeMaxHeight;
var maxWidth = options.resizeMaxWidth;
var quality = options.resizeQuality;
var type = options.resizeType;
var canvas = getResizeArea();
var height = origImage.height;
var width = origImage.width;
// calculate the width and height, constraining the proportions
if (width > height) {
if (width > maxWidth) {
height = Math.round(height *= maxWidth / width);
width = maxWidth;
}
} else {
if (height > maxHeight) {
width = Math.round(width *= maxHeight / height);
height = maxHeight;
}
}
canvas.width = width;
canvas.height = height;
//draw image on canvas
var ctx = canvas.getContext("2d");
ctx.drawImage(origImage, 0, 0, width, height);
// get the data from canvas as 70% jpg (or specified type).
return canvas.toDataURL(type, quality);
};
var createImage = function (url, callback) {
var image = new Image();
image.onload = function () {
callback(image);
};
image.src = url;
};
var fileToDataURL = function (file) {
var deferred = $q.defer();
var reader = new FileReader();
reader.onload = function (e) {
deferred.resolve(e.target.result);
};
reader.readAsDataURL(file);
return deferred.promise;
};
return {
restrict: 'A',
scope: {
resizeMaxHeight: '@?',
resizeMaxWidth: '@?',
resizeQuality: '@?',
resizeType: '@?',
whenToCompress: '@?',
onImageDropCtrlFn: '&onImageDrop'
},
link: function (scope, element, attrs, ctrl) {
scope.fileDetails = { fileData: {}, base64FileData: '', isValid: false };
scope.options = {
resizeMaxHeight: parseInt(scope.resizeMaxHeight) || 300,
resizeMaxWidth: parseInt(scope.resizeMaxHeight) || 250,
resizeQuality: parseInt(scope.resizeMaxHeight) || 0.9,
resizeType: scope.resizeType || 'image/png'
};
var doResizing = function (imageResult, callback) {
createImage(imageResult.url, function (image) {
var dataURL = resizeImage(image, scope.options);
imageResult.resized = {
dataURL: dataURL,
type: dataURL.match(/:(.+\/.+);/)[1],
};
callback(imageResult);
});
};
var applyScope = function (isValidFile) {
scope.fileDetails.isValid = isValidFile;
scope.onImageDropCtrlFn({ fileDetails: scope.fileDetails });
};
var handleUserChooseAndDragEvents = function (fileDetails) {
scope.fileDetails.fileData = fileDetails;
if (isFileTypeAllowed(scope.fileDetails.fileData)) {
fileToDataURL(scope.fileDetails.fileData).then(function (dataURL) {
scope.fileDetails.base64FileData = dataURL;
if (scope.resizeMaxHeight || scope.resizeMaxWidth) {
//resize image
if ((scope.fileDetails.fileData.size / 1000000) >= parseInt(scope.whenToCompress)) {
//do image compression
var imageResult = {
file: scope.fileDetails.fileData,
url: URL.createObjectURL(scope.fileDetails.fileData),
dataURL: scope.fileDetails.base64FileData
};
doResizing(imageResult, function (imageResult) {
scope.fileDetails.fileData = imageResult.file;
scope.fileDetails.base64FileData = imageResult.resized.dataURL;
//scope.fileDetails.fileData.type = imageResult.resized.type;
applyScope(true);
});
} else {
//no compresssion needed
applyScope(true);
}
}
else {
//no resizing
applyScope(true);
}
});
}
else {
applyScope(false);
}
};
//image choose event
element.bind("change", function (changeEvent) {
if (changeEvent.target.files) {
handleUserChooseAndDragEvents(changeEvent.target.files[0]);
}
});
//image drag and drop
var onDragOver = function (e) {
e.preventDefault();
};
var onDragEnd = function (e) {
e.preventDefault();
};
$document.bind("dragover", onDragOver);
//Dragging ends on the overlay, which takes the whole window
element.bind("dragleave", onDragEnd)
.bind("drop", function (e) {
e.preventDefault();
e.stopPropagation();
handleUserChooseAndDragEvents(e.originalEvent.dataTransfer.files[0]);
onDragEnd(e);
});
}
}
}]);
I'm trying to convert it to Angular 4.
Below is the Angular 4 code:
import { Directive, ElementRef, Input, OnInit, Inject } from '@angular/core';
import { DOCUMENT } from '@angular/platform-browser';
declare var $: any;
@Directive({
selector: '[appImageFileRead]'
})
export class ImageFileReadDirective implements OnInit {
@Input() resize_max_height: any;
@Input() resize_max_width: any;
@Input() resize_quality: any;
@Input() resize_type: any;
@Input() when_to_compress: any;
@Input() onImageDropCtrlFn: any = '&onImageDrop';
currentElem: any;
URL: any = window.URL;
//allowed extensions
fileExtension: any = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/bmp'];
fileDetails: any = { fileData: {}, base64FileData: '', isValid: false };
options: any;
constructor(@Inject(DOCUMENT) private document: any, el: ElementRef) {
this.currentElem = el;
}
ngOnInit() {
console.log("resize_max_height======" + this.resize_max_height);
this.options = {
resizeMaxHeight: parseInt(this.resize_max_width) || 300,
resizeMaxWidth: parseInt(this.resize_max_width) || 250,
resizeQuality: parseInt(this.resize_max_width) || 0.9,
resizeType: this.resize_type || 'image/png'
}
this.currentElem.bind("change", function (changeEvent: any) {
if (changeEvent.target.files) {
this.handleUserChooseAndDragEvents(changeEvent.target.files[0]);
}
});
this.document.bind("dragover", this.onDragOver);
this.currentElem.bind("dragleave", this.onDragEnd)
.bind("drop", function (e: any) {
e.preventDefault();
e.stopPropagation();
this.handleUserChooseAndDragEvents(e.originalEvent.dataTransfer.files[0]);
this.onDragEnd(e);
});
}
isFileTypeAllowed(uploadedFile: any) {
try {
return $.inArray(uploadedFile.type, this.fileExtension) == -1 ? false : true;
}
catch (Ex) {
}
}
getResizeArea() {
var resizeArea = document.createElement('canvas');
resizeArea.id = 'result_image';
resizeArea.style.visibility = 'hidden';
document.body.appendChild(resizeArea);
return resizeArea;
}
resizeImage(origImage: any, options: any) {
var maxHeight = options.resizeMaxHeight;
var maxWidth = options.resizeMaxWidth;
var quality = options.resizeQuality;
var type = options.resizeType;
var canvas = this.getResizeArea();
var height = origImage.height;
var width = origImage.width;
// calculate the width and height, constraining the proportions
if (width > height) {
if (width > maxWidth) {
height = Math.round(height *= maxWidth / width);
width = maxWidth;
}
} else {
if (height > maxHeight) {
width = Math.round(width *= maxHeight / height);
height = maxHeight;
}
}
canvas.width = width;
canvas.height = height;
//draw image on canvas
var ctx = canvas.getContext("2d");
ctx.drawImage(origImage, 0, 0, width, height);
// get the data from canvas as 70% jpg (or specified type).
return canvas.toDataURL(type, quality);
}
createImage(url: any, callback: any) {
var image = new Image();
image.onload = function () {
callback(image);
};
image.src = url;
}
fileToDataURL(file: any) {
var deferred = new Promise((resolve, reject) => {
var reader = new FileReader();
reader.onload = function (e: any) {
resolve(e.target.result);
};
reader.readAsDataURL(file);
}).then();
return deferred;
}
doResizing(imageResult: any, callback: any) {
this.createImage(imageResult.url, function (image: any) {
var dataURL = this.resizeImage(image, this.options);
imageResult.resized = {
dataURL: dataURL,
type: dataURL.match(/:(.+\/.+);/)[1],
};
callback(imageResult);
});
}
applyScope(isValidFile: any) {
this.fileDetails.isValid = isValidFile;
this.onImageDropCtrlFn({ fileDetails: this.fileDetails });
};
handleUserChooseAndDragEvents(fileDetails: any) {
this.fileDetails.fileData = fileDetails;
if (this.isFileTypeAllowed(this.fileDetails.fileData)) {
this.fileToDataURL(this.fileDetails.fileData).then(function (dataURL: any) {
this.fileDetails.base64FileData = dataURL;
if (this.resize_max_height || this.resize_max_width) {
//resize image
if ((this.fileDetails.fileData.size / 1000000) >= parseInt(this.whenToCompress)) {
//do image compression
var imageResult = {
file: this.fileDetails.fileData,
url: URL.createObjectURL(this.fileDetails.fileData),
dataURL: this.fileDetails.base64FileData
};
this.doResizing(imageResult, function (imageResult: any) {
this.fileDetails.fileData = imageResult.file;
this.fileDetails.base64FileData = imageResult.resized.dataURL;
//scope.fileDetails.fileData.type = imageResult.resized.type;
this.applyScope(true);
});
} else {
//no compresssion needed
this.applyScope(true);
}
}
else {
//no resizing
this.applyScope(true);
}
});
}
else {
this.applyScope(false);
}
}
//image choose event
//image drag and drop
onDragOver(e:any) {
e.preventDefault();
}
onDragEnd(e:any) {
e.preventDefault();
}
//Dragging ends on the overlay, which takes the whole window
}
I'm not sure about this.currentElem.bind and this.document.bind("dragover", this.onDragOver), that is, how to implement or bind events on the element.
I also need some guidance on whether the promise is implemented correctly or not.
<div class="form-group text-area"
id="file-drop"
image-file-read
on-image-drop="imageDropped(fileDetails)"
resize-max-height="300"
resize-max-width="300"
resize-quality="0.9"
resize-type="image/png"
when-to-compress="3">
Thanks!!
EDIT: Trying to add a HostListener:
@HostListener('document:dragover') onDocumentDragOver(evt: any) {
  evt.preventDefault();
  evt.stopPropagation();
  this.background = '#999';
  this.onDragOver(evt);
}
but this gives an error:
Cannot read property 'preventDefault' of undefined
To bind to properties of the element, you can use @HostBinding. To bind to events of the element, you can use @HostListener:
@HostBinding('class.test-class') hasTestClass = false;
@HostListener('mouseenter') onMouseEnter() {
  // ...
}
Here is more about this topic: https://alligator.io/angular/hostbinding-hostlistener/
You can also use this for binding to window or document events, like this:
@HostListener('document:dragover', ['$event'])
onDocumentDragOver(e) {
  // ...
}
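Applied to the directive in the question, the event wiring could look roughly like the sketch below; the handler bodies reuse the methods from the question, and note that with @HostListener you get the native event, so there is no jQuery originalEvent wrapper around dataTransfer:
// Sketch: replace this.currentElem.bind(...) / this.document.bind(...)
// with HostListener decorators on the directive class.
@HostListener('change', ['$event'])
onChange(changeEvent: any) {
  if (changeEvent.target.files) {
    this.handleUserChooseAndDragEvents(changeEvent.target.files[0]);
  }
}

@HostListener('drop', ['$event'])
onDrop(e: any) {
  e.preventDefault();
  e.stopPropagation();
  this.handleUserChooseAndDragEvents(e.dataTransfer.files[0]);
  this.onDragEnd(e);
}

@HostListener('dragleave', ['$event'])
onDragLeave(e: any) {
  this.onDragEnd(e);
}

@HostListener('document:dragover', ['$event'])
onDocumentDragOver(e: any) {
  this.onDragOver(e);
}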
About the promise: you are creating the promise and then calling an empty .then() on it; that extra .then() doesn't add anything and is probably not what you want. Also there is no need to save the promise to a deferred variable and then return it; simply return the new Promise, like this:
fileToDataURL(file: any) {
  return new Promise((resolve, reject) => {
    var reader = new FileReader();
    reader.onload = function (e: any) {
      resolve(e.target.result);
    };
    reader.readAsDataURL(file);
  });
}
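A short usage sketch: using an arrow function in the .then() keeps this pointing at the directive instance, which the function callbacks inside the question's handleUserChooseAndDragEvents would otherwise lose:
// Sketch: the arrow function preserves `this` (the directive instance).
this.fileToDataURL(this.fileDetails.fileData).then((dataURL: any) => {
  this.fileDetails.base64FileData = dataURL;
  this.applyScope(true);
});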

How do I display a base64-encoded string as an image in an Ionic app offline?

My code is below. $scope.student_photo is the variable that holds the encoded string. First I get it via AJAX and store it in the local DB:
$http.post(mData.url+'getStudentDetails/',{student_id : student_id} ).success(function(data){
//$scope.student_pic = data.student_details[0].student_pic;
//$scope.student_photo = data.student_details[0].student_photo;
$scope.myImage=data.student_details[0].student_photo;
//alert($scope.myImage);
// var image_stu = data.student_details[0].student_photo;
// localStorage.setItem("imageData", image_stu);
// document.getElementById("img_stu").src='data:image/png;base64,' + image_stu;
//alert("DONE");
var events = data.student_details
var l = events.length;
//alert(l);
db.transaction(function(tx) {
tx.executeSql('SELECT * FROM dashboard ', [], function(tx, results){
if (results.rows.length == 0)
{
tx.executeSql("INSERT INTO dashboard(dashboard_id, stu_name,clas,sec,student_image) VALUES(?,?,?,?,?)", [ student_id,data.student_details[0].student_name, data.student_details[0].class_name, data.student_details[0].section_name,data.student_details[0].student_photo], function(tx, res) {
$scope.dashboardDetails();
//alert('INSERTED');
}, function(e) {
//alert('ERROR: ' + e.message);
});
}
});
});
});
$scope.showScreen = function(screen) {
$state.go('app.'+screen);
}
$scope.printDashboard = function()
{
//alert('ss');
db.transaction(function (tx) {
// tx.executeSql('SELECT * FROM dashboard', [], function (tx, dtresult) {
//alert("dtresult.rows.length" + dtresult.rows.length);
// console.log("dtresult.rows.length" + dtresult.rows.length);
// $scope.totalrecords = dtresult.rows.length;
// });
tx.executeSql('SELECT * FROM dashboard', [], function (tx, dresult) {
console.log("dresult.rows.length" + dresult.rows.length);
dataset = dresult.rows;
console.log(dresult.rows);
for (var i = 0, item = null; i < dresult.rows.length; i++) {
item = dataset.item(i);
$scope.dashboarditems.push({stu_name: item['stu_name'],stu_class: item['clas'],stu_sec: item['Sec'],stu_img: item['student_image']});
//$scope.items.push({id: item['notice_title'], notice:item['notice'], event_date: item['event_date']});
console.log($scope.dashboarditems[0]);
}
$state.go('app.dashboard');
});
});
}
$scope.dashboardDetails = function()
{
var citems = [];
syncWithServer(function(syncSuccess){
$scope.printDashboard();
}, false);
}
Question:
How do I display my base64 string as an image?
If your base64 string is present inside $scope.student_photo, try the following img tag to display the image in the view:
<img ng-src="{{'data:image/png;base64,'+student_photo}}">
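For the offline case, the same binding works with the value read back from WebSQL; a small controller-side sketch using the stu_img field that printDashboard already pushes into $scope.dashboarditems (assuming the stored value is a bare base64 string without the data: prefix):
// Sketch: expose the stored base64 string on the scope so the
// ng-src binding above can render it while offline.
$scope.student_photo = $scope.dashboarditems[0].stu_img;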
