How to predict after training data is normalized? - tensorflow.js

I am learning TensorFlow.js using the official documentation and modified the code from the codelab to output pounds when given a kilogram value as input.
So I have a run function which runs when the DOM is loaded.
async function run() {
  const model = createModel();
  const data = createData();
  const tensorData = convertToTensor(data);
  const { inputs, labels } = tensorData;
  // Train Model
  await trainModel(model, inputs, labels);
  console.log('Training Complete');
  // Predict
  const normalizedPredictData = normalizeData([5]);
  const { normalizedPredictDataVal, predictValMax, predictValMin } = normalizedPredictData;
  const output = model.predict(normalizedPredictDataVal);
  const finalOutput = unNormalizeData(output, predictValMax, predictValMin);
  console.log(finalOutput.print());
}
document.addEventListener('DOMContentLoaded', run);
createModel creates a simple sequential model with two layers: one hidden layer and one output layer.
function createModel() {
  const model = tf.sequential();
  // Hidden Layer
  model.add(tf.layers.dense({ units: 1, inputShape: [1] }));
  // Output Layer
  model.add(tf.layers.dense({ units: 1 }));
  return model;
}
createData is a function which generates 500 values for training.
function createData() {
  const data = {
    inputs: Array.from({ length: 500 }, (x, i) => i),
    labels: Array.from({ length: 500 }, (x, i) => i * 2.2)
  };
  return data;
}
Inputs run from 0 to 499 and labels are just input * 2.2 because I want to predict pounds when a kg value is given as input.
The convertToTensor function normalizes the generated data after converting it to tensors.
function convertToTensor(data) {
  return tf.tidy(() => {
    tf.util.shuffle(data);
    const inputs = data.inputs;
    const labels = data.labels;
    const inputTensor = tf.tensor2d(inputs, [inputs.length, 1]);
    const labelTensor = tf.tensor2d(labels, [labels.length, 1]);
    // Normalize Data
    const inputMax = inputTensor.max();
    const inputMin = inputTensor.min();
    const labelMax = inputTensor.max();
    const labelMin = inputTensor.min();
    const normalizedInputs = inputTensor.sub(inputMin).div(inputMax.sub(inputMin));
    const normalizedLabels = labelTensor.sub(labelMin).div(labelMax.sub(labelMin));
    return {
      inputs: normalizedInputs,
      labels: normalizedLabels,
      inputMax,
      inputMin,
      labelMax,
      labelMin
    };
  });
}
Finally, the model is trained using trainModel.
async function trainModel(model, inputs, labels) {
  model.compile({
    optimizer: tf.train.adam(),
    loss: tf.losses.meanSquaredError,
    metrics: ['mse']
  });
  const batchSize = 32;
  const epochs = 50;
  return await model.fit(inputs, labels, {
    batchSize,
    epochs,
    shuffle: true,
    callbacks: tfvis.show.fitCallbacks(
      { name: 'Training Performance' },
      ['loss', 'mse'],
      { height: 200, callbacks: ['onEpochEnd'] }
    )
  });
}
Now that the model is trained, it's time to predict values. As the model was trained with normalized values, I pass only normalized input values to the predict function.
function normalizeData(value) {
  const predictValTensor = tf.tensor2d(value, [value.length, 1]);
  const predictValMax = predictValTensor.max();
  const predictValMin = predictValTensor.min();
  const normalizedPredictDataVal = predictValTensor.sub(predictValMin).div(predictValMax.sub(predictValMin));
  return {
    normalizedPredictDataVal,
    predictValMax,
    predictValMin
  };
}
The above function converts the value to a tensor, normalizes it, and returns the result, which is then passed to the predict function to get an output value. As the input was normalized, the output needs to be un-normalized, so I created a function for that.
function unNormalizeData(value, predictMax, predictMin) {
  const unNormPredictVal = value.mul(predictMax.sub(predictMin)).add(predictMin);
  return unNormPredictVal;
}
Once the output is un-normalized, I simply log it to the console. But it only outputs the value that I gave as input, in this case 5.
The code up to the training step works fine. I think the error lies where I normalize and un-normalize the value for prediction.

The value to predict should be normalized using the max and min of the training sample. There should be no predictValMax (respectively predictValMin) distinct from inputMax (respectively inputMin):
const predictValMax = predictValTensor.max();
const predictValMin = predictValTensor.min();
The normalization of a feature should not depend on which dataset it appears in.
Training set of features:
[-5, 5], inputMin = -5, inputMax = 5; normalized = [0, 1]
Given these two test sets of features:
[5, 6], predictMin = 5, predictMax = 6; normalized = [0, 1]
[5], predictMin = 5, predictMax = 5; normalized = [NaN] (a division by zero occurs here)
The normalized value for 5 is different in each test set, and also different from its normalized value in the training data. The model will end up predicting a different value for the same feature 5 each time it occurs, because its normalized value is dataset dependent.
This would not happen if the same normalization parameters (inputMin, inputMax) were applied to every predicted value.
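As a minimal sketch of that idea (assuming the inputMin and inputMax tensors computed during training are still in scope; the helper name is just for illustration), prediction-time normalization reuses the training statistics:
// Illustration only: normalize new values with the training min/max,
// not with the min/max of the new values themselves.
function normalizeForPrediction(values, inputMin, inputMax) {
  const t = tf.tensor2d(values, [values.length, 1]);
  return t.sub(inputMin).div(inputMax.sub(inputMin));
}
The full corrected listing below keeps inputMin, inputMax, labelMin and labelMax from training and reuses them at prediction time.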
function createModel() {
  const model = tf.sequential();
  // Hidden Layer
  model.add(tf.layers.dense({ units: 1, inputShape: [1] }));
  // Output Layer
  model.add(tf.layers.dense({ units: 1 }));
  return model;
}

let inputMin, inputMax, labelMin, labelMax;

function createData() {
  const data = {
    inputs: Array.from({ length: 500 }, (x, i) => i),
    labels: Array.from({ length: 500 }, (x, i) => i * 2.2)
  };
  return data;
}
function convertToTensor(data) {
  return tf.tidy(() => {
    tf.util.shuffle(data);
    const inputs = data.inputs;
    const labels = data.labels;
    const inputTensor = tf.tensor2d(inputs, [inputs.length, 1]);
    const labelTensor = tf.tensor2d(labels, [labels.length, 1]);
    inputTensor.print();
    labelTensor.print();
    // Normalize Data (label min/max come from the label tensor, input min/max from the input tensor)
    inputMax = inputTensor.max();
    inputMin = inputTensor.min();
    labelMax = labelTensor.max();
    labelMin = labelTensor.min();
    const normalizedInputs = inputTensor.sub(inputMin).div(inputMax.sub(inputMin));
    const normalizedLabels = labelTensor.sub(labelMin).div(labelMax.sub(labelMin));
    return {
      inputs: normalizedInputs,
      labels: normalizedLabels,
      inputMax,
      inputMin,
      labelMax,
      labelMin
    };
  });
}
async function trainModel(model, inputs, labels) {
  const learningRate = 0.01;
  const optimizer = tf.train.sgd(learningRate);
  // tf.train.adam()
  model.compile({
    optimizer: optimizer,
    loss: tf.losses.meanSquaredError,
    metrics: ['mse']
  });
  const batchSize = 32;
  const epochs = 200;
  inputs.print();
  labels.print();
  return await model.fit(inputs, labels, {
    batchSize,
    epochs,
    shuffle: true,
    callbacks: tfvis.show.fitCallbacks(
      { name: 'Training Performance' },
      ['loss', 'mse'],
      { height: 200, callbacks: ['onEpochEnd'] }
    )
  });
}
function normalizeData(value) {
  const predictValTensor = tf.tensor2d(value, [value.length, 1]);
  // const predictValMax = predictValTensor.max();
  // const predictValMin = predictValTensor.min();
  const normalizedPredictDataVal = predictValTensor.sub(inputMin).div(inputMax.sub(inputMin));
  return {
    normalizedPredictDataVal,
    inputMax,
    inputMin
  };
}
function unNormalizeData(value) {
  // Un-normalize the prediction with the label min/max saved from training.
  const unNormPredictVal = value.mul(labelMax.sub(labelMin)).add(labelMin);
  return unNormPredictVal;
}
async function run() {
  const model = createModel();
  const data = createData();
  const tensorData = convertToTensor(data);
  const { inputs, labels } = tensorData;
  await trainModel(model, inputs, labels);
  console.log('Training Complete');
  const { normalizedPredictDataVal } = normalizeData([1000, 6, 7]);
  console.log('normalized input');
  normalizedPredictDataVal.print();
  const output = model.predict(normalizedPredictDataVal);
  output.print();
  const finalOutput = unNormalizeData(output);
  finalOutput.print();
}
document.addEventListener('DOMContentLoaded', run);
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Document</title>
</head>
<body>
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js"></script>
  <script src="index.js"></script>
</body>
</html>

Related

Jest not generating values correctly using crypto

I made a function to generate a complex password using the window.crypto lib; this works perfectly and returns values like jQzPN%c#tr71ie6Dt^C8.
Here is my function :
const genPwd = (length: number): string => {
const regex = /^(?=.*[A-Z])(?=.*[!@#$%^&*])(?=.*[0-9]).{8,}$/;
const charset =
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789~!@#$%^&*()_-+={[}]|:;<,>.?/';
let result = '';
let values = new Uint32Array(length);
window.crypto.getRandomValues(values);
for (let i = 0; i < length; i++) {
result += charset[values[i] % charset.length];
}
if (!regex.test(result)) {
console.log('result', result);
return genPwd(length);
} else {
return result;
}
};
But when I try to test this function using Jest, it falls into the last condition every time because the result doesn't match the regex; the console.log shows "AAAAAAAAAAAAAAAAAAAA", as if nothing is being generated correctly, so it never matches the regex and keeps looping.
Here is my test :
it('should generate a x length password', () => {
const mGetRandomValues = jest.fn().mockReturnValueOnce(new Uint32Array(20));
Object.defineProperty(globalThis, 'crypto', {
value: { getRandomValues: mGetRandomValues },
});
const valueToConvert = utils.genPwd(20);
const valueToFind = 20;
expect(valueToConvert).toHaveLength(valueToFind);
expect(mGetRandomValues).toBeCalledWith(new Uint32Array(valueToFind));
});
Does anyone have a solution?
I have no idea what is causing this issue.
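For context on the symptom: the real crypto.getRandomValues fills the typed array that is passed to it, and genPwd then reads that same values array. A mockReturnValueOnce(new Uint32Array(20)) never touches the passed array, so values stays all zeros and every character maps to charset[0], i.e. 'A'. A minimal sketch of a mock that fills the passed array instead (illustrative only; any non-constant filler works):
// Illustrative mock: fill the passed Uint32Array, mirroring the real API's behaviour.
const mGetRandomValues = jest.fn((arr) => {
  for (let i = 0; i < arr.length; i++) {
    arr[i] = Math.floor(Math.random() * 0x100000000);
  }
  return arr;
});
Object.defineProperty(globalThis, 'crypto', {
  value: { getRandomValues: mGetRandomValues },
});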

Multi-variate regression in TensorFlow.js

I'm following the tutorials for TensorFlow.js: I've followed along their basic regression tutorial and it worked perfectly fine.
Then I tried to expand on that to perform a multi-variate prediction, but I can't understand what I'm doing wrong. I loaded my structured data, created the model specifying the input shape, then I changed the tensor conversion to obtain an array of arrays with the correct shape. When I pass the inputs to the model.fit() function, I get the following error:
Error when checking model input: the Array of Tensors that you are passing to your model is not the size the model expected. Expected to see 1 Tensor(s), but instead got the following list of Tensor(s): 0.3263888888888889,0.42857142857142855,0.22580645161290322,0.75,1,0.2,0.7583333333333333,0.42857142857142855,0.22580645161290322,0.75,0,0.4,0.3236111111111111,0.14285714285714285,0.3870967741935484,0.75,1,0.2,0.7590277777777777,0.7142857142857143,0.2903225806451613,0.75,0,0.2,0.7763888888888889,0.2857142857142857,0.1935483870967742,0.75,0,0.4,0.32430555555555557,0.5714285714285714,0.25806451612903225,0.75,1,0.2,0.325,0.7142857142857143,0.2903225806451613,0.75,1,0.2,0.32569444444444445,0.14285714285714285,0.16129032258064516,0.75,1,0.2,0.7687499999999999,0.14285714285714285,0.16129032258064516,0.75,0,0.4,0.32916666666666666,0.2857142857142857,0.1935483870967742,0.75,1,0.4
Here is my code:
async function getData() {
return [
{"direction":1,"dow":1,"month":9,"dom":5,"start":749,"arrival":832,"variant1":1,"startHour":7.816666666666666,"arrivalHour":8.533333333333333,"timeTaken":0.7166666666666668},
{"direction":0,"dow":1,"month":9,"dom":5,"start":1827,"arrival":1917,"variant1":2,"startHour":18.45,"arrivalHour":19.283333333333335,"timeTaken":0.8333333333333357},
{"direction":1,"dow":2,"month":9,"dom":6,"start":754,"arrival":846,"variant1":2,"startHour":7.9,"arrivalHour":8.766666666666667,"timeTaken":0.8666666666666671},
{"direction":0,"dow":2,"month":9,"dom":6,"start":1838,"arrival":1934,"variant1":2,"startHour":18.633333333333333,"arrivalHour":19.566666666666666,"timeTaken":0.9333333333333336},
{"direction":1,"dow":3,"month":9,"dom":7,"start":750,"arrival":836,"variant1":1,"startHour":7.833333333333333,"arrivalHour":8.6,"timeTaken":0.7666666666666666},
{"direction":0,"dow":3,"month":9,"dom":7,"start":1812,"arrival":1855,"variant1":2,"startHour":18.2,"arrivalHour":18.916666666666668,"timeTaken":0.7166666666666686},
{"direction":1,"dow":4,"month":9,"dom":8,"start":747,"arrival":834,"variant1":1,"startHour":7.783333333333333,"arrivalHour":8.566666666666666,"timeTaken":0.7833333333333332},
{"direction":1,"dow":5,"month":9,"dom":9,"start":748,"arrival":834,"variant1":1,"startHour":7.8,"arrivalHour":8.566666666666666,"timeTaken":0.7666666666666666},
{"direction":0,"dow":5,"month":9,"dom":9,"start":1813,"arrival":1855,"variant1":1,"startHour":18.216666666666665,"arrivalHour":18.916666666666668,"timeTaken":0.7000000000000028},
{"direction":1,"dow":1,"month":9,"dom":12,"start":746,"arrival":833,"variant1":1,"startHour":7.766666666666667,"arrivalHour":8.55,"timeTaken":0.7833333333333341}
]
}
async function run() {
// Load the original input data that we are going to train on.
const data = await getData();
// Create the model
const model = createModel();
tfvis.show.modelSummary({name: 'Model Summary'}, model);
// Convert the data to a form we can use for training.
const tensorData = convertToTensor(data);
const {inputs, labels} = tensorData;
// Train the model
console.log('Starting Training');
await trainModel(model, inputs, labels);
console.log('Done Training');
}
document.addEventListener('DOMContentLoaded', run);
function createModel() {
const model = tf.sequential()
model.add(tf.layers.dense({inputShape: [ 6 ], units: 10}))
model.add(tf.layers.dense({units: 5}))
return model
}
function convertToTensor(data) {
return tf.tidy(() => {
tf.util.shuffle(data)
// Ad-hoc normalization. Variant1 is one of a few values (1, 2, 3...): dividing by 5 should never give much more than 1 in the future
const inputs = data.map(d => [
d.startHour / 24,
d.dow / 7,
d.dom / 31,
d.month / 12,
d.direction,
d.variant1 / 5,
])
const labels = data.map(d => d.timeTaken)
const inputTensor = tf.tensor2d(inputs, [ inputs.length, 6 ])
const labelTensor = tf.tensor2d(labels, [ labels.length, 1 ])
return {
inputs,
labels,
}
});
}
async function trainModel(model, inputs, labels) {
model.compile({
optimizer: tf.train.adam(),
loss: tf.losses.meanSquaredError,
metrics: ['mse'],
})
const batchSize = 2
const epochs = 100
return await model.fit(inputs, labels, {
batchSize,
epochs,
shuffle: true,
callbacks: tfvis.show.fitCallbacks(
{ name: 'Training Performance' },
['loss', 'mse'],
{ height: 200, callbacks: ['onEpochEnd'] }
)
})
}
<!DOCTYPE html>
<html>
<head>
<title>TensorFlow.js Tutorial</title>
<!-- Import TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs#2.0.0/dist/tf.min.js"></script>
<!-- Import tfjs-vis -->
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs-vis#1.0.2/dist/tfjs-vis.umd.min.js"></script>
</head>
<body>
<!-- Import the main script file -->
<script type="module" src="tfjs-regression-commute-times.so.js"></script>
</body>
</html>
I have a decent grasp of ML concepts, but this is the first time I've tried to actually implement them, so please point out any misconception I may have about layers, models, or whatnot.
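For reference, model.fit expects tensors rather than plain JavaScript arrays of numbers; a minimal sketch of the shapes involved for six features (illustrative values, not the data above):
// Illustration only: inputs as a [numExamples, 6] tensor, labels as [numExamples, 1].
const exampleInputs = [
  [0.33, 0.14, 0.16, 0.75, 1, 0.2],
  [0.77, 0.14, 0.16, 0.75, 0, 0.4]
];
const exampleLabels = [[0.78], [0.72]];
const xs = tf.tensor2d(exampleInputs); // shape [2, 6]
const ys = tf.tensor2d(exampleLabels); // shape [2, 1]
// model.fit(xs, ys, {...}) expects tensors like these; the last layer's units must match the label width.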

Count the duplicates in a string array using React JS

The following is code I implemented to create a bar chart using Chart.js in a React app. It creates a bar chart with all the data in an array, but I want to change it so that the x-axis shows the destination and the y-axis shows the number of occurrences of that destination, since there are many repeated destinations.
I searched for ways to do this but couldn't find a correct solution.
Can anyone help me with this?
const dataArrayY4 = [];
res.data.map(item => {
dataArrayY4.push(item.time)
})
const dataArrayX4 = []
res.data.map(item => {
dataArrayX4.push(item.destination)
})
this.setState({
data4: dataArrayY4,
labels4: dataArrayX4,
});
This could be done as follows:
const res = {
data: [
{ time: 1, destination: 'A'},
{ time: 3, destination: 'A'},
{ time: 2, destination: 'B'}
]
};
let tmp4 = [];
res.data.map((o, i) => {
const existing = tmp4.find(e => e.destination == o.destination);
if (existing) {
existing.time += o.time;
} else {
tmp4.push({time: o.time, destination: o.destination});
}
})
this.setState({
  data4: tmp4.map(o => o.time),
  labels4: tmp4.map(o => o.destination)
});
The above code could be further optimized by using Array.reduce() instead of Array.map().
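A reduce-based version of the same aggregation might look like this (a sketch for illustration):
const tmp4 = res.data.reduce((acc, o) => {
  const existing = acc.find(e => e.destination == o.destination);
  if (existing) {
    existing.time += o.time;
  } else {
    acc.push({ time: o.time, destination: o.destination });
  }
  return acc;
}, []);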
I would make the code more efficient. Instead of dataArrayY4 being an array, I would make it an object keyed by destination, with the number of occurrences of each destination as the value. This way you can count the occurrences of all the items in res.data.
const dataArrayY4 = {};
res.data.map(item => {
dataArrayY4[item.destination] = (dataArrayY4[item.destination] || 0) + 1
})
const dataArrayX4 = []
res.data.forEach(item => {
dataArrayX4.push(item.destination)
})
this.setState({
data4: dataArrayY4,
labels4: dataArrayX4,
});
Then if you want to look up the number of occurrences of a particular value, e.g. Sri Lanka, you use:
this.state.data4['Sri Lanka']
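If the chart needs parallel arrays of unique labels and matching counts, one way to derive them from that object is (a sketch for illustration):
const labels4 = Object.keys(dataArrayY4);        // unique destinations
const data4 = labels4.map(d => dataArrayY4[d]);  // occurrence count for each destination
this.setState({ data4, labels4 });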

Camera streaming service works only with localhost but not with IP address

I have implemented a service which streams camera output to an HTML5 page, but it works only if I use localhost:8080. If I use the IP address or machine name instead, it does not even detect the camera.
/*global logger*/
/*
VisualInspection
========================
#file : VisualInspection.js
#version : 1.0.0
#author :
#date : 7/28/2019
#copyright :
#license : Apache 2
Documentation
========================
Describe your widget here.
*/
// Required module list. Remove unnecessary modules, you can always get them back from the boilerplate.
define([
"dojo/_base/declare",
"mxui/widget/_WidgetBase",
"dijit/_TemplatedMixin",
"mxui/dom",
"dojo/dom",
"dojo/dom-prop",
"dojo/dom-geometry",
"dojo/dom-class",
"dojo/dom-style",
"dojo/dom-construct",
"dojo/_base/array",
"dojo/_base/lang",
"dojo/text",
"dojo/html",
"dojo/_base/event",
"VisualInspection/lib/jquery-1.11.2",
"dojo/text!VisualInspection/widget/template/VisualInspection.html",
"VisualInspection/widget/template/tf.min",
// "dojo/text!VisualInspection/widget/template/labels.json",
// "dojo/text!VisualInspection/widget/template/model.json"
], function (declare, _WidgetBase, _TemplatedMixin, dom, dojoDom, dojoProp, dojoGeometry, dojoClass, dojoStyle, dojoConstruct, dojoArray, lang, dojoText, dojoHtml, dojoEvent, _jQuery, widgetTemplate, tf) {
"use strict";
var $ = _jQuery.noConflict(true);
var LABELS_URL = "http://pni6w2465:7777/EasyPlan/model_web/labels.json"
var MODEL_JSON = "http://pni6w2465:7777/EasyPlan/model_web/model.json"
// var tf = require(['../../VisualInspection/node_modules/@tensorflow/tfjs']);
//////////////
const TFWrapper = model => {
const calculateMaxScores = (scores, numBoxes, numClasses) => {
const maxes = []
const classes = []
for (let i = 0; i < numBoxes; i++) {
let max = Number.MIN_VALUE
let index = -1
for (let j = 0; j < numClasses; j++) {
if (scores[i * numClasses + j] > max) {
max = scores[i * numClasses + j]
index = j
}
}
maxes[i] = max
classes[i] = index
}
return [maxes, classes]
}
const buildDetectedObjects = (
width,
height,
boxes,
scores,
indexes,
classes
) => {
const count = indexes.length
const objects = []
for (let i = 0; i < count; i++) {
const bbox = []
for (let j = 0; j < 4; j++) {
bbox[j] = boxes[indexes[i] * 4 + j]
}
const minY = bbox[0] * height
const minX = bbox[1] * width
const maxY = bbox[2] * height
const maxX = bbox[3] * width
bbox[0] = minX
bbox[1] = minY
bbox[2] = maxX - minX
bbox[3] = maxY - minY
objects.push({
bbox: bbox,
class: classes[indexes[i]],
score: scores[indexes[i]]
})
}
return objects
}
var img = null;
const detect = input => {
const batched = tf.tidy(() => {
const img = tf.browser.fromPixels(input)
// Reshape to a single-element batch so we can pass it to executeAsync.
// var img = null;
// //sid
// var canvas = document.querySelector("#canvasElement");
// if (canvas.getContext) {
// var ctx = canvas.getContext("2d");
// img = canvas.toDataURL("image/png");
// }
return img.expandDims(0)
})
const height = batched.shape[1]
const width = batched.shape[2]
// const height = img.height
// const width = img.width
return model.executeAsync(batched).then(result => {
const scores = result[0].dataSync()
const boxes = result[1].dataSync()
// clean the webgl tensors
batched.dispose()
tf.dispose(result)
const [maxScores, classes] = calculateMaxScores(
scores,
result[0].shape[1],
result[0].shape[2]
)
const prevBackend = tf.getBackend()
// run post process in cpu
tf.setBackend('cpu')
const indexTensor = tf.tidy(() => {
const boxes2 = tf.tensor2d(boxes, [
result[1].shape[1],
result[1].shape[3]
])
return tf.image.nonMaxSuppression(
boxes2,
maxScores,
20, // maxNumBoxes
0.5, // iou_threshold
0.5 // score_threshold
)
})
const indexes = indexTensor.dataSync()
indexTensor.dispose()
// restore previous backend
tf.setBackend(prevBackend)
return buildDetectedObjects(
width,
height,
boxes,
maxScores,
indexes,
classes
)
})
}
return {
detect: detect
}
}
//////////////////////
// Declare widget's prototype.
return declare("VisualInspection.widget.VisualInspection", [_WidgetBase, _TemplatedMixin], {
// _TemplatedMixin will create our dom node using this HTML template.
templateString: widgetTemplate,
// DOM elements
inputNodes: null,
colorSelectNode: null,
colorInputNode: null,
infoTextNode: null,
// Parameters configured in the Modeler.
mfToExecute: "",
messageString: "",
backgroundColor: "",
// Internal variables. Non-primitives created in the prototype are shared between all widget instances.
_handles: null,
_contextObj: null,
_alertDiv: null,
_readOnly: false,
// dojo.declare.constructor is called to construct the widget instance. Implement to initialize non-primitive properties.
constructor: function () {
logger.debug(this.id + ".constructor");
this._handles = [];
},
// dijit._WidgetBase.postCreate is called after constructing the widget. Implement to do extra setup work.
postCreate: function () {
logger.debug(this.id + ".postCreate");
if (this.readOnly || this.get("disabled") || this.readonly) {
this._readOnly = true;
}
this._updateRendering();
this._setupEvents();
var video = document.querySelector("#videoElement");
var canvas = document.querySelector("#canvasElement");
// if (navigator.mediaDevices.getUserMedia) {
// navigator.mediaDevices.getUserMedia({ video: true })
// .then(function (stream) {
// video.srcObject = stream;
// })
// .catch(function (err0r) {
// console.log("Something went wrong!");
// });
// }
this.componentDidMount();
},
////////////////////////////////////////////////////////
componentDidMount: function () {
var video = document.querySelector("#videoElement");
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
const webCamPromise = navigator.mediaDevices
.getUserMedia({
audio: false,
video: {
facingMode: 'user'
}
})
.then(stream => {
window.stream = stream
video.srcObject = stream
return new Promise((resolve, _) => {
video.onloadedmetadata = () => {
resolve()
}
})
})
const modelPromise = tf.loadGraphModel(MODEL_JSON)
const labelsPromise = fetch(LABELS_URL).then(data => data.json())
Promise.all([modelPromise, labelsPromise, webCamPromise])
.then(values => {
const [model, labels] = values
this.detectFrame(video, model, labels)
})
.catch(error => {
console.error(error)
})
}
},
detectFrame: function (video, model, labels) {
TFWrapper(model)
.detect(video)
.then(predictions => {
this.renderPredictions(predictions, labels)
requestAnimationFrame(() => {
this.detectFrame(video, model, labels)
})
})
},
renderPredictions: function (predictions, labels) {
var canvas = document.querySelector("#canvasElement");
const ctx = canvas.getContext('2d')
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height)
// Font options.
const font = '16px sans-serif'
ctx.font = font
ctx.textBaseline = 'top'
predictions.forEach(prediction => {
const x = prediction.bbox[0]
const y = prediction.bbox[1]
const width = prediction.bbox[2]
const height = prediction.bbox[3]
const label = labels[parseInt(prediction.class)]
// Draw the bounding box.
ctx.strokeStyle = '#00FFFF'
ctx.lineWidth = 4
ctx.strokeRect(x, y, width, height)
// Draw the label background.
ctx.fillStyle = '#00FFFF'
const textWidth = ctx.measureText(label).width
const textHeight = parseInt(font, 10) // base 10
ctx.fillRect(x, y, textWidth + 4, textHeight + 4)
})
predictions.forEach(prediction => {
const x = prediction.bbox[0]
const y = prediction.bbox[1]
const label = labels[parseInt(prediction.class)]
// Draw the text last to ensure it's on top.
ctx.fillStyle = '#000000'
ctx.fillText(label, x, y)
})
},
///////////////////////////////////////////////////////////
// mxui.widget._WidgetBase.update is called when context is changed or initialized. Implement to re-render and / or fetch data.
update: function (obj, callback) {
logger.debug(this.id + ".update");
this._contextObj = obj;
this._resetSubscriptions();
this._updateRendering(callback); // We're passing the callback to updateRendering to be called after DOM-manipulation
},
// mxui.widget._WidgetBase.enable is called when the widget should enable editing. Implement to enable editing if widget is input widget.
enable: function () {
logger.debug(this.id + ".enable");
},
// mxui.widget._WidgetBase.enable is called when the widget should disable editing. Implement to disable editing if widget is input widget.
disable: function () {
logger.debug(this.id + ".disable");
},
// mxui.widget._WidgetBase.resize is called when the page's layout is recalculated. Implement to do sizing calculations. Prefer using CSS instead.
resize: function (box) {
logger.debug(this.id + ".resize");
},
// mxui.widget._WidgetBase.uninitialize is called when the widget is destroyed. Implement to do special tear-down work.
uninitialize: function () {
logger.debug(this.id + ".uninitialize");
// Clean up listeners, helper objects, etc. There is no need to remove listeners added with this.connect / this.subscribe / this.own.
},
// We want to stop events on a mobile device
_stopBubblingEventOnMobile: function (e) {
logger.debug(this.id + "._stopBubblingEventOnMobile");
if (typeof document.ontouchstart !== "undefined") {
dojoEvent.stop(e);
}
},
// Attach events to HTML dom elements
_setupEvents: function () {
logger.debug(this.id + "._setupEvents");
this.connect(this.colorSelectNode, "change", function (e) {
// Function from mendix object to set an attribute.
this._contextObj.set(this.backgroundColor, this.colorSelectNode.value);
});
this.connect(this.infoTextNode, "click", function (e) {
// Only on mobile stop event bubbling!
this._stopBubblingEventOnMobile(e);
// If a microflow has been set execute the microflow on a click.
if (this.mfToExecute !== "") {
this._execMf(this.mfToExecute, this._contextObj.getGuid());
}
});
},
_execMf: function (mf, guid, cb) {
logger.debug(this.id + "._execMf");
if (mf && guid) {
mx.ui.action(mf, {
params: {
applyto: "selection",
guids: [guid]
},
callback: lang.hitch(this, function (objs) {
if (cb && typeof cb === "function") {
cb(objs);
}
}),
error: function (error) {
console.debug(error.description);
}
}, this);
}
},
// Rerender the interface.
_updateRendering: function (callback) {
logger.debug(this.id + "._updateRendering");
// Important to clear all validations!
this._clearValidations();
// The callback, coming from update, needs to be executed, to let the page know it finished rendering
this._executeCallback(callback, "_updateRendering");
},
// Handle validations.
_handleValidation: function (validations) {
logger.debug(this.id + "._handleValidation");
this._clearValidations();
var validation = validations[0],
message = validation.getReasonByAttribute(this.backgroundColor);
if (this._readOnly) {
validation.removeAttribute(this.backgroundColor);
} else if (message) {
this._addValidation(message);
validation.removeAttribute(this.backgroundColor);
}
},
// Clear validations.
_clearValidations: function () {
logger.debug(this.id + "._clearValidations");
dojoConstruct.destroy(this._alertDiv);
this._alertDiv = null;
},
// Show an error message.
_showError: function (message) {
logger.debug(this.id + "._showError");
if (this._alertDiv !== null) {
dojoHtml.set(this._alertDiv, message);
return true;
}
this._alertDiv = dojoConstruct.create("div", {
"class": "alert alert-danger",
"innerHTML": message
});
dojoConstruct.place(this._alertDiv, this.domNode);
},
// Add a validation.
_addValidation: function (message) {
logger.debug(this.id + "._addValidation");
this._showError(message);
},
// Reset subscriptions.
_resetSubscriptions: function () {
logger.debug(this.id + "._resetSubscriptions");
// Release handles on previous object, if any.
this.unsubscribeAll();
// When a mendix object exists create subscribtions.
if (this._contextObj) {
this.subscribe({
guid: this._contextObj.getGuid(),
callback: lang.hitch(this, function (guid) {
this._updateRendering();
})
});
this.subscribe({
guid: this._contextObj.getGuid(),
attr: this.backgroundColor,
callback: lang.hitch(this, function (guid, attr, attrValue) {
this._updateRendering();
})
});
this.subscribe({
guid: this._contextObj.getGuid(),
val: true,
callback: lang.hitch(this, this._handleValidation)
});
}
},
_executeCallback: function (cb, from) {
logger.debug(this.id + "._executeCallback" + (from ? " from " + from : ""));
if (cb && typeof cb === "function") {
cb();
}
}
});
});
require(["VisualInspection/widget/VisualInspection"]);
<div id="container">
<video autoplay="true" playsInline="true" width="600" height="500" id="videoElement" style="position: fixed;" >
</video>
<canvas id= "canvasElement" width="600" height="500" style="position: absolute;">
</canvas>
</div>
With the above code, everything runs when I use localhost. When I run it using the machine name or IP address, the camera code is never even reached in Chrome's developer tools, presumably for security or some other reason.
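One detail consistent with this symptom (background, not a confirmed diagnosis): browsers expose navigator.mediaDevices.getUserMedia only in secure contexts, i.e. HTTPS or localhost, so a quick check along these lines distinguishes the two cases:
// Illustrative check: getUserMedia is unavailable outside secure contexts (HTTPS / localhost).
if (!window.isSecureContext || !navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
  console.warn('Camera access needs a secure context; navigator.mediaDevices is ' +
    typeof navigator.mediaDevices);
}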

Wrong output of classifier

I'm new to machine learning and I used an MNIST demo model to train a cat and dog classifier, but it doesn't seem to work very well.
It seems that this model always predicts any input as a cat.
This is my code. Please help me.
index.js:
import {IMAGE_H, IMAGE_W, MnistData} from './data.js';
import * as ui from './ui.js';
let classNum = 0;
function createConvModel() {
const model = tf.sequential();
model.add(tf.layers.conv2d({
inputShape: [IMAGE_H, IMAGE_W, 3],
kernelSize: 5,
filters: 32,
activation: 'relu'
}));
model.add(tf.layers.maxPooling2d({poolSize: 2, strides: 2}));
model.add(tf.layers.conv2d({kernelSize: 5, filters: 32, activation: 'relu'}));
model.add(tf.layers.maxPooling2d({poolSize: 2, strides: 2}));
model.add(tf.layers.conv2d({kernelSize: 5, filters: 64, activation: 'relu'}));
model.add(tf.layers.flatten({}));
model.add(tf.layers.dense({units: 64, activation: 'relu'}));
model.add(tf.layers.dense({units: classNum, activation: 'softmax'}));
return model;
}
function createDenseModel() {
const model = tf.sequential();
model.add(tf.layers.flatten({inputShape: [IMAGE_H, IMAGE_W, 3]}));
model.add(tf.layers.dense({units: 42, activation: 'relu'}));
model.add(tf.layers.dense({units: classNum, activation: 'softmax'}));
return model;
}
async function train(model, fitCallbacks) {
ui.logStatus('Training model...');
const optimizer = 'rmsprop';
model.compile({
optimizer,
loss: 'categoricalCrossentropy',
metrics: ['accuracy'],
});
const batchSize = 64;
const trainEpochs = ui.getTrainEpochs();
let trainBatchCount = 0;
const trainData = data.getTrainData();
const valData = data.getValData();
const testData = data.getTestData();
await model.fit(trainData.xs, trainData.labels, {
batchSize:batchSize,
validationData:[valData.xs,valData.labels],
shuffle:true,
epochs: trainEpochs,
callbacks: fitCallbacks
});
console.log("complete");
const classNames = ['cat','dog'];
const [preds, labels] = doPrediction(model,testData);
const classAccuracy = await tfvis.metrics.perClassAccuracy(labels, preds);
const container = { name: 'Accuracy', tab: 'Evaluation' };
tfvis.show.perClassAccuracy(container, classAccuracy, classNames);
}
function doPrediction(model,testData) {
const testxs = testData.xs;
const labels = testData.labels.argMax([-1]);
const preds = model.predict(testxs).argMax([-1]);
testxs.dispose();
return [preds, labels];
}
function createModel() {
let model;
const modelType = ui.getModelTypeId();
if (modelType === 'ConvNet') {
model = createConvModel();
} else if (modelType === 'DenseNet') {
model = createDenseModel();
} else {
throw new Error(`Invalid model type: ${modelType}`);
}
return model;
}
async function watchTraining(model) {
const metrics = ['loss', 'val_loss', 'acc', 'val_acc'];
const container = {
name: 'charts', tab: 'Training', styles: { height: '1000px' }
};
const callbacks = tfvis.show.fitCallbacks(container, metrics);
return train(model, callbacks);
}
let data;
async function load() {
tf.disableDeprecationWarnings();
classNum = await localforage.getItem('classNum');
tfvis.visor();
data = new MnistData();
await data.load();
}
ui.setTrainButtonCallback(async () => {
ui.logStatus('Loading data...');
await load();
ui.logStatus('Creating model...');
const model = createModel();
model.summary();
ui.logStatus('Starting model training...');
await watchTraining(model);
});
data.js:
export const IMAGE_H = 64;
export const IMAGE_W = 64;
const IMAGE_SIZE = IMAGE_H * IMAGE_W;
let NUM_CLASSES = 0;
let trainImagesLabels;
let testLabels;
let trainImages ;
let testImages ;
let validateImages;
let validateLabels;
let validateSplit = 0.2;
let modelId;
let classNum;
/**
* A class that fetches the sprited MNIST dataset and provide data as
* tf.Tensors.
*/
export class MnistData {
constructor() {}
//shuffle
static shuffleSwap(arr1,arr2) {
if(arr1.length == 1) return {arr1,arr2};
let i = arr1.length;
while(--i > 1) {
let j = Math.floor(Math.random() * (i+1));
[arr1[i], arr1[j]] = [arr1[j], arr1[i]];
[arr2[i], arr2[j]] = [arr2[j], arr2[i]];
}
return {arr1,arr2};
}
async load() {
//get data from localforage
this.trainImages = await localforage.getItem('dataset');
this.trainImagesLabels = await localforage.getItem('datasetLabel');
this.modelId = await localforage.getItem('modelId');
this.classNum = await localforage.getItem('classNum');
this.trainImages.shift();
this.trainImagesLabels.shift();
//construct the validateData
let status = false;
let maxVal = Math.floor(this.trainImages.length * 0.2);
this.validateImages = new Array();
this.validateLabels = new Array();
for(let i=0;i<maxVal;i++){
if(status){
this.validateImages.push(this.trainImages.pop());
this.validateLabels.push(this.trainImagesLabels.pop());
status = false;
}else{
this.validateImages.push(this.trainImages.shift());
this.validateLabels.push(this.trainImagesLabels.shift());
status = true;
}
}
//construct the testData
this.testImages = new Array();
this.testLabels = new Array();
for(let i=0;i<maxVal;i++){
if(status){
this.testImages.push(this.trainImages.pop());
this.testLabels.push(this.trainImagesLabels.pop());
status = false;
}else{
this.testImages.push(this.trainImages.shift());
this.testLabels.push(this.trainImagesLabels.shift());
status = true;
}
}
//shuffle
let val = MnistData.shuffleSwap(this.validateImages,this.validateLabels);
this.validateImages = val.arr1;
this.validateLabels = val.arr2;
let train = MnistData.shuffleSwap(this.trainImages,this.trainImagesLabels);
this.trainImages = train.arr1;
this.trainImagesLabels = train.arr2;
}
getTrainData() {
const xs = tf.tensor4d(this.trainImages);
const labels = tf.oneHot(tf.tensor1d(this.trainImagesLabels,'int32'),this.classNum);
return {xs, labels};
}
getValData() {
const xs = tf.tensor4d(this.validateImages);
const labels = tf.oneHot(tf.tensor1d(this.validateLabels,'int32'),this.classNum);
return {xs, labels};
}
getTestData() {
const xs = tf.tensor4d(this.testImages);
const labels = tf.oneHot(tf.tensor1d(this.testLabels,'int32'),this.classNum);
return {xs, labels};
}
}
This is how I add the pictures at the beginning:
//getclassNum
function getClassNum(files) {
let classArr = new Array();
let dirArr = new Array();
let imageNum = 0;
for (let i = 0; i < files.length; i++) {
if (files[i].type.split('/')[0] == 'image' && files[i].type.split('/')[1] == 'jpeg') {
dirArr = files[i].webkitRelativePath.split('/');
let currentClassIndex = dirArr.length - 2;
let isExist = false;
if (currentClassIndex <= 0)
isExist = true;
else {
imageNum++;
}
if (classArr == null) {
classArr.push(dirArr[currentClassIndex]);
}
for (let j = 0; j < classArr.length; j++) {
if (classArr[j] == dirArr[currentClassIndex]) {
isExist = true;
}
}
if (!isExist) {
classArr.push(dirArr[currentClassIndex]);
}
}
}
let classNum = classArr.length;
return {classNum, imageNum, classArr};
}
//get nested array
function getDataset(files, classArr,imgNum) {
let trainLabelArr = new Array();
let trainDataArr = new Array();
for (let i = 0; i < files.length; i++) {
if (files[i].type.split('/')[0] == 'image'&& files[i].type.split('/')[1] == 'jpeg') {
let dirArr = files[i].webkitRelativePath.split('/');
let currentClassIndex = dirArr.length - 2;
if (currentClassIndex >= 0) {
for(let j=0;j<classArr.length;j++){
if(dirArr[currentClassIndex]==classArr[j]){
let reader = new FileReader();
reader.readAsDataURL(files[i]);
reader.onload = function () {
document.getElementById('image').setAttribute( 'src', reader.result);
let tensor= tf.browser.fromPixels(document.getElementById('image'));
let nest = tensor.arraySync();
trainDataArr.push(nest);
trainLabelArr.push(j);
}
}
}
}
}
}
return{trainDataArr,trainLabelArr,trainDataLength}
}
//getfiles
async function fileChange(that) {
let files = that.files;
let container = getClassNum(files);
let data = getDataset(files, container.classArr,container.imageNum);
let trainDataArr = data.trainDataArr;
let trainLabelArr = data.trainLabelArr;
setTimeout(function () {
localforage.setItem('dataset',trainDataArr,function (err,result) {
});
localforage.setItem('datasetLabel',trainLabelArr,function (err,result) {
});
localforage.setItem('modelId',modelId,function (err,result) {
});
localforage.setItem('classNum',container.classNum,function (err,result) {
});
},container.imageNum * 10);
}
}
Let me answer my own question. After a day of testing, I found that this model needs a lot of data: each category requires at least 1,000 images. If there is not enough training data, the model can only output one result. Moreover, this model performs very well at recognizing objects with few distinguishing features, such as letters and signs, and not so well at recognizing animals or natural environments.
