Adding a Clickable Dot to a World Map Using JQuery VMap - jqvmap

I've recently inherited a project which is using jqvmap.
There's a map of the world with a column of clickable dots on it:
Whilst I have been able to find the code which seems to add those dots, I cannot figure out how this data was generated:
// jqVMap 'addMap' extension: registers a custom 'world_en' map definition.
// Each entry under "paths" maps a region code to an SVG path (drawn on a
// 950x550 canvas) plus a tooltip "name". The country paths are truncated in
// this excerpt (the literal "..." runs). The "demo" entries are small
// circles: each path is two SVG arc commands centred near x=914, differing
// only in the y coordinate — that is what produces the vertical column of
// clickable dots. To add another dot, copy one "demo" entry, give it a new
// key/name, and shift the y values (the existing dots step by 34 units).
jQuery.fn.vectorMap('addMap', 'world_en', {
"width": 950, "height": 550, "paths": {
"id": { "path": "M781.68,324.4l-2.31,8.68l-12.53,4.", ... ... ... "name": "Pakistan" },
"in": { "path": "M670.98,313.01l4.58-2.24l2.72-9,"... ... ... "name": "India" },
"np": { "path": "M671.19,242.56l0.46,4.2, ... ... ... "name": "Greece" },
// Clickable dot regions (circle paths, stacked vertically at x≈914):
"demo": {
"path": "M922.59,148a8.59,8.59,0,1,1-8.59-8.59C918.76,139,922.59,143.27,922.59,148Z",
"name": "Demo"
},
"Demo2": {
"path": "M922.59,80A8.59,8.59,0,1,1,914,71.42C918.76,71,922.59,75.27,922.59,80Z",
"name": "Demo2"
},
"Demo3": {
"path": "M922.59,114a8.59,8.59,0,1,1-8.59-8.59C918.76,105,922.59,109.27,922.59,114Z",
"name": "Demo3"
},
"Demo4": {
"path": "M922.59,182a8.59,8.59,0,1,1-8.59-8.59C918.76,173,922.59,177.27,922.59,182Z",
"name": "Demo4"
},
"Demo5": {
"path": "M922.59,216a8.59,8.59,0,1,1-8.59-8.59C918.76,207,922.59,211.27,922.59,216Z",
"name": "Demo5"
}
}
});
I need to add another dot.
Is there a tool which the previous developer has used?
Is there a guide somewhere which explains how to go about doing such a thing?
I've searched Google and looked at the Github site for vmap and have not found anything which could explain this.
Here's the code in jQuery's ready function which sets the map up:
// Initialises the jqVMap world map and wires the clickable "dot" regions to
// the server-selection form.
// Bug fix: the original onRegionSelect was missing the closing brace of the
// 'demo5' branch (a syntax error). The repetitive if/else chain is replaced
// with a per-region config lookup.
$(document).ready(function () {
    // Reset the hidden form fields and UI until the user picks a server.
    $("#hidCountrySU").val('');
    $("#hidServerCode").val('');
    $('#lblServerName').text(' Select server to test ');
    $('#btnSubmitServer').attr('disabled', 'disabled');

    // List of regions we'll let clicks through for.
    let enabledRegions = ['demo', 'demo2', 'demo3', 'demo4', 'demo5'];
    let map = null;
    // Store currently selected region.
    let currentRegion = 'demo';

    // Form values and label text per selectable region.
    // NOTE(review): 'demo3' originally wrote 'Demo3' (capital D) into
    // #hidServerCode while the others are lower-case — preserved here;
    // confirm the backend really expects that casing.
    const regionConfig = {
        demo: { server: 'demo', label: ' demo testing ' },
        demo2: { server: 'demo2', label: ' demo2 testing ' },
        demo3: { server: 'Demo3', label: ' Demo3 testing ' },
        demo4: { server: 'demo4', label: ' B Demo ' },
        demo5: { server: 'demo5', label: ' demo5 ' }
    };

    map = $('#vmap').vectorMap({
        map: 'world_en',
        backgroundColor: '#fff',
        color: '#ffffff',
        hoverOpacity: 0.7,
        selectedColor: '#F90606',
        enableZoom: false,
        showTooltip: true,
        values: sample_data,
        scaleColors: ['#C8EEFF', '#006491'],
        colors: {
            demo: '#F78774',
            demo2: '#F78774',
            demo3: '#F78774',
            demo4: '#F78774',
            demo5: '#F78774'
        },
        normalizeFunction: 'polynomial',
        selectedRegions: ['aue'],
        onRegionClick: function (event, code, region) {
            // Check if this is an enabled region, and not the currently selected one.
            if (enabledRegions.indexOf(code) === -1) {
                // Not an enabled region.
                event.preventDefault();
            } else {
                // Enabled region. Update newly selected region.
                currentRegion = code;
                $('.selectedRegionColor').removeClass('selectedRegionColor');
                $('#jqvmap1_' + code).addClass('selectedRegionColor');
            }
        },
        onRegionSelect: function (event, code, region) {
            localStorage.setItem("locationSelected", "true");
            const cfg = regionConfig[code];
            if (!cfg) {
                return; // not one of our clickable dots
            }
            $("#hidCountrySU").val('countryAU');
            $("#hidServerCode").val(cfg.server);
            $('#lblServerName').text(cfg.label);
            $('#btnSubmitServer').removeAttr('disabled');
            // demo5 auto-submits whichever of the two forms exists on the page.
            if (code === 'demo5') {
                if ($("#form_ServerSubmitFromServer").length === 1) {
                    $("#form_ServerSubmitFromServer").submit();
                } else if ($("#form_ServerSubmitFormServer2").length === 1) {
                    $("#form_ServerSubmitFormServer2").submit();
                }
            }
        },
        onLabelShow: function (event, label, code) {
            // Suppress tooltips for regions that are not selectable.
            if (enabledRegions.indexOf(code) === -1) {
                event.preventDefault();
            }
        }
    });
});
Obviously the first snippet is extending the vectorMap.
I can only assume the previous dev used some kind of image editing program like Inkscape to add the dot and somehow get the path for it. But I just don't know.
Thanks

Related

Add slot validation using Dialog.Delegate directive in Alexa Skill

I'm creating an Alexa skill where you can order a drink, there are two options: tea or coffee. I decided to use Dialog.Delegate directives to handle multi-turn conversation and ElicitSlot directives to control slot requests depending on the value of the drink 'tea or coffee'. I'm basically trying to replicate the 'Combine delegation and manual control to handle complex dialogs' example from the Alexa documentation: https://developer.amazon.com/en-US/docs/alexa/custom-skills/delegate-dialog-to-alexa.html#combine-delegation-and-manual-control-to-handle-complex-dialogs
Everything works fine, but the Dialog.Delegate directive is throwing an error. The directive correctly invokes the elicitation and confirmationStatus prompts but does not invoke the validations prompts, and when an invalid value is entered; for example 'coke'; it just repeats the elicitation prompt which makes the conversation unnatural and unintuitive. According to the documentation, nothing else needs to be done to add the validations, just add Dialog.Delegate, this is how it is in the documentation example:
Expected result
User: Alexa, tell My Coffee Shop to start my order
Alexa: Would you like coffee or tea?
User: shoes (Alexa sends the skill an IntentRequest with the OrderIntent. The drink slot contains the invalid value shoes.)
Because the dialog is not complete, the skill returns Dialog.Delegate.
Alexa: Shoes is not an item on the menu. Which would you like, coffee or tea? (Alexa responds with a prompt defined in the slot validation rules for the drink slot.)
Actual result
Alexa: Would you like coffee or tea?
User: shoes (Alexa sends the skill an IntentRequest with the OrderIntent. The drink slot contains the invalid value shoes.)
Because the dialog is not complete, the skill returns Dialog.Delegate.
Alexa: Alexa: Would you like coffee or tea?
It replies with the same elicitation prompt when it should respond with the validation prompt.
I would appreciate it if you could help me understand why this is happening and tell me what I am doing wrong. I know that I could add a function in my code that validates the values ​​of the drink and if this value is not correct, ask the user again with ElicitSlot Directive but my idea is to use Dialog.Delegate, and this directive is supposed to add the validation prompts. Thanks Alexa Devs!
Note: the values ​​of the slots, sample utterances, and prompts are in Spanish; because I'm from Colombia!; but this does not affect the logic or operation of the skill.
My code:
Interaction Model
{
"interactionModel": {
"languageModel": {
"invocationName": "cafeteria prueba",
"intents": [
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "HelloWorldIntent",
"slots": [],
"samples": [
"hola",
"como estás",
"di hola mundo",
"di hola",
"hola mundo"
]
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "OrderIntent",
"slots": [
{
"name": "drink",
"type": "drink",
"samples": [
"{drink}"
]
},
{
"name": "coffeeRoast",
"type": "coffeeRoast"
},
{
"name": "teaType",
"type": "teaType"
}
],
"samples": [
"quiero ordenar una bebida"
]
}
],
"types": [
{
"name": "drink",
"values": [
{
"name": {
"value": "te"
}
},
{
"name": {
"value": "café"
}
}
]
},
{
"name": "coffeeRoast",
"values": [
{
"name": {
"value": "doble"
}
},
{
"name": {
"value": "normal"
}
},
{
"name": {
"value": "negro"
}
}
]
},
{
"name": "teaType",
"values": [
{
"name": {
"value": "piña"
}
},
{
"name": {
"value": "verde"
}
},
{
"name": {
"value": "manzanilla"
}
}
]
}
]
},
"dialog": {
"intents": [
{
"name": "OrderIntent",
"delegationStrategy": "SKILL_RESPONSE",
"confirmationRequired": false,
"prompts": {},
"slots": [
{
"name": "drink",
"type": "drink",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.955195256762.342534459016"
},
"validations": [
{
"type": "hasEntityResolutionMatch",
"prompt": "Slot.Validation.955195256762.342534459016.331746291219"
}
]
},
{
"name": "coffeeRoast",
"type": "coffeeRoast",
"confirmationRequired": false,
"elicitationRequired": false,
"prompts": {}
},
{
"name": "teaType",
"type": "teaType",
"confirmationRequired": false,
"elicitationRequired": false,
"prompts": {}
}
]
}
],
"delegationStrategy": "ALWAYS"
},
"prompts": [
{
"id": "Elicit.Slot.955195256762.342534459016",
"variations": [
{
"type": "PlainText",
"value": "¿qué quieres tomar café o te?"
}
]
},
{
"id": "Slot.Validation.955195256762.342534459016.331746291219",
"variations": [
{
"type": "PlainText",
"value": "{drink} no está incluido en el menú ¿quieres café o te?"
}
]
}
]
}
}
Lambda (index.js)
I have not modified the other files, they are as they are created by Alexa Console auto-hosted node.js with the Start From Scratch template
/* *
* This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK (v2).
* Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
* session persistence, api calls, and more.
* */
const Alexa = require('ask-sdk-core');
// Handles the LaunchRequest fired when the skill is opened without an intent.
const LaunchRequestHandler = {
    canHandle(handlerInput) {
        const requestType = Alexa.getRequestType(handlerInput.requestEnvelope);
        return requestType === 'LaunchRequest';
    },
    handle(handlerInput) {
        const greeting = 'Welcome, you can say Hello or Help. Which would you like to try?';
        const builder = handlerInput.responseBuilder;
        return builder
            .speak(greeting)
            .reprompt(greeting)
            .getResponse();
    }
};
// Elicits the coffeeRoast slot once the user has chosen coffee but has not
// yet said which roast they want.
const CoffeeGivenOrderIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === "IntentRequest"
            && request.intent.name === "OrderIntent"
            && request.intent.slots.drink.value
            && request.intent.slots.drink.value === 'café'
            && !request.intent.slots.coffeeRoast.value;
    },
    handle(handlerInput) {
        const roastPrompt = '¿Cómo quieres el café normal, negro o doble?';
        return handlerInput.responseBuilder
            .speak(roastPrompt)
            .reprompt(roastPrompt)
            .addElicitSlotDirective('coffeeRoast')
            .getResponse();
    }
};
// Elicits the teaType slot once the user has chosen tea but has not yet said
// which kind they want.
// Bug fix: the reprompt was leftover English sample text ("black, green,
// oolong, or white tea") offering tea types that do not exist in this
// skill's interaction model (manzanilla, piña, verde); it now repeats the
// same Spanish prompt as the speech output.
const TeaGivenOrderIntentHandler = {
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === "IntentRequest"
            && handlerInput.requestEnvelope.request.intent.name === "OrderIntent"
            && handlerInput.requestEnvelope.request.intent.slots.drink.value
            && handlerInput.requestEnvelope.request.intent.slots.drink.value === 'te'
            && !handlerInput.requestEnvelope.request.intent.slots.teaType.value;
    },
    handle(handlerInput) {
        const teaPrompt = "¿Quieres el te de manzanilla, piña o verde?";
        return handlerInput.responseBuilder
            .speak(teaPrompt)
            .reprompt(teaPrompt)
            .addElicitSlotDirective('teaType')
            .getResponse();
    }
};
// Delegates dialog management back to Alexa while OrderIntent is not yet
// COMPLETED, so the model's elicitation and validation prompts run.
// Bug fixes: the original read `currentIntent.slots.drink.validations.type`,
// but slot objects in an IntentRequest carry no `validations` property (the
// validations live in the interaction model), so that access threw a
// TypeError; it also called .speak() alongside Dialog.Delegate, which is not
// supported — a delegate response must not include output speech.
const StartedInProgressOrderIntentHandler = {
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === "IntentRequest"
            && handlerInput.requestEnvelope.request.intent.name === "OrderIntent"
            && handlerInput.requestEnvelope.request.dialogState !== 'COMPLETED';
    },
    handle(handlerInput) {
        const currentIntent = handlerInput.requestEnvelope.request.intent;
        return handlerInput.responseBuilder
            .addDelegateDirective(currentIntent)
            .getResponse();
    }
};
// Fulfils OrderIntent once the dialog is COMPLETED: reads the chosen drink
// and its matching detail slot (roast for coffee, tea type for tea, 'agua'
// otherwise) and announces the final order.
const CompletedOrderIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === "IntentRequest"
            && request.intent.name === "OrderIntent"
            && request.dialogState === "COMPLETED";
    },
    handle(handlerInput) {
        const { slots } = handlerInput.requestEnvelope.request.intent;
        const drink = slots.drink.value;
        let type;
        switch (drink) {
            case 'café':
                type = slots.coffeeRoast.value;
                break;
            case 'te':
                type = slots.teaType.value;
                break;
            default:
                type = 'agua';
        }
        return handlerInput.responseBuilder
            .speak(`Toma tu ${type} ${drink}`)
            .getResponse();
    }
};
// Responds to AMAZON.HelpIntent with usage guidance.
const HelpIntentHandler = {
    canHandle(handlerInput) {
        const { requestEnvelope } = handlerInput;
        return Alexa.getRequestType(requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(requestEnvelope) === 'AMAZON.HelpIntent';
    },
    handle(handlerInput) {
        const helpText = 'You can say hello to me! How can I help?';
        return handlerInput.responseBuilder
            .speak(helpText)
            .reprompt(helpText)
            .getResponse();
    }
};
// Ends the session on AMAZON.CancelIntent or AMAZON.StopIntent.
const CancelAndStopIntentHandler = {
    canHandle(handlerInput) {
        const { requestEnvelope } = handlerInput;
        if (Alexa.getRequestType(requestEnvelope) !== 'IntentRequest') {
            return false;
        }
        const intentName = Alexa.getIntentName(requestEnvelope);
        return intentName === 'AMAZON.CancelIntent' || intentName === 'AMAZON.StopIntent';
    },
    handle(handlerInput) {
        return handlerInput.responseBuilder
            .speak('Goodbye!')
            .getResponse();
    }
};
/* *
* FallbackIntent triggers when a customer says something that doesn’t map to any intents in your skill
* It must also be defined in the language model (if the locale supports it)
* This handler can be safely added but will be ignored in locales that do not support it yet
* */
// AMAZON.FallbackIntent: fires when the utterance matches no other intent
// (only in locales that support FallbackIntent).
const FallbackIntentHandler = {
    canHandle(handlerInput) {
        const { requestEnvelope } = handlerInput;
        return Alexa.getRequestType(requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(requestEnvelope) === 'AMAZON.FallbackIntent';
    },
    handle(handlerInput) {
        const apology = 'Sorry, I don\'t know about that. Please try again.';
        return handlerInput.responseBuilder
            .speak(apology)
            .reprompt(apology)
            .getResponse();
    }
};
/* *
* SessionEndedRequest notifies that a session was ended. This handler will be triggered when a currently open
* session is closed for one of the following reasons: 1) The user says "exit" or "quit". 2) The user does not
* respond or says something that does not match an intent defined in your voice model. 3) An error occurs
* */
// Logs SessionEndedRequest (user exit, timeout, or error) and returns an
// empty response — no speech is allowed after the session has ended.
const SessionEndedRequestHandler = {
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
    },
    handle(handlerInput) {
        const envelope = JSON.stringify(handlerInput.requestEnvelope);
        console.log(`~~~~ Session ended: ${envelope}`);
        // Any cleanup logic goes here.
        return handlerInput.responseBuilder.getResponse(); // deliberately empty response
    }
};
/* *
* The intent reflector is used for interaction model testing and debugging.
* It will simply repeat the intent the user said. You can create custom handlers for your intents
* by defining them above, then also adding them to the request handler chain below
* */
// Debug helper: echoes back whichever intent was triggered. It matches every
// IntentRequest, so it must stay last in the request-handler chain.
const IntentReflectorHandler = {
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
    },
    handle(handlerInput) {
        const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
        return handlerInput.responseBuilder
            .speak(`You just triggered ${intentName}`)
            //.reprompt('add a reprompt if you want to keep the session open for the user to respond')
            .getResponse();
    }
};
/**
* Generic error handling to capture any syntax or routing errors. If you receive an error
* stating the request handler chain is not found, you have not implemented a handler for
* the intent being invoked or included it in the skill builder below
* */
// Catch-all error handler: logs the failure and apologizes to the user,
// keeping the session open so they can try again.
const ErrorHandler = {
    canHandle() {
        // Handles every error that reaches it.
        return true;
    },
    handle(handlerInput, error) {
        console.log(`~~~~ Error handled: ${JSON.stringify(error)}`);
        const apology = 'Sorry, I had trouble doing what you asked. Please try again.';
        return handlerInput.responseBuilder
            .speak(apology)
            .reprompt(apology)
            .getResponse();
    }
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom
* */
exports.handler = Alexa.SkillBuilders.custom()
.addRequestHandlers(
LaunchRequestHandler,
CoffeeGivenOrderIntentHandler,
TeaGivenOrderIntentHandler,
StartedInProgressOrderIntentHandler,
CompletedOrderIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/hello-world/v1.2')
.lambda();
I have the same problem when I create an "Accept only Slot Type's values and synonyms" validation rule. I tried it in Evaluate Model but the slot value is always "not filled". If I try it in the Simulator, the slot value is empty. The slot validation speech never plays. So, I catch this validation in Lambda.
I had the same problem, turns out that validation works only if slot filling is enabled.
If the slot is not required and validation fails it returns an empty slot, if it is required it forces validation.
This comes from my experience, it might be wrong but for me it worked

Logic App - change JSON format using native actions only

I have a below JSON input coming from a source system:
{
"d":{
"results":[
{
"userId":"123",
"employmentType":"Full time",
"employment":{
"compansation":{
"results":[
{
"payments":{
"results":[
{
"payType":"Annual Salary",
"value":"70000"
},
{
"payType":"Annual Leave",
"value":"1000"
},
{
"payType":"Other Payments",
"value":"2000"
}
]
}
}
]
}
}
},
{
"userId":"456",
"employmentType":"Full time",
"employment":{
"compansation":{
"results":[
{
"payments":{
"results":[
{
"payType":"Annual Salary",
"value":"80000"
},
{
"payType":"Annual Leave",
"value":"2000"
},
{
"payType":"Other Payments",
"value":"3000"
}
]
}
}
]
}
}
},
{
"userId":"123",
"employmentType":"Full time",
"employment":{
"compansation":{
"results":[
{
"payments":{
"results":[
{
"payType":"Annual Salary",
"value":"90000"
},
{
"payType":"Annual Leave",
"value":"3000"
},
{
"payType":"Other Payments",
"value":"4000"
}
]
}
}
]
}
}
}
]
}
}
I want to filter "employment/compansation/payments" to use "payType" "Annual Salary" and "Annual Leave" only, and filter out "payType: Other Payments". Then sum both "Annual Salary" and "Annual Leave" and generate final output like:
[
{
"userId":"123",
"employmentType":"Full time",
"totalSalary":"71000"
},
{
"userId":"456",
"employmentType":"Full time",
"totalSalary":"82000"
},
{
"userId":"789",
"employmentType":"Full time",
"totalSalary":"93000"
}
]
Can I achieve this by only using native Logic App actions? Without using Functions or even JavaScript code? And how?
You can use the "Filter Array" connector in Logic App.
This might be a duplicate question, with similar question being answered here.

Alexa Developer Console replying with "There was a problem with the requested skill's response"

I have been trying to resolve this issue for the past couple of hours with no success. Here is my code:
Lambda code:
/*eslint-disable func-names */
/* eslint quote-props: ["error", "consistent"]*/
// There are three sections, Text Strings, Skill Code, and Helper Function(s).
// You can copy and paste the contents as the code for a new Lambda function, using the alexa-skill-kit-sdk-factskill template.
// This code includes helper functions for compatibility with versions of the SDK prior to 1.0.9, which includes the dialog directives.
// 1. Text strings =====================================================================================================
// Modify these strings and messages to change the behavior of your Lambda function
// Module-level mutable speech buffers; every intent handler below overwrites
// these before emitting a response.
let speechOutput;
let reprompt;
// Spoken on LaunchRequest (skill opened without an intent).
let welcomeOutput = "Welcome to Tanpura! You can ask me to play at any pitch. How can I help you today?";
let welcomeReprompt = "sample re-prompt text";
// 2. Skill Code =======================================================================================================
// NOTE(review): this directive is not at the top of the file, so it is an
// inert expression statement here — the module does NOT run in strict mode.
"use strict";
const Alexa = require('alexa-sdk');
const APP_ID = undefined; // TODO replace with your app ID (OPTIONAL).
speechOutput = '';
// alexa-sdk (v1) intent handler map. Bug fixes relative to the original:
//  - PlayNoteIntent built its speech with a bare, unquoted <audio .../> tag,
//    which is a JavaScript syntax error — the direct cause of "There was a
//    problem with the requested skill's response";
//  - the accidental check ran indexOf against the string 'natural' instead
//    of the list of valid accidentals, so 'flat'/'sharp' never applied;
//  - speechReprompt was computed but never passed to this.emit;
//  - noteSlot was lower-cased before checking that it exists.
const handlers = {
    'LaunchRequest': function () {
        this.emit(':ask', welcomeOutput, welcomeReprompt);
    },
    'AMAZON.HelpIntent': function () {
        speechOutput = 'You can ask me to play at any pitch, such as C, C sharp, D, D sharp, and so on. How can I help you?';
        reprompt = '';
        this.emit(':ask', speechOutput, reprompt);
    },
    'AMAZON.CancelIntent': function () {
        // NOTE(review): this asks a question but ':tell' ends the session —
        // confirm whether ':ask' was intended.
        speechOutput = 'Would you like me to play another pitch?';
        this.emit(':tell', speechOutput);
    },
    'AMAZON.StopIntent': function () {
        speechOutput = 'I hope I helped you. Goodbye!';
        this.emit(':tell', speechOutput);
    },
    'SessionEndedRequest': function () {
        speechOutput = '';
        //this.emit(':saveState',true);//uncomment to save attributes to db on session end
        this.emit(':tell', speechOutput);
    },
    'AMAZON.FallbackIntent': function () {
        speechOutput = "I currently do not support your request. However, you can ask me to play at any pitch, and I will do so. How can I help?";
        this.emit(":ask", speechOutput, speechOutput);
    },
    'AMAZON.NavigateHomeIntent': function () {
        speechOutput = "Welcome to Tanpura! You can ask me to play at any pitch. How can I help you today?";
        this.emit(":ask", speechOutput, speechOutput);
    },
    'PlayNoteIntent': function () {
        speechOutput = '';
        // Resolve the requested pitch and (optional) accidental from the slots.
        let noteSlot = resolveCanonical(this.event.request.intent.slots.note);
        console.log("User selected pitch: " + noteSlot);
        let accidentalSlot = resolveCanonical(this.event.request.intent.slots.accidental);
        console.log("User selected accidental: " + accidentalSlot);
        // Audio sample URL per note letter and accidental; enharmonic
        // spellings (e.g. A flat) reuse the neighbouring sample.
        var notes = {
            'a': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Gsharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_A.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Asharp.mp3'
            },
            'b': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Asharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_B.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_C.mp3'
            },
            'c': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_B.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_C.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Csharp.mp3'
            },
            'd': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Csharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_D.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Dsharp.mp3'
            },
            'e': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Dsharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_E.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_F.mp3'
            },
            'f': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_E.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_F.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Fsharp.mp3'
            },
            'g': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Fsharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_G.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Gsharp.mp3'
            }
        };
        // Guard: resolveCanonical may return undefined when no pitch was heard.
        var note = noteSlot ? noteSlot.toLowerCase() : '';
        var speechReprompt = "";
        if (noteSlot && notes[note]) {
            var validAccidentals = ['flat', 'natural', 'sharp'];
            var accidental = 'natural';
            if (accidentalSlot && validAccidentals.indexOf(accidentalSlot) > -1) {
                accidental = accidentalSlot;
            }
            var audioSrc = notes[note][accidental];
            // The <audio> tag must live inside the string literal; the
            // original left it unquoted, which did not even parse.
            speechOutput = "Ok. I will play " + noteSlot + " " + accidental + '<audio src="' + audioSrc + '" />';
            speechReprompt = "Would you like me to continue playing?";
        } else {
            speechOutput = "The note you have requested is not supported yet.";
            speechReprompt = "However, Tanpura does support A, B, C, D, E, F, G, and all of the accidentals in between.";
        }
        this.emit(":ask", speechOutput, speechReprompt);
    },
    'Unhandled': function () {
        speechOutput = "Tanpura didn't quite understand what you wanted. Please try rephrasing your request.";
        this.emit(':ask', speechOutput, speechOutput);
    }
};
exports.handler = (event, context) => {
const alexa = Alexa.handler(event, context);
alexa.appId = APP_ID;
// To enable string internationalization (i18n) features, set a resources object.
//alexa.resources = languageStrings;
alexa.registerHandlers(handlers);
//alexa.dynamoDBTableName='DYNAMODB_TABLE_NAME';//uncomment this line to save attributes to DB
alexa.execute();
};
// END of Intent Handlers {} ========================================================================================
// 3. Helper Function =================================================================================================
// Returns the canonical value for a slot: if entity resolution matched a
// synonym, the matched value's name; otherwise the raw spoken value.
// Improvement: the original reached through the resolution chain inside a
// try/catch (exceptions as control flow, with a console.log on every miss);
// this version checks each level explicitly and falls back cleanly.
function resolveCanonical(slot) {
    var authority = slot
        && slot.resolutions
        && slot.resolutions.resolutionsPerAuthority
        && slot.resolutions.resolutionsPerAuthority[0];
    var match = authority
        && authority.values
        && authority.values[0]
        && authority.values[0].value;
    if (match) {
        return match.name;
    }
    return slot.value;
}
// Drives multi-turn dialog for ASK SDK versions prior to 1.0.9 by building
// Dialog.Delegate responses by hand. Must be invoked with a handler's `this`
// (e.g. delegateSlotCollection.call(this)) so that this.event,
// this.attributes, this.handler, this.isOverridden and this.emit exist.
// Returns the filled intent once dialogState is COMPLETED; otherwise emits a
// delegate response and returns undefined.
function delegateSlotCollection(){
console.log("in delegateSlotCollection");
console.log("current dialogState: "+this.event.request.dialogState);
// First turn of the dialog: optionally pre-fill slots, then delegate.
if (this.event.request.dialogState === "STARTED") {
console.log("in Beginning");
let updatedIntent= null;
// updatedIntent=this.event.request.intent;
//optionally pre-fill slots: update the intent object with slot values for which
//you have defaults, then return Dialog.Delegate with this updated intent
// in the updatedIntent property
//this.emit(":delegate", updatedIntent); //uncomment this is using ASK SDK 1.0.9 or newer
//this code is necessary if using ASK SDK versions prior to 1.0.9
if(this.isOverridden()) {
return;
}
// Hand-build the delegate response via the sibling helpers below.
this.handler.response = buildSpeechletResponse({
sessionAttributes: this.attributes,
directives: getDialogDirectives('Dialog.Delegate', updatedIntent, null),
shouldEndSession: false
});
this.emit(':responseReady', updatedIntent);
} else if (this.event.request.dialogState !== "COMPLETED") {
console.log("in not completed");
// Mid-dialog turn: return a Dialog.Delegate directive with no updatedIntent property.
//this.emit(":delegate"); //uncomment this is using ASK SDK 1.0.9 or newer
//this code necessary is using ASK SDK versions prior to 1.0.9
if(this.isOverridden()) {
return;
}
this.handler.response = buildSpeechletResponse({
sessionAttributes: this.attributes,
directives: getDialogDirectives('Dialog.Delegate', null, null),
shouldEndSession: false
});
this.emit(':responseReady');
} else {
console.log("in completed");
console.log("returning: "+ JSON.stringify(this.event.request.intent));
// Dialog is now complete and all required slots should be filled,
// so call your normal intent handler.
return this.event.request.intent;
}
}
// Picks one element of the given array of words/phrases uniformly at random.
function randomPhrase(array) {
    const index = Math.floor(Math.random() * array.length);
    return array[index];
}
// Returns the named slot's value lower-cased when it was filled, or false
// when the slot is missing or has no value.
function isSlotValid(request, slotName) {
    const slot = request.intent.slots[slotName];
    //console.log("request = "+JSON.stringify(request)); //uncomment if you want to see the request
    if (!slot || !slot.value) {
        // No value captured for this slot.
        return false;
    }
    return slot.value.toLowerCase();
}
//These functions are here to allow dialog directives to work with SDK versions prior to 1.0.9
//will be removed once Lambda templates are updated with the latest SDK
// Normalizes a speech option — either a plain string or a {type, speech}
// object — into the outputSpeech shape the Alexa response format expects.
function createSpeechObject(optionsParam) {
    if (optionsParam && optionsParam.type === 'SSML') {
        return {
            type: optionsParam.type,
            ssml: optionsParam.speech
        };
    }
    // Plain strings fall through here: no .type/.speech, so the defaults apply.
    return {
        type: optionsParam.type || 'PlainText',
        text: optionsParam.speech || optionsParam
    };
}
// Assembles a full Alexa response envelope (version 1.0) from the given
// options: output speech, reprompt, dialog directives, card, session
// attributes, and the shouldEndSession flag.
function buildSpeechletResponse(options) {
    const response = {
        shouldEndSession: options.shouldEndSession
    };
    if (options.output) {
        response.outputSpeech = createSpeechObject(options.output);
    }
    if (options.reprompt) {
        response.reprompt = {
            outputSpeech: createSpeechObject(options.reprompt)
        };
    }
    if (options.directives) {
        response.directives = options.directives;
    }
    if (options.cardTitle && options.cardContent) {
        response.card = {
            type: 'Simple',
            title: options.cardTitle,
            content: options.cardContent
        };
        const image = options.cardImage;
        if (image && (image.smallImageUrl || image.largeImageUrl)) {
            // Upgrade to a Standard card, which carries `text` + `image`
            // instead of `content`.
            response.card.type = 'Standard';
            response.card.image = {};
            delete response.card.content;
            response.card.text = options.cardContent;
            if (image.smallImageUrl) {
                response.card.image.smallImageUrl = image.smallImageUrl;
            }
            if (image.largeImageUrl) {
                response.card.image.largeImageUrl = image.largeImageUrl;
            }
        }
    } else if (options.cardType === 'LinkAccount') {
        response.card = {
            type: 'LinkAccount'
        };
    } else if (options.cardType === 'AskForPermissionsConsent') {
        response.card = {
            type: 'AskForPermissionsConsent',
            permissions: options.permissions
        };
    }
    const result = {
        version: '1.0',
        response: response
    };
    if (options.sessionAttributes) {
        result.sessionAttributes = options.sessionAttributes;
    }
    return result;
}
// Builds the one-element directives array for a dialog directive of the
// given type, attaching the slot name or updated intent where that
// directive type requires them.
function getDialogDirectives(dialogType, updatedIntent, slotName) {
    const directive = { type: dialogType };
    switch (dialogType) {
        case 'Dialog.ElicitSlot':
            directive.slotToElicit = slotName;
            break;
        case 'Dialog.ConfirmSlot':
            directive.slotToConfirm = slotName;
            break;
        default:
            // Dialog.Delegate and Dialog.ConfirmIntent need no slot field.
            break;
    }
    if (updatedIntent) {
        directive.updatedIntent = updatedIntent;
    }
    return [directive];
}
Alexa Developer JSON Input:
{
"version": "1.0",
"session": {
"new": true,
"sessionId": "amzn1.echo-api.session.1456bfda-a9d3-457f-88a7-bc5387d774db",
"application": {
"applicationId": "amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2"
},
"user": {
"userId": "amzn1.ask.account.AGD7V7GZTLU4DQH623OMU5MUBR2FGWXKDVW2OPNYYRWKIYJCHQBCSKVNQHEPOEXQWO33Q4OTJ6LSIRLYT3TN33OAK3W7LLNNYPU5S3MVKPMPNH2XDWYJ7DBWCFZRXY4STCPFKVL2FADYZE4TXS53Z5AXBPN6344R6VG6GD365TFQTCPPKABC5IKM46UZXUX3BPR4TQ4KEYO6LTA"
}
},
"context": {
"System": {
"application": {
"applicationId": "amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2"
},
"user": {
"userId": "amzn1.ask.account.AGD7V7GZTLU4DQH623OMU5MUBR2FGWXKDVW2OPNYYRWKIYJCHQBCSKVNQHEPOEXQWO33Q4OTJ6LSIRLYT3TN33OAK3W7LLNNYPU5S3MVKPMPNH2XDWYJ7DBWCFZRXY4STCPFKVL2FADYZE4TXS53Z5AXBPN6344R6VG6GD365TFQTCPPKABC5IKM46UZXUX3BPR4TQ4KEYO6LTA"
},
"device": {
"deviceId": "amzn1.ask.device.AFHBRIBVUWYIR2ESXPKWP3G3PHYK4W5VW4NF55KH5ZXD27WMSPBPU7YLJQJWM2YQDZBH7VWGXCLNQKESUNWWGI6CJUWUUSWUKVBZWZC5LBNXMCDY2IOZAZUYWHYXT5VLLA7XC3OP2WY7RXE2LPRHM5E4BIMR662M5MZKJH4WRPUFS3HVIFRDK",
"supportedInterfaces": {}
},
"apiEndpoint": "https://api.amazonalexa.com",
"apiAccessToken": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjBjMjM3MTMyLTg4MTUtNDAyNS1hMjNmLWNhNmRmNjg4YmNkMiIsImV4cCI6MTUzOTU1MzE2NywiaWF0IjoxNTM5NTQ5NTY3LCJuYmYiOjE1Mzk1NDk1NjcsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZIQlJJQlZVV1lJUjJFU1hQS1dQM0czUEhZSzRXNVZXNE5GNTVLSDVaWEQyN1dNU1BCUFU3WUxKUUpXTTJZUURaQkg3VldHWENMTlFLRVNVTldXR0k2Q0pVV1VVU1dVS1ZCWldaQzVMQk5YTUNEWTJJT1pBWlVZV0hZWFQ1VkxMQTdYQzNPUDJXWTdSWEUyTFBSSE01RTRCSU1SNjYyTTVNWktKSDRXUlBVRlMzSFZJRlJESyIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHRDdWN0daVExVNERRSDYyM09NVTVNVUJSMkZHV1hLRFZXMk9QTllZUldLSVlKQ0hRQkNTS1ZOUUhFUE9FWFFXTzMzUTRPVEo2TFNJUkxZVDNUTjMzT0FLM1c3TExOTllQVTVTM01WS1BNUE5IMlhEV1lKN0RCV0NGWlJYWTRTVENQRktWTDJGQURZWkU0VFhTNTNaNUFYQlBONjM0NFI2Vkc2R0QzNjVURlFUQ1BQS0FCQzVJS000NlVaWFVYM0JQUjRUUTRLRVlPNkxUQSJ9fQ.KAHvIOOUP4k-73lNMxRnOToYjrUbeHuLRDQGzMFi9dVEiwc2QpvpMZpLNpG5rCtoqB2-OfC48KbK5u67nW6X9QO6DSoNTBfPKUatIHB6pUWbArdv-FliUO69SQMomjLtLzC86_jnZ8TqvNavjb5I5hOGnmCe5Fv2IY5HgBw0h07Dq3ZT4i_4edcnhX9zYJretTEydF0L3JA7GTithgtAGFxbBqbiDTKRMlaGUGBWAkZkHy8FPWsAmvfTwRaNL7F3LAEbGH2QJlyoPQR7jYij7CsnlRAEv-3Ur1kFaMEdhDNA9fcn2JI4TVf1umy0fL66dHWq3omk2p5I4FyrJ3a8SQ"
}
},
"request": {
"type": "IntentRequest",
"requestId": "amzn1.echo-api.request.80cc2899-2fa2-4828-99ba-1c25d8cce05b",
"timestamp": "2018-10-14T20:39:27Z",
"locale": "en-US",
"intent": {
"name": "PlayNoteIntent",
"confirmationStatus": "NONE",
"slots": {
"note": {
"name": "note",
"value": "an",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": "amzn1.er-authority.echo-sdk.amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2.Note",
"status": {
"code": "ER_SUCCESS_NO_MATCH"
}
}
]
},
"confirmationStatus": "NONE",
"source": "USER"
},
"accidental": {
"name": "accidental",
"value": "e",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": "amzn1.er-authority.echo-sdk.amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2.accidental",
"status": {
"code": "ER_SUCCESS_NO_MATCH"
}
}
]
},
"confirmationStatus": "NONE",
"source": "USER"
}
}
}
}
}
And finally the Skill's JSON:
{
"interactionModel": {
"languageModel": {
"invocationName": "tanpura",
"intents": [
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "PlayNoteIntent",
"slots": [
{
"name": "note",
"type": "Note"
},
{
"name": "accidental",
"type": "accidental"
}
],
"samples": [
"for an {note} {accidental}",
"for a {note} {accidental}",
"play {note} {accidental}",
"Start an {note} {accidental}",
"Give me an {note} {accidental}",
"Make an {note} {accidental}",
"Put on an {note} {accidental}",
"Sing at an {note} {accidental}",
"Sing at a {note} {accidental}",
"Create an {note} {accidental}",
"Lets hear an {note} {accidental}",
"Play an {note} {accidental}",
"Lets hear a {note} {accidental}",
"Sing at {note} {accidental}",
"Create a {note} {accidental}",
"Make a {note} {accidental}",
"Put on a {note} {accidental}",
"Initiate a {note} {accidental}",
"Give me a {note} {accidental}",
"Start a {note} {accidental}",
"Play a {note} {accidental}"
]
}
],
"types": [
{
"name": "Note",
"values": [
{
"name": {
"value": "B"
}
},
{
"name": {
"value": "A#"
}
},
{
"name": {
"value": "A"
}
},
{
"name": {
"value": "G#"
}
},
{
"name": {
"value": "G"
}
},
{
"name": {
"value": "F"
}
},
{
"name": {
"value": "E"
}
},
{
"name": {
"value": "D#"
}
},
{
"name": {
"value": "D"
}
},
{
"name": {
"value": "C#"
}
},
{
"name": {
"value": "C"
}
}
]
},
{
"name": "accidental",
"values": [
{
"name": {
"value": "natural"
}
},
{
"name": {
"value": "flat"
}
},
{
"name": {
"value": "sharp"
}
}
]
}
]
}
}
}
The code was working initially, but after I made a couple of edits to the Lambda code, I was getting the same reply again and again, which made no sense. I think that the problem may lie in the fact that I added an extra value for the accidental value, and I added a natural value before rebuilding my Lambda code in the skillinator.io site. Any help would be much appreciated as I have been struggling with this code all day.
"There was a problem with the requested skill's response" means that there is something wrong with the response JSON. It might be null or invalid.
In your case, this line throws an error because your string concatenation is not right.
speechOutput = "Ok. I will play " + noteSlot + accidental + <audio src="' + audioSrc + '" />;
Change it to :
speechOutput = "Ok. I will play " + noteSlot + accidental + "<audio src=\"" + audioSrc + "\" />";

Ace editor autocomplete uploads extra-records

I added the ui-ace editor to my Angular application. Instead of requesting words every time, I fetch the JSON once, during application initialization.
Example of json-file:
[
{
"Word": "Do {int} + {int}",
"Meta": "Implemented"
},
{
"Word": "Line3",
"Meta": "Not-implemented"
},
{
"Word": "Line2",
"Meta": "Not-implemented"
},
{
"Word": "Line4",
"Meta": "Not-implemented"
},
{
"Word": "444",
"Meta": "Not-implemented"
},
{
"Word": "E1",
"Meta": "Not-implemented"
},
{
"Word": "E2",
"Meta": "Not-implemented"
},
{
"Word": "E1Try",
"Meta": "Not-implemented"
},
{
"Word": "E3",
"Meta": "Not-implemented"
},
{
"Word": "E4444",
"Meta": "Not-implemented"
}
]
The issue is that some of the words are listed in the autocomplete more than once; take a look at the screenshot: http://take.ms/N8BFZ .
Here's how I load ace-editor, where ctrl.listStepLines is an object which contains json-response from API:
// Callback invoked by ui-ace when an editor instance is created:
// configures the editor and registers a completer over the pre-fetched
// word list (ctrl.listStepLines).
// NOTE(review): langTools.addCompleter registers GLOBALLY for all Ace
// instances; if Angular re-invokes this callback (re-link/re-render),
// each call stacks another identical completer — the likely cause of the
// duplicated autocomplete entries described above. Verify how often
// aceLoaded fires in this app.
$scope.aceLoaded = function(_editor){
// Editor part
var _session = _editor.getSession();
var _renderer = _editor.renderer;
// Silence Ace's deprecated auto-scroll warning.
_editor.$blockScrolling = Infinity;
_editor.setOptions({
minLines: 10,
maxLines: 40,
wrap: true,
firstLineNumber: 1,
enableBasicAutocompletion: true,
enableSnippets: true,
enableLiveAutocompletion: true
})
var langTools = ace.require("ace/ext/language_tools");
var rhymeCompleter = {
// Called by Ace on each completion request; maps the cached API
// response into { name, value, meta } completion entries.
getCompletions: function (editor, session, pos, prefix, callback) {
if (prefix.length === 0) { callback(null, []); return }
callback(null, ctrl.listStepLines.map(function (ea) {
return { name: ea.Word, value: ea.Word, meta: ea.Meta }
}));
}
}
langTools.addCompleter(rhymeCompleter);
};
The issue was that AngularJS loaded my function many times, so the Ace editor ended up with 14 identical completers. I refactored my code and created a separate function for adding the completer, which is called only once.
// Register the completer exactly once at startup (instead of inside
// aceLoaded) so repeated editor loads no longer stack duplicates.
// NOTE(review): as quoted, ctrl.addAutoCompleter() is invoked before
// init() assigns it — presumably init() runs earlier in the full
// controller; confirm the call order in the original code.
ctrl.addAutoCompleter();
function init() {
// One-time registration of the global Ace completer.
ctrl.addAutoCompleter = function () {
var langTools = ace.require("ace/ext/language_tools");
var stepLineCompleter = {
// Maps the cached step-line list into { name, value, meta } entries.
getCompletions: function (_editor, session, pos, prefix, callback) {
if (prefix.length === 0) { callback(null, []); return }
callback(null, ctrl.listStepLines.map(function (ea) {
return { name: ea.Word, value: ea.Word, meta: ea.Meta }
}));
}
}
langTools.addCompleter(stepLineCompleter);
}
};

How to upload multiple images from the iPad gallery using Ionic and a Cordova plugin?

I want to upload multiple images in iPad using ionic and cordovaImagePicker plugin.
I am getting an error:
Uncaught Error: [$injector:modulerr] Failed to instantiate module starter due to:
Error: [$injector:modulerr] Failed to instantiate module ngCordova due to:
Error: [$injector:nomod] Module 'ngCordova' is not available! You either misspelled the module name or forgot to load it. If registering a module ensure that you specify the dependencies as the second argument.
I do not understand exactly how to use that plugin, or how to test whether it is working or not.
app.js:
var app = angular.module('starter', ['ionic','ngCordova']);
app.run(function($ionicPlatform) {
$ionicPlatform.ready(function() {
// Hide the accessory bar by default (remove this to show the accessory bar above the keyboard
// for form inputs)
if(window.cordova && window.cordova.plugins.Keyboard) {
cordova.plugins.Keyboard.hideKeyboardAccessoryBar(true);
}
if(window.StatusBar) {
StatusBar.styleDefault();
}
});
});
app.controller('ImagePickerController', function($scope, $cordovaImagePicker, $ionicPlatform, $cordovaContacts) {
$scope.collection = {
selectedImage : ''
};
$ionicPlatform.ready(function() {
$scope.getImageSaveContact = function() {
// Image picker will load images according to these settings
var options = {
maximumImagesCount: 1, // Max number of selected images, I'm using only one for this example
width: 800,
height: 800,
quality: 80 // Higher is better
};
$cordovaImagePicker.getPictures(options).then(function (results) {
// Loop through acquired images
for (var i = 0; i < results.length; i++) {
$scope.collection.selectedImage = results[i]; // We loading only one image so we can use it like this
window.plugins.Base64.encodeFile($scope.collection.selectedImage, function(base64){ // Encode URI to Base64 needed for contacts plugin
$scope.collection.selectedImage = base64;
$scope.addContact(); // Save contact
});
}
}, function(error) {
console.log('Error: ' + JSON.stringify(error)); // In case of error
});
};
});
Here is my code on plunker.
my app.js file.
var app = angular.module('starter', ['ionic','ngCordova']);
app.run(function($ionicPlatform) {
$ionicPlatform.ready(function() {
// Hide the accessory bar by default (remove this to show the accessory bar above the keyboard
// for form inputs)
if(window.cordova && window.cordova.plugins.Keyboard) {
cordova.plugins.Keyboard.hideKeyboardAccessoryBar(true);
}
if(window.StatusBar) {
StatusBar.styleDefault();
}
});
});
app.controller('ImagePickerController', function($scope, $cordovaImagePicker, $ionicPlatform, $cordovaContacts) {
$scope.collection = {
selectedImage : ''
};
$ionicPlatform.ready(function() {
alert("hiii");
$scope.getImageSaveContact = function() {
// Image picker will load images according to these settings
var options = {
maximumImagesCount: 1, // Max number of selected images, I'm using only one for this example
width: 800,
height: 800,
quality: 80 // Higher is better
};
$cordovaImagePicker.getPictures(options).then(function (results) {
// Loop through acquired images
for (var i = 0; i < results.length; i++) {
$scope.collection.selectedImage = results[i]; // We loading only one image so we can use it like this
window.plugins.Base64.encodeFile($scope.collection.selectedImage, function(base64){ // Encode URI to Base64 needed for contacts plugin
$scope.collection.selectedImage = base64;
$scope.addContact(); // Save contact
});
}
}, function(error) {
console.log('Error: ' + JSON.stringify(error)); // In case of error
});
};
});
$scope.contact = { // We will use it to save a contact
"displayName": "Gajotres",
"name": {
"givenName" : "Dragannn",
"familyName" : "Gaiccc",
"formatted" : "Dragannn Gaiccc"
},
"nickname": 'Gajotres',
"phoneNumbers": [
{
"value": "+385959052082",
"type": "mobile"
},
{
"value": "+385914600731",
"type": "phone"
}
],
"emails": [
{
"value": "dragan.gaic#gmail.com",
"type": "home"
}
],
"addresses": [
{
// "type": "home",
"formatted": "Some Address",
"streetAddress": "Some Address",
"locality":"Zagreb",
"region":"Zagreb",
"postalCode":"10000",
"country":"Croatia"
}
],
"ims": null,
"organizations": [
{
"type": "Company",
"name": "Generali",
"department": "IT",
"title":"Senior Java Developer"
}
],
"birthday": Date("08/01/1980"),
"note": "",
"photos": [
{
"type": "base64",
"value": $scope.collection.selectedImage
}
],
"categories": null,
"urls": null
}
$scope.addContact = function() {
$cordovaContacts.save($scope.contact).then(function(result) {
console.log('Contact Saved!');
}, function(err) {
console.log('An error has occured while saving contact data!');
});
};
});
Run the code inside Xcode; otherwise it will always show the error "cordova is not defined".

Resources