I'm creating an Alexa skill where you can order a drink, there are two options: tea or coffee. I decided to use Dialog.Delegate directives to handle multi-turn conversation and ElicitSlot directives to control slot requests depending on the value of the drink 'tea or coffee'. I'm basically trying to replicate the 'Combine delegation and manual control to handle complex dialogs' example from the Alexa documentation: https://developer.amazon.com/en-US/docs/alexa/custom-skills/delegate-dialog-to-alexa.html#combine-delegation-and-manual-control-to-handle-complex-dialogs
Everything works fine, but the Dialog.Delegate directive is throwing an error. The directive correctly invokes the elicitation and confirmationStatus prompts but does not invoke the validations prompts, and when an invalid value is entered; for example 'coke'; it just repeats the elicitation prompt which makes the conversation unnatural and unintuitive. According to the documentation, nothing else needs to be done to add the validations, just add Dialog.Delegate, this is how it is in the documentation example:
Expected result
User: Alexa, tell My Coffee Shop to start my order
Alexa: Would you like coffee or tea?
User: shoes (Alexa sends the skill an IntentRequest with the OrderIntent. The drink slot contains the invalid value shoes.)
Because the dialog is not complete, the skill returns Dialog.Delegate.
Alexa: Shoes is not an item on the menu. Which would you like, coffee or tea? (Alexa responds with a prompt defined in the slot validation rules for the drink slot.)
Actual result
Alexa: Would you like coffee or tea?
User: shoes (Alexa sends the skill an IntentRequest with the OrderIntent. The drink slot contains the invalid value shoes.)
Because the dialog is not complete, the skill returns Dialog.Delegate.
Alexa: Would you like coffee or tea? (the elicitation prompt is repeated instead of the validation prompt)
Alexa replies with the same elicitation prompt, when it should respond with the validation prompt.
I would appreciate it if you could help me understand why this is happening and tell me what I am doing wrong. I know that I could add a function in my code that validates the values of the drink and if this value is not correct, ask the user again with ElicitSlot Directive but my idea is to use Dialog.Delegate, and this directive is supposed to add the validation prompts. Thanks Alexa Devs!
Note: the values of the slots, sample utterances, and prompts are in Spanish; because I'm from Colombia!; but this does not affect the logic or operation of the skill.
My code:
Interaction Model
{
"interactionModel": {
"languageModel": {
"invocationName": "cafeteria prueba",
"intents": [
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "HelloWorldIntent",
"slots": [],
"samples": [
"hola",
"como estás",
"di hola mundo",
"di hola",
"hola mundo"
]
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "OrderIntent",
"slots": [
{
"name": "drink",
"type": "drink",
"samples": [
"{drink}"
]
},
{
"name": "coffeeRoast",
"type": "coffeeRoast"
},
{
"name": "teaType",
"type": "teaType"
}
],
"samples": [
"quiero ordenar una bebida"
]
}
],
"types": [
{
"name": "drink",
"values": [
{
"name": {
"value": "te"
}
},
{
"name": {
"value": "café"
}
}
]
},
{
"name": "coffeeRoast",
"values": [
{
"name": {
"value": "doble"
}
},
{
"name": {
"value": "normal"
}
},
{
"name": {
"value": "negro"
}
}
]
},
{
"name": "teaType",
"values": [
{
"name": {
"value": "piña"
}
},
{
"name": {
"value": "verde"
}
},
{
"name": {
"value": "manzanilla"
}
}
]
}
]
},
"dialog": {
"intents": [
{
"name": "OrderIntent",
"delegationStrategy": "SKILL_RESPONSE",
"confirmationRequired": false,
"prompts": {},
"slots": [
{
"name": "drink",
"type": "drink",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.955195256762.342534459016"
},
"validations": [
{
"type": "hasEntityResolutionMatch",
"prompt": "Slot.Validation.955195256762.342534459016.331746291219"
}
]
},
{
"name": "coffeeRoast",
"type": "coffeeRoast",
"confirmationRequired": false,
"elicitationRequired": false,
"prompts": {}
},
{
"name": "teaType",
"type": "teaType",
"confirmationRequired": false,
"elicitationRequired": false,
"prompts": {}
}
]
}
],
"delegationStrategy": "ALWAYS"
},
"prompts": [
{
"id": "Elicit.Slot.955195256762.342534459016",
"variations": [
{
"type": "PlainText",
"value": "¿qué quieres tomar café o te?"
}
]
},
{
"id": "Slot.Validation.955195256762.342534459016.331746291219",
"variations": [
{
"type": "PlainText",
"value": "{drink} no está incluido en el menú ¿quieres café o te?"
}
]
}
]
}
}
Lambda (index.js)
I have not modified the other files, they are as they are created by Alexa Console auto-hosted node.js with the Start From Scratch template
/* *
* This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK (v2).
* Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
* session persistence, api calls, and more.
* */
const Alexa = require('ask-sdk-core');
const LaunchRequestHandler = {
    // Claims the bare "open <invocation name>" request.
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest';
    },
    handle(handlerInput) {
        // Greet the user and keep the session open by repeating the same text
        // as the reprompt.
        const greeting = 'Welcome, you can say Hello or Help. Which would you like to try?';
        return handlerInput.responseBuilder
            .speak(greeting)
            .reprompt(greeting)
            .getResponse();
    }
};
const CoffeeGivenOrderIntentHandler = {
    // Claims an OrderIntent turn where the user already chose 'café' but the
    // coffeeRoast slot is still empty, so the skill takes manual control.
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        if (request.type !== "IntentRequest" || request.intent.name !== "OrderIntent") {
            return false;
        }
        const slots = request.intent.slots;
        return Boolean(slots.drink.value)
            && slots.drink.value === 'café'
            && !slots.coffeeRoast.value;
    },
    handle(handlerInput) {
        // Elicit the roast explicitly; same text for prompt and reprompt.
        const roastPrompt = '¿Cómo quieres el café normal, negro o doble?';
        return handlerInput.responseBuilder
            .speak(roastPrompt)
            .reprompt(roastPrompt)
            .addElicitSlotDirective('coffeeRoast')
            .getResponse();
    }
};
const TeaGivenOrderIntentHandler = {
    // Claims an OrderIntent turn where the user already chose 'te' but the
    // teaType slot is still empty, so the skill takes manual control.
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === "IntentRequest"
            && handlerInput.requestEnvelope.request.intent.name === "OrderIntent"
            && handlerInput.requestEnvelope.request.intent.slots.drink.value
            && handlerInput.requestEnvelope.request.intent.slots.drink.value === 'te'
            && !handlerInput.requestEnvelope.request.intent.slots.teaType.value
    },
    handle(handlerInput) {
        // BUG FIX: the reprompt was a leftover English string from the docs
        // sample ("black, green, oolong, or white tea") listing tea types that
        // do not exist in this model's teaType slot (manzanilla, piña, verde).
        // Reuse the Spanish prompt so speak and reprompt agree, matching the
        // pattern used by CoffeeGivenOrderIntentHandler.
        const teaPrompt = "¿Quieres el te de manzanilla, piña o verde?";
        return handlerInput.responseBuilder
            .speak(teaPrompt)
            .reprompt(teaPrompt)
            .addElicitSlotDirective('teaType')
            .getResponse();
    }
};
const StartedInProgressOrderIntentHandler = {
    // Catch-all for OrderIntent turns whose dialog is not COMPLETED and that
    // the more specific coffee/tea handlers above did not claim.
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === "IntentRequest"
            && handlerInput.requestEnvelope.request.intent.name === "OrderIntent"
            && handlerInput.requestEnvelope.request.dialogState !== 'COMPLETED';
    },
    handle(handlerInput) {
        // BUG FIX (this is the error the question describes): the previous code
        // read `currentIntent.slots.drink.validations.type`. Slots delivered in
        // an IntentRequest carry no `validations` property — validation rules
        // live only in the interaction model — so the access threw a TypeError
        // and the request fell through to ErrorHandler. It also called
        // .speak(...), and any output speech returned with Dialog.Delegate
        // replaces the prompts Alexa would otherwise pick, including the
        // slot-validation prompt. Returning the bare delegate directive lets
        // Alexa run elicitation, confirmation, AND validation prompts itself.
        const currentIntent = handlerInput.requestEnvelope.request.intent;
        return handlerInput.responseBuilder
            .addDelegateDirective(currentIntent)
            .getResponse();
    }
};
const CompletedOrderIntentHandler = {
    // Fires once the dialog model reports every required slot as filled.
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === "IntentRequest"
            && request.intent.name === "OrderIntent"
            && request.dialogState === "COMPLETED";
    },
    handle(handlerInput) {
        // Pick the variant slot that matches the chosen drink; anything else
        // falls back to 'agua'.
        const slots = handlerInput.requestEnvelope.request.intent.slots;
        const drink = slots.drink.value;
        let type;
        switch (drink) {
            case 'café':
                type = slots.coffeeRoast.value;
                break;
            case 'te':
                type = slots.teaType.value;
                break;
            default:
                type = 'agua';
        }
        return handlerInput.responseBuilder
            .speak(`Toma tu ${type} ${drink}`)
            .getResponse();
    }
};
const HelpIntentHandler = {
    // Built-in AMAZON.HelpIntent.
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.HelpIntent';
    },
    handle(handlerInput) {
        // Keep the session open: the help text doubles as the reprompt.
        const helpText = 'You can say hello to me! How can I help?';
        return handlerInput.responseBuilder
            .speak(helpText)
            .reprompt(helpText)
            .getResponse();
    }
};
const CancelAndStopIntentHandler = {
    // Built-in AMAZON.CancelIntent and AMAZON.StopIntent share one farewell.
    canHandle(handlerInput) {
        if (Alexa.getRequestType(handlerInput.requestEnvelope) !== 'IntentRequest') {
            return false;
        }
        const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
        return intentName === 'AMAZON.CancelIntent' || intentName === 'AMAZON.StopIntent';
    },
    handle(handlerInput) {
        // No reprompt: the session is allowed to close.
        return handlerInput.responseBuilder
            .speak('Goodbye!')
            .getResponse();
    }
};
/* *
* FallbackIntent triggers when a customer says something that doesn’t map to any intents in your skill
* It must also be defined in the language model (if the locale supports it)
* This handler can be safely added but will be ignored in locales that do not support it yet
* */
const FallbackIntentHandler = {
    // Built-in AMAZON.FallbackIntent: the utterance matched nothing else.
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.FallbackIntent';
    },
    handle(handlerInput) {
        // Apologize and reprompt so the user can retry.
        const apology = 'Sorry, I don\'t know about that. Please try again.';
        return handlerInput.responseBuilder
            .speak(apology)
            .reprompt(apology)
            .getResponse();
    }
};
/* *
* SessionEndedRequest notifies that a session was ended. This handler will be triggered when a currently open
* session is closed for one of the following reasons: 1) The user says "exit" or "quit". 2) The user does not
* respond or says something that does not match an intent defined in your voice model. 3) An error occurs
* */
const SessionEndedRequestHandler = {
    // Fires when the session closes (exit/quit, timeout, or an error).
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
    },
    handle(handlerInput) {
        // Log the full envelope for post-mortem debugging.
        const envelope = handlerInput.requestEnvelope;
        console.log(`~~~~ Session ended: ${JSON.stringify(envelope)}`);
        // Any cleanup logic goes here. A SessionEndedRequest cannot be
        // answered with speech, so the response is intentionally empty.
        return handlerInput.responseBuilder.getResponse();
    }
};
/* *
* The intent reflector is used for interaction model testing and debugging.
* It will simply repeat the intent the user said. You can create custom handlers for your intents
* by defining them above, then also adding them to the request handler chain below
* */
const IntentReflectorHandler = {
    // Debugging catch-all: claims any IntentRequest that no earlier handler
    // took, so it must stay last in the registration order.
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
    },
    handle(handlerInput) {
        // Echo back which intent fired.
        const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
        return handlerInput.responseBuilder
            .speak(`You just triggered ${intentName}`)
            //.reprompt('add a reprompt if you want to keep the session open for the user to respond')
            .getResponse();
    }
};
/**
* Generic error handling to capture any syntax or routing errors. If you receive an error
* stating the request handler chain is not found, you have not implemented a handler for
* the intent being invoked or included it in the skill builder below
* */
const ErrorHandler = {
    // Last-resort handler: claims every request that reaches it.
    canHandle() {
        return true;
    },
    handle(handlerInput, error) {
        // BUG FIX: JSON.stringify(error) on an Error instance prints "{}"
        // because message/stack are non-enumerable properties, so CloudWatch
        // logs carried no diagnostic information. Log the stack (falling back
        // to a string rendering) instead.
        console.log(`~~~~ Error handled: ${error && error.stack ? error.stack : String(error)}`);
        const speakOutput = 'Sorry, I had trouble doing what you asked. Please try again.';
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .reprompt(speakOutput)
            .getResponse();
    }
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom
* */
/**
 * Skill entry point: wires every request handler into the SDK router.
 * Handler order is behavior — the SDK uses the first handler whose
 * canHandle() returns true, so the specific OrderIntent handlers
 * (coffee/tea given, in-progress, completed) must precede the generic
 * IntentReflectorHandler catch-all.
 */
exports.handler = Alexa.SkillBuilders.custom()
.addRequestHandlers(
LaunchRequestHandler,
// Most specific OrderIntent states first:
CoffeeGivenOrderIntentHandler,
TeaGivenOrderIntentHandler,
StartedInProgressOrderIntentHandler,
CompletedOrderIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/hello-world/v1.2')
.lambda();
I have same problem when I create "Accept only Slot Type's values and synonyms" for validation rule. I tried on Evaluate Model but the slot value is always "not filled". If I tried it on Simulator, the slot value is empty. The slot validation speech never play. So, I catch this validation in Lambda.
I had the same problem, turns out that validation works only if slot filling is enabled.
If the slot is not required and validation fails it returns an empty slot, if it is required it forces validation.
This comes from my experience, it might be wrong but for me it worked
Related
I'm trying to create an alexa skill with the help of 'Survey' template, which uses personalization + voice recognition based auth.
personalization + voice recognition based auth works fine. I have added a new intent 'Introduce' which needs to be triggered based on utterance - 'introduce' but that isn't working as expected.
Alexa open my bot
-> (welcome note from alexa)
let's begin (invokes StartMyStandupIntentHandler intent and auth based on voice id)
-> Hello How can i help you?
introduce
-> doesn't invoke IntroduceHandler but i have IntentReflectorHandler that says : You just triggered the introduce intent. You're hearing this response because introduce does not have an intent handler yet.
index.js:
const LaunchRequestHandler = {
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === 'LaunchRequest';
    },
    handle(handlerInput) {
        // Use a personalized greeting when voice recognition resolved a name,
        // otherwise fall back to the generic greeting string.
        const requestAttributes = handlerInput.attributesManager.getRequestAttributes();
        const skillName = requestAttributes.t('SKILL_NAME');
        const name = personalization.getPersonalizedPrompt(handlerInput);
        const greetingKey = (name && name.length > 0) ? 'GREETING_PERSONALIZED' : 'GREETING';
        const speakOutput = requestAttributes.t(greetingKey, skillName);
        const repromptOutput = requestAttributes.t('GREETING_REPROMPT');
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .reprompt(repromptOutput)
            .getResponse();
    },
};
const StartMyStandupIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === 'IntentRequest'
            && request.intent.name === 'StartMyStandupIntent';
    },
    async handle(handlerInput) {
        // If voice recognition identified the speaker, greet them by name and
        // cache their UPS profile in the session; otherwise prompt them to set
        // up personalization.
        const requestAttributes = handlerInput.attributesManager.getRequestAttributes();
        const name = personalization.getPersonalizedPrompt(handlerInput);
        let speakOutput;
        if (name && name.length > 0) {
            speakOutput = `Hello ${name}! How can i help you?`;
            const upsServiceClient = handlerInput.serviceClientFactory.getUpsServiceClient();
            let profileName;
            let profileEmail;
            try {
                profileName = await upsServiceClient.getPersonsProfileGivenName();
                profileEmail = await upsServiceClient.getProfileEmail();
            } catch (error) {
                return handleError(error, handlerInput);
            }
            const sessionAttributes = handlerInput.attributesManager.getSessionAttributes();
            sessionAttributes.userEmail = profileEmail;
            sessionAttributes.userName = profileName;
            handlerInput.attributesManager.setSessionAttributes(sessionAttributes);
        } else {
            speakOutput = requestAttributes.t('PERSONALIZED_FALLBACK');
        }
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .withShouldEndSession(false)
            .getResponse();
    },
};
const Introduce_Handler = {
    // Custom "Introduce" intent: speaks a fixed introduction and keeps the
    // session open.
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === 'IntentRequest'
            && request.intent.name === 'Introduce';
    },
    handle(handlerInput) {
        const introduction = 'Hi everyone, As the famous saying goes, \'The human voice is the most perfect instrument of all\', . '
            + 'A very Warm greetings to all. ';
        return handlerInput.responseBuilder
            .speak(introduction)
            .withShouldEndSession(false)
            .getResponse();
    },
};
/**
* Voice consent request - response is handled via Skill Connections.
* Hence we need to handle async response from the Voice Consent.
* The user could have accepted or rejected or skipped the voice consent request.
* Create your custom callBackFunction to handle accepted flow - in this case its handling identifying the person
* The rejected/skipped default handling is taken care by the library.
*
* @param handlerInput - the handlerInput received from the IntentRequest
* @returns
**/
/**
 * Voice-consent callback (accepted path): resolves the user's profile via the
 * UPS service, stores it in session attributes, and greets the user by name.
 *
 * BUG FIX: the original assigned the lookups to swapped variables —
 * `profileName` received getProfileEmail() and `profileEmail` received
 * getPersonsProfileGivenName() — so sessionAttributes.userEmail held the given
 * name and sessionAttributes.userName held the email address.
 */
async function handleCallBackForVoiceConsentAccepted(handlerInput) {
    const upsServiceClient = handlerInput.serviceClientFactory.getUpsServiceClient();
    // The two profile lookups are independent, so run them in parallel.
    const [profileName, profileEmail] = await Promise.all([
        upsServiceClient.getPersonsProfileGivenName(),
        upsServiceClient.getProfileEmail(),
    ]);
    const sessionAttributes = handlerInput.attributesManager.getSessionAttributes();
    sessionAttributes.userEmail = profileEmail;
    sessionAttributes.userName = profileName;
    handlerInput.attributesManager.setSessionAttributes(sessionAttributes);
    // Intent chaining is not supported from Skill Connections requests such as
    // SessionResumedRequest, so respond with plain speech here.
    const name = personalization.getPersonalizedPrompt(handlerInput);
    const speakOutput = `Hello ${name}! How can i help you?`;
    return handlerInput.responseBuilder
        .speak(speakOutput)
        .reprompt(speakOutput)
        .getResponse();
}
Interaction model json:
{
"interactionModel": {
"languageModel": {
"invocationName": "my bot",
"modelConfiguration": {
"fallbackIntentSensitivity": {
"level": "LOW"
}
},
"intents": [
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "GetCodeIntent",
"slots": [
{
"name": "MeetingCode",
"type": "AMAZON.NUMBER"
}
],
"samples": [
"My code is {MeetingCode}",
"The code is {MeetingCode}",
"{MeetingCode}"
]
},
{
"name": "GetReportIntent",
"slots": [
{
"name": "questionYesterday",
"type": "AMAZON.SearchQuery",
"samples": [
"{questionYesterday}"
]
},
{
"name": "questionToday",
"type": "AMAZON.SearchQuery",
"samples": [
"{questionToday}"
]
},
{
"name": "questionBlocking",
"type": "AMAZON.SearchQuery",
"samples": [
"{questionBlocking}"
]
}
],
"samples": [
"{questionToday} today",
"{questionYesterday} yesterday",
"yesterday {questionYesterday}",
"today {questionToday}"
]
},
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "AMAZON.YesIntent",
"samples": []
},
{
"name": "AMAZON.NoIntent",
"samples": []
},
{
"name": "ResetPinIntent",
"slots": [],
"samples": [
"where do i get a pin",
"what is my pin",
"how do i get a pin",
"i need a new pin",
"i forgot my pin"
]
},
{
"name": "StartMyStandupIntent",
"slots": [],
"samples": [
"yes let's get started",
"yes let's begin",
"let's begin",
"let's get started"
]
},
{
"name": "Introduce",
"slots": [],
"samples": [
"introduce yourself",
"introduce",
"intro"
]
}
],
"types": []
},
"dialog": {
"intents": [
{
"name": "GetReportIntent",
"delegationStrategy": "ALWAYS",
"confirmationRequired": false,
"prompts": {},
"slots": [
{
"name": "questionYesterday",
"type": "AMAZON.SearchQuery",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.420907304064.1434077833163"
}
},
{
"name": "questionToday",
"type": "AMAZON.SearchQuery",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.173201382582.539843571833"
}
},
{
"name": "questionBlocking",
"type": "AMAZON.SearchQuery",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.173201382582.1204298947985"
}
}
]
}
],
"delegationStrategy": "ALWAYS"
},
"prompts": [
{
"id": "Elicit.Slot.288779318596.409557698368",
"variations": [
{
"type": "PlainText",
"value": "Alright, first question. What did you do yesterday?"
}
]
},
{
"id": "Elicit.Slot.288779318596.1420775370020",
"variations": [
{
"type": "PlainText",
"value": "Got it. What will you do today?"
}
]
},
{
"id": "Elicit.Slot.288779318596.88143460540",
"variations": [
{
"type": "PlainText",
"value": "Okay, last question. Is there anything blocking your progress?"
}
]
},
{
"id": "Elicit.Slot.420907304064.1434077833163",
"variations": [
{
"type": "PlainText",
"value": "What did you work on yesterday?"
}
]
},
{
"id": "Elicit.Slot.173201382582.539843571833",
"variations": [
{
"type": "PlainText",
"value": "What will you work on today?"
}
]
},
{
"id": "Elicit.Slot.173201382582.1204298947985",
"variations": [
{
"type": "PlainText",
"value": "What if anything is blocking your progress?"
}
]
}
]
}
}
I suspect you missed adding your intent handler to the Skill object. See this snipet from the Alexa documentation
// Lazily-built Skill object: it lives in module scope so warm Lambda
// invocations reuse the already-constructed handler chain instead of
// rebuilding it on every request.
let skill;
exports.handler = async function (event, context) {
console.log(`REQUEST++++${JSON.stringify(event)}`);
// Cold start: assemble the skill. Registration order is behavior — the SDK
// dispatches to the first handler whose canHandle() returns true.
if (!skill) {
skill = Alexa.SkillBuilders.custom()
.addRequestHandlers(
LaunchRequestHandler,
StartMyStandupIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
SessionEndedRequestHandler,
)
.addErrorHandlers(ErrorHandler)
.create();
}
// Dispatch the incoming event and log the full response for debugging.
const response = await skill.invoke(event, context);
console.log(`RESPONSE++++${JSON.stringify(response)}`);
return response;
};
You need to add the Introduce_Handler to the addRequestHandlers method call. Also, make sure to add it before the intent reflector handler. ASK will prioritize which handler is used based on the order they are added to the skill object. Your code will probably look something like this:
.addRequestHandlers(
LaunchRequestHandler,
AskWeatherIntentHandler,
Introduce_Handler,
HelpIntentHandler,
CancelAndStopIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler
)
The skill in question asks for one permission when enabling in Web or app (Outbound Notification). But, when implemented Skill Enabled Event it's not asking user to give notification permission or not. Skill enablement works itself but permission is by default No. How to make alexa to ask for permission when enabling via voice?
Can Alexa prompt them via voice to enable the outbound notification?
skill.json
{
"manifest": {
"publishingInformation": {
"locales": {
"en-US": {
"summary": "test skill summary",
"examplePhrases": [
"Alexa, launch test skill",
"Alexa, open test skill",
"Alexa, start test skill"
],
"keywords": [
"test skill"
],
"name": "test skill",
"description": "test skill Description",
"smallIconUri": "",
"largeIconUri": "",
"updatesDescription": ""
}
},
"isAvailableWorldwide": true,
"testingInstructions": "n/a",
"category": "EVENT_FINDERS",
"distributionCountries": [],
"automaticDistribution": {
"isActive": false
}
},
"apis": {
"custom": {
"endpoint": {
"uri": "arn:aws:lambda:us-east-1:"
},
"interfaces": []
}
},
"manifestVersion": "1.0",
"privacyAndCompliance": {
"allowsPurchases": false,
"locales": {
"en-US": {
"privacyPolicyUrl": "",
"termsOfUseUrl": ""
}
},
"isExportCompliant": true,
"containsAds": false,
"isChildDirected": false,
"usesPersonalInfo": false
},
"events": {
"endpoint": {
"uri": "arn:aws:lambda:us-east-1:"
},
"publications": [
{
"eventName": "AMAZON.MessageAlert.Activated"
},
{
"eventName": "AMAZON.MediaContent.Available"
}
],
"regions": {
"NA": {
"endpoint": {
"uri": "arn:aws:lambda:us-east-1:",
"sslCertificateType": "Trusted"
}
}
},
"subscriptions": [
{
"eventName": "SKILL_PROACTIVE_SUBSCRIPTION_CHANGED"
},
{
"eventName": "SKILL_ENABLED"
},
{
"eventName": "SKILL_DISABLED"
},
{
"eventName": "SKILL_PERMISSION_ACCEPTED"
},
{
"eventName": "SKILL_PERMISSION_CHANGED"
},
{
"eventName": "SKILL_ACCOUNT_LINKED"
}
]
},
"permissions": [
{
"name": "alexa::devices:all:notifications:write"
}
]
}
}
Thank you for the help
There may be a different way, but once you are in the skill I believe you will need to send an ask for permissions card. As I understand it the idea is to make sure that Amazon is involved as a third party permissions granter. This will pop a permissions request in the Alexa app on the users phone. This added layer of security just makes sure the customer saw exactly what permissions they were granting.
You can do this a few different ways in your skill. You could check the first time that the user connects and keep track of that first connection in a persistent customer data layer. Or you could just check if the user has permission when you go to use that part of the skill. If they don't respond telling the customer you sent them a card to grant permissions.
Here is more info on permission cards:
https://developer.amazon.com/en-US/docs/alexa/custom-skills/request-customer-contact-information-for-use-in-your-skill.html#permissions-card-for-requesting-customer-consent
To run reminders via a lambda, other permissions are probably the same format.
const CreateReminderIntent = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === 'IntentRequest' && request.intent.name === 'CreateReminderIntent';
    },
    async handle(handlerInput) {
        const { requestEnvelope, serviceClientFactory, responseBuilder } = handlerInput;
        // Reminders require an explicit permission grant; without a consent
        // token we ask Alexa to run the voice-permissions flow.
        const consentToken = requestEnvelope.context.System.user.permissions
            && requestEnvelope.context.System.user.permissions.consentToken;
        if (!consentToken) {
            // BUG FIX: the AskFor payload keys must be "@type" and "@version".
            // The "#type"/"#version" spellings (a transcription artifact) are
            // not valid AskForPermissionsConsentRequest fields and the
            // Connections.SendRequest directive will be rejected.
            return handlerInput.responseBuilder
                .addDirective({
                    type: "Connections.SendRequest",
                    name: "AskFor",
                    payload: {
                        "@type": "AskForPermissionsConsentRequest",
                        "@version": "1",
                        "permissionScope": "alexa::alerts:reminders:skill:readwrite"
                    },
                    token: "<string>"
                })
                .getResponse();
        }
        try {
            const speechText = "Great! I've scheduled a reminder for you";
            const ReminderManagementServiceClient = serviceClientFactory.getReminderManagementServiceClient();
            // Fire 10 seconds from now, relative to the request.
            const reminderPayload = {
                "trigger": {
                    "type": "SCHEDULED_RELATIVE",
                    "offsetInSeconds": "10",
                    "timeZoneId": "Europe/London"
                },
                "alertInfo": {
                    "spokenInfo": {
                        "content": [{
                            "locale": "en-GB",
                            "text": "Wash the dog"
                        }]
                    }
                },
                "pushNotification": {
                    "status": "ENABLED"
                }
            };
            await ReminderManagementServiceClient.createReminder(reminderPayload);
            return responseBuilder
                .speak(speechText)
                .getResponse();
        } catch (error) {
            console.error(error);
            return responseBuilder
                .speak('Uh Oh. Looks like something went wrong.')
                .getResponse();
        }
    }
};
How to return an intent with slot values from another intent?
I want to trigger an intent by returning a slot value of it in another intent.
Here is example of my JSON file:
{
"interactionModel": {
"languageModel": {
"invocationName": "movie antakshari",
"intents": [
{
"name": "SchoolIntent",
"slots": [
{
"name": "Subject",
"type": "subjects"
}
],
"samples": ["{subjects}"]
},
{
"name": "teachersIntent",
"slots": [],
"samples": ["teachers"]
},
],
"types": [
{
"name": "subjects",
"values": [
{
"name": {"value": "maths"}
},
{
"name": {"value": "english"}
}
]
}
]
}
}
}
Here is my index.js file :
// Handler sketch from the question. NOTE: `some condition` is a placeholder,
// so this block is pseudo-code and does not compile as-is.
const teacherIntentHandler = {
canHandle(handlerInput) {
// NOTE(review): the interaction model shown above declares the intent as
// "teachersIntent" while this check uses 'teacherIntent' — confirm the names
// match, otherwise this handler never fires.
return handlerInput.requestEnvelope.request.type === 'IntentRequest'
&& handlerInput.requestEnvelope.request.intent.name === 'teacherIntent';
},
handle(handlerInput) {
if (some condition) {
// Here I want to return the schoolIntentHandler with a slot value maths
}
}
}
You can achieve this by ElicitIntent. But when you elicit your intent your slots for the particular intent will get clear(reset to null). To overcome this, before eliciting intent put your slot value into session attribute in a unique way to identify this as a slot one like SLOT_key. And when it enters into the desired intent get the slot value from session attribue & use it for your logic.
The intent invocation is driven by Users utterance. A User has to say something so that Alexa Service can match what user said to an intent in your model.
In your case, you need to make user to invoke schoolIntent by guiding him properly. i.e. You need to return a speech from here that will make user to utter something that matches schoolIntent
handle(handlerInput) {
if (some condition) {
// Here I want to return the schoolIntentHandler with a slot value maths
//
// You need to return a speech from here that will make user to utter something that matches schoolIntent.
}
}
I am trying to publish my skill however the in the Functional Test under Certification I keep receiving
The skill is a duplicate of an Alexa reference or template sample skill.
I have tried making the code appear more foreign to the algorithm checking it but it keeps bringing up the error. It says its a copy of the Hello World skill
AWS Lambda code:
const Alexa = require('ask-sdk-core');
const LaunchRequestHandler = {
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === 'LaunchRequest';
    },
    handle(handlerInput) {
        // One string serves as speech, reprompt, and card body.
        const welcome = 'Welcome to the Alexa Skills Kit, you can say what about ramranch';
        return handlerInput.responseBuilder
            .speak(welcome)
            .reprompt(welcome)
            .withSimpleCard('Ramranch', welcome)
            .getResponse();
    }
};
const RamranchIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === 'IntentRequest'
            && request.intent.name === 'ramranchIntent';
    },
    handle(handlerInput) {
        // Plays an embedded audio clip via SSML; no reprompt, so the session
        // may close after the response.
        const clip = 'Lets enjoy a worlds most beautiful composition, composed by the great, Sunil Syal, <audio src="https://my-apis.000webhostapp.com/audio/Romantic%20Solitude-Instrumental%20(Flute).mp3"/> Wow, That is amazing. Click the link on top right corner to listen to full song.';
        return handlerInput.responseBuilder
            .speak(clip)
            .withSimpleCard('Ramranch', clip)
            .getResponse();
    }
};
const HelpIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === 'IntentRequest'
            && request.intent.name === 'AMAZON.HelpIntent';
    },
    handle(handlerInput) {
        // Same text for speech, reprompt, and card.
        const helpText = 'You can say what about Ramranch!';
        return handlerInput.responseBuilder
            .speak(helpText)
            .reprompt(helpText)
            .withSimpleCard('Ramranch', helpText)
            .getResponse();
    }
};
const CancelAndStopIntentHandler = {
    // Shared farewell for AMAZON.CancelIntent and AMAZON.StopIntent.
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        if (request.type !== 'IntentRequest') {
            return false;
        }
        return request.intent.name === 'AMAZON.CancelIntent'
            || request.intent.name === 'AMAZON.StopIntent';
    },
    handle(handlerInput) {
        const farewell = 'Goodbye!';
        return handlerInput.responseBuilder
            .speak(farewell)
            .withSimpleCard('Ramranch', farewell)
            .getResponse();
    }
};
const SessionEndedRequestHandler = {
    canHandle(handlerInput) {
        return handlerInput.requestEnvelope.request.type === 'SessionEndedRequest';
    },
    handle(handlerInput) {
        // Any cleanup logic goes here. A SessionEndedRequest cannot carry
        // speech, so return an empty response.
        return handlerInput.responseBuilder.getResponse();
    }
};
const ErrorHandler = {
    // Last-resort handler: claims everything that reaches it.
    canHandle() {
        return true;
    },
    handle(handlerInput, error) {
        console.log(`Error handled: ${error.message}`);
        // Apologize and keep the session open so the user can retry.
        const apology = 'Sorry, I can\'t understand the command. Please say again.';
        return handlerInput.responseBuilder
            .speak(apology)
            .reprompt(apology)
            .getResponse();
    },
};
/**
 * Skill entry point: registers the handlers in priority order (the SDK uses
 * the first handler whose canHandle() returns true) and exports the Lambda
 * handler function.
 */
exports.handler = Alexa.SkillBuilders.custom()
.addRequestHandlers(
LaunchRequestHandler,
RamranchIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
SessionEndedRequestHandler)
.addErrorHandlers(ErrorHandler)
.lambda();
JSON code:
{
"interactionModel": {
"languageModel": {
"invocationName": "barrysmod",
"intents": [
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "ramranchIntent",
"slots": [],
"samples": [
"what about ramranch",
"lets hear ramranch"
]
}
],
"types": []
}
}
I am new to creating skills, so any help is appreciated
I have been trying to resolve this issue for the past couple of hours with no success. Here is my code:
Lambda code:
/*eslint-disable func-names */
/* eslint quote-props: ["error", "consistent"]*/
// There are three sections, Text Strings, Skill Code, and Helper Function(s).
// You can copy and paste the contents as the code for a new Lambda function, using the alexa-skill-kit-sdk-factskill template.
// This code includes helper functions for compatibility with versions of the SDK prior to 1.0.9, which includes the dialog directives.
// 1. Text strings =====================================================================================================
// Modify these strings and messages to change the behavior of your Lambda function
// Module-level mutable speech state shared (and reassigned) by every handler below.
let speechOutput;
let reprompt;
let welcomeOutput = "Welcome to Tanpura! You can ask me to play at any pitch. How can I help you today?";
let welcomeReprompt = "sample re-prompt text";
// 2. Skill Code =======================================================================================================
// NOTE(review): a "use strict" directive only takes effect in a directive
// prologue (before any other statement); placed here, after the `let`
// declarations above, it is an inert expression statement.
"use strict";
const Alexa = require('alexa-sdk');
const APP_ID = undefined; // TODO replace with your app ID (OPTIONAL).
// Reset the shared output buffer before any handler runs.
speechOutput = '';
// Intent handler map for the alexa-sdk (v1) dispatch style. Each handler is
// invoked with `this` bound by the SDK, exposing this.event and this.emit.
const handlers = {
    'LaunchRequest': function () {
        this.emit(':ask', welcomeOutput, welcomeReprompt);
    },
    'AMAZON.HelpIntent': function () {
        speechOutput = 'You can ask me to play at any pitch, such as C, C sharp, D, D sharp, and so on. How can I help you?';
        reprompt = '';
        this.emit(':ask', speechOutput, reprompt);
    },
    'AMAZON.CancelIntent': function () {
        speechOutput = 'Would you like me to play another pitch?';
        this.emit(':tell', speechOutput);
    },
    'AMAZON.StopIntent': function () {
        speechOutput = 'I hope I helped you. Goodbye!';
        this.emit(':tell', speechOutput);
    },
    'SessionEndedRequest': function () {
        speechOutput = '';
        //this.emit(':saveState',true);//uncomment to save attributes to db on session end
        this.emit(':tell', speechOutput);
    },
    'AMAZON.FallbackIntent': function () {
        speechOutput = "I currently do not support your request. However, you can ask me to play at any pitch, and I will do so. How can I help?";
        this.emit(":ask", speechOutput, speechOutput);
    },
    'AMAZON.NavigateHomeIntent': function () {
        speechOutput = "Welcome to Tanpura! You can ask me to play at any pitch. How can I help you today?";
        this.emit(":ask", speechOutput, speechOutput);
    },
    // Plays the tanpura recording matching the requested note + accidental.
    'PlayNoteIntent': function () {
        speechOutput = '';
        // Resolve entity-resolution synonyms to the canonical slot values.
        let noteSlot = resolveCanonical(this.event.request.intent.slots.note);
        console.log("User selected pitch: " + noteSlot);
        let accidentalSlot = resolveCanonical(this.event.request.intent.slots.accidental);
        console.log("User selected accidental: " + accidentalSlot);
        // Audio source URLs keyed by note letter, then accidental.
        var notes = {
            'a': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Gsharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_A.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Asharp.mp3'
            },
            'b': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Asharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_B.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_C.mp3'
            },
            'c': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_B.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_C.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Csharp.mp3'
            },
            'd': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Csharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_D.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Dsharp.mp3'
            },
            'e': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Dsharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_E.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_F.mp3'
            },
            'f': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_E.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_F.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Fsharp.mp3'
            },
            'g': {
                'flat': 'https://s3.amazonaws.com/tanpuranew/tanpura_Fsharp.mp3',
                'natural': 'https://s3.amazonaws.com/tanpuranew/tanpura_G.mp3',
                'sharp': 'https://s3.amazonaws.com/tanpuranew/tanpura_Gsharp.mp3'
            }
        };
        // BUG FIX: guard before toLowerCase() — noteSlot can be undefined
        // when the slot was not filled, which previously threw a TypeError.
        var note = noteSlot ? noteSlot.toLowerCase() : '';
        var speechReprompt = "";
        if (noteSlot && notes[note]) {
            var accidental = 'natural';
            // BUG FIX: validate against the supported accidentals. The old
            // check ('natural'.indexOf(accidentalSlot) > -1) accepted any
            // substring of "natural" (e.g. "e"), producing an undefined
            // audio URL below.
            if (accidentalSlot && ['flat', 'natural', 'sharp'].indexOf(accidentalSlot.toLowerCase()) > -1) {
                accidental = accidentalSlot.toLowerCase();
            }
            var audioSrc = notes[note][accidental];
            // BUG FIX: the SSML <audio> tag must live inside the string
            // literal; the bare JSX-like expression was a syntax error that
            // made the whole Lambda response invalid.
            speechOutput = "Ok. I will play " + noteSlot + " " + accidental + " <audio src=\"" + audioSrc + "\" />";
            speechReprompt = "Would you like me to continue playing?";
        }
        else {
            speechOutput = "The note you have requested is not supported yet.";
            speechReprompt = "However, Tanpura does support A, B, C, D, E, F, G, and all of the accidentals in between.";
        }
        // BUG FIX: use the computed reprompt instead of repeating speechOutput.
        this.emit(":ask", speechOutput, speechReprompt);
    },
    'Unhandled': function () {
        speechOutput = "Tanpura didn't quite understand what you wanted. Please try rephrasing your request.";
        this.emit(':ask', speechOutput, speechOutput);
    }
};
exports.handler = (event, context) => {
const alexa = Alexa.handler(event, context);
alexa.appId = APP_ID;
// To enable string internationalization (i18n) features, set a resources object.
//alexa.resources = languageStrings;
alexa.registerHandlers(handlers);
//alexa.dynamoDBTableName='DYNAMODB_TABLE_NAME';//uncomment this line to save attributes to DB
alexa.execute();
};
// END of Intent Handlers {} ========================================================================================
// 3. Helper Function =================================================================================================
// Return the canonical value for a slot: prefer the entity-resolution match
// (which maps synonyms to their canonical name); if the resolution payload is
// absent or malformed, log the failure and fall back to the raw slot value.
function resolveCanonical(slot){
    try {
        return slot.resolutions.resolutionsPerAuthority[0].values[0].value.name;
    } catch (err) {
        console.log(err.message);
        return slot.value;
    }
}
// Drive a multi-turn dialog: keep returning a Dialog.Delegate directive while
// the dialog is STARTED or IN_PROGRESS, and hand back the fully-filled intent
// once Alexa reports the dialog COMPLETED. Returns undefined on the delegate
// turns (the response is emitted directly) and the intent object when done.
// NOTE(review): this function reads this.event / this.attributes / this.emit,
// so it must be invoked with a handler's `this` (e.g. delegateSlotCollection.call(this)).
function delegateSlotCollection(){
console.log("in delegateSlotCollection");
console.log("current dialogState: "+this.event.request.dialogState);
if (this.event.request.dialogState === "STARTED") {
console.log("in Beginning");
// First turn of the dialog: optionally pre-fill slots before delegating.
let updatedIntent= null;
// updatedIntent=this.event.request.intent;
//optionally pre-fill slots: update the intent object with slot values for which
//you have defaults, then return Dialog.Delegate with this updated intent
// in the updatedIntent property
//this.emit(":delegate", updatedIntent); //uncomment this is using ASK SDK 1.0.9 or newer
//this code is necessary if using ASK SDK versions prior to 1.0.9
if(this.isOverridden()) {
return;
}
// Build the raw response by hand (pre-1.0.9 SDKs have no :delegate emit).
this.handler.response = buildSpeechletResponse({
sessionAttributes: this.attributes,
directives: getDialogDirectives('Dialog.Delegate', updatedIntent, null),
shouldEndSession: false
});
this.emit(':responseReady', updatedIntent);
} else if (this.event.request.dialogState !== "COMPLETED") {
console.log("in not completed");
// Mid-dialog turn: delegate again so Alexa keeps eliciting/confirming slots.
// return a Dialog.Delegate directive with no updatedIntent property.
//this.emit(":delegate"); //uncomment this is using ASK SDK 1.0.9 or newer
//this code necessary is using ASK SDK versions prior to 1.0.9
if(this.isOverridden()) {
return;
}
this.handler.response = buildSpeechletResponse({
sessionAttributes: this.attributes,
directives: getDialogDirectives('Dialog.Delegate', null, null),
shouldEndSession: false
});
this.emit(':responseReady');
} else {
console.log("in completed");
console.log("returning: "+ JSON.stringify(this.event.request.intent));
// Dialog is now complete and all required slots should be filled,
// so call your normal intent handler.
return this.event.request.intent;
}
}
// Pick one entry of `array` (words or phrases) uniformly at random.
function randomPhrase(array) {
    const index = Math.floor(Math.random() * array.length);
    return array[index];
}
// Return the lower-cased value of the named slot from the request intent,
// or false when the slot is missing or has no value.
function isSlotValid(request, slotName){
    const slot = request.intent.slots[slotName];
    //console.log("request = "+JSON.stringify(request)); //uncomment if you want to see the request
    if (!slot || !slot.value) {
        // No usable value was captured for this slot.
        return false;
    }
    return slot.value.toLowerCase();
}
//These functions are here to allow dialog directives to work with SDK versions prior to 1.0.9
//will be removed once Lambda templates are updated with the latest SDK
// Normalize a speech option — either an {type, speech} object or a bare
// string — into the Alexa outputSpeech shape. SSML input keeps its type and
// uses the `ssml` field; everything else becomes PlainText `text`.
function createSpeechObject(optionsParam) {
    if (optionsParam && optionsParam.type === 'SSML') {
        return {
            type: optionsParam.type,
            ssml: optionsParam['speech']
        };
    }
    return {
        type: optionsParam.type || 'PlainText',
        text: optionsParam['speech'] || optionsParam
    };
}
// Assemble a raw Alexa response envelope from `options`. Only the fields
// actually present in `options` are attached: output/reprompt speech,
// directives, a card (Simple, Standard-with-image, LinkAccount, or
// AskForPermissionsConsent), and sessionAttributes.
function buildSpeechletResponse(options) {
    const response = {
        shouldEndSession: options.shouldEndSession
    };
    if (options.output) {
        response.outputSpeech = createSpeechObject(options.output);
    }
    if (options.reprompt) {
        response.reprompt = {
            outputSpeech: createSpeechObject(options.reprompt)
        };
    }
    if (options.directives) {
        response.directives = options.directives;
    }
    if (options.cardTitle && options.cardContent) {
        response.card = {
            type: 'Simple',
            title: options.cardTitle,
            content: options.cardContent
        };
        const image = options.cardImage;
        if (image && (image.smallImageUrl || image.largeImageUrl)) {
            // Image URLs upgrade the card to a Standard card, which uses
            // `text` instead of `content`.
            response.card.type = 'Standard';
            response.card['image'] = {};
            delete response.card.content;
            response.card.text = options.cardContent;
            if (image.smallImageUrl) {
                response.card.image['smallImageUrl'] = image.smallImageUrl;
            }
            if (image.largeImageUrl) {
                response.card.image['largeImageUrl'] = image.largeImageUrl;
            }
        }
    } else if (options.cardType === 'LinkAccount') {
        response.card = {
            type: 'LinkAccount'
        };
    } else if (options.cardType === 'AskForPermissionsConsent') {
        response.card = {
            type: 'AskForPermissionsConsent',
            permissions: options.permissions
        };
    }
    const result = {
        version: '1.0',
        response: response
    };
    if (options.sessionAttributes) {
        result.sessionAttributes = options.sessionAttributes;
    }
    return result;
}
// Build the one-element directives array for a Dialog.* directive.
// ElicitSlot/ConfirmSlot carry the target slot name; an updatedIntent, when
// provided, is attached for any directive type.
function getDialogDirectives(dialogType, updatedIntent, slotName) {
    const directive = { type: dialogType };
    switch (dialogType) {
        case 'Dialog.ElicitSlot':
            directive.slotToElicit = slotName;
            break;
        case 'Dialog.ConfirmSlot':
            directive.slotToConfirm = slotName;
            break;
    }
    if (updatedIntent) {
        directive.updatedIntent = updatedIntent;
    }
    return [directive];
}
Alexa Developer JSON Input:
{
"version": "1.0",
"session": {
"new": true,
"sessionId": "amzn1.echo-api.session.1456bfda-a9d3-457f-88a7-bc5387d774db",
"application": {
"applicationId": "amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2"
},
"user": {
"userId": "amzn1.ask.account.AGD7V7GZTLU4DQH623OMU5MUBR2FGWXKDVW2OPNYYRWKIYJCHQBCSKVNQHEPOEXQWO33Q4OTJ6LSIRLYT3TN33OAK3W7LLNNYPU5S3MVKPMPNH2XDWYJ7DBWCFZRXY4STCPFKVL2FADYZE4TXS53Z5AXBPN6344R6VG6GD365TFQTCPPKABC5IKM46UZXUX3BPR4TQ4KEYO6LTA"
}
},
"context": {
"System": {
"application": {
"applicationId": "amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2"
},
"user": {
"userId": "amzn1.ask.account.AGD7V7GZTLU4DQH623OMU5MUBR2FGWXKDVW2OPNYYRWKIYJCHQBCSKVNQHEPOEXQWO33Q4OTJ6LSIRLYT3TN33OAK3W7LLNNYPU5S3MVKPMPNH2XDWYJ7DBWCFZRXY4STCPFKVL2FADYZE4TXS53Z5AXBPN6344R6VG6GD365TFQTCPPKABC5IKM46UZXUX3BPR4TQ4KEYO6LTA"
},
"device": {
"deviceId": "amzn1.ask.device.AFHBRIBVUWYIR2ESXPKWP3G3PHYK4W5VW4NF55KH5ZXD27WMSPBPU7YLJQJWM2YQDZBH7VWGXCLNQKESUNWWGI6CJUWUUSWUKVBZWZC5LBNXMCDY2IOZAZUYWHYXT5VLLA7XC3OP2WY7RXE2LPRHM5E4BIMR662M5MZKJH4WRPUFS3HVIFRDK",
"supportedInterfaces": {}
},
"apiEndpoint": "https://api.amazonalexa.com",
"apiAccessToken": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjBjMjM3MTMyLTg4MTUtNDAyNS1hMjNmLWNhNmRmNjg4YmNkMiIsImV4cCI6MTUzOTU1MzE2NywiaWF0IjoxNTM5NTQ5NTY3LCJuYmYiOjE1Mzk1NDk1NjcsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZIQlJJQlZVV1lJUjJFU1hQS1dQM0czUEhZSzRXNVZXNE5GNTVLSDVaWEQyN1dNU1BCUFU3WUxKUUpXTTJZUURaQkg3VldHWENMTlFLRVNVTldXR0k2Q0pVV1VVU1dVS1ZCWldaQzVMQk5YTUNEWTJJT1pBWlVZV0hZWFQ1VkxMQTdYQzNPUDJXWTdSWEUyTFBSSE01RTRCSU1SNjYyTTVNWktKSDRXUlBVRlMzSFZJRlJESyIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHRDdWN0daVExVNERRSDYyM09NVTVNVUJSMkZHV1hLRFZXMk9QTllZUldLSVlKQ0hRQkNTS1ZOUUhFUE9FWFFXTzMzUTRPVEo2TFNJUkxZVDNUTjMzT0FLM1c3TExOTllQVTVTM01WS1BNUE5IMlhEV1lKN0RCV0NGWlJYWTRTVENQRktWTDJGQURZWkU0VFhTNTNaNUFYQlBONjM0NFI2Vkc2R0QzNjVURlFUQ1BQS0FCQzVJS000NlVaWFVYM0JQUjRUUTRLRVlPNkxUQSJ9fQ.KAHvIOOUP4k-73lNMxRnOToYjrUbeHuLRDQGzMFi9dVEiwc2QpvpMZpLNpG5rCtoqB2-OfC48KbK5u67nW6X9QO6DSoNTBfPKUatIHB6pUWbArdv-FliUO69SQMomjLtLzC86_jnZ8TqvNavjb5I5hOGnmCe5Fv2IY5HgBw0h07Dq3ZT4i_4edcnhX9zYJretTEydF0L3JA7GTithgtAGFxbBqbiDTKRMlaGUGBWAkZkHy8FPWsAmvfTwRaNL7F3LAEbGH2QJlyoPQR7jYij7CsnlRAEv-3Ur1kFaMEdhDNA9fcn2JI4TVf1umy0fL66dHWq3omk2p5I4FyrJ3a8SQ"
}
},
"request": {
"type": "IntentRequest",
"requestId": "amzn1.echo-api.request.80cc2899-2fa2-4828-99ba-1c25d8cce05b",
"timestamp": "2018-10-14T20:39:27Z",
"locale": "en-US",
"intent": {
"name": "PlayNoteIntent",
"confirmationStatus": "NONE",
"slots": {
"note": {
"name": "note",
"value": "an",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": "amzn1.er-authority.echo-sdk.amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2.Note",
"status": {
"code": "ER_SUCCESS_NO_MATCH"
}
}
]
},
"confirmationStatus": "NONE",
"source": "USER"
},
"accidental": {
"name": "accidental",
"value": "e",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": "amzn1.er-authority.echo-sdk.amzn1.ask.skill.0c237132-8815-4025-a23f-ca6df688bcd2.accidental",
"status": {
"code": "ER_SUCCESS_NO_MATCH"
}
}
]
},
"confirmationStatus": "NONE",
"source": "USER"
}
}
}
}
}
And finally the Skill's JSON:
{
"interactionModel": {
"languageModel": {
"invocationName": "tanpura",
"intents": [
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "PlayNoteIntent",
"slots": [
{
"name": "note",
"type": "Note"
},
{
"name": "accidental",
"type": "accidental"
}
],
"samples": [
"for an {note} {accidental}",
"for a {note} {accidental}",
"play {note} {accidental}",
"Start an {note} {accidental}",
"Give me an {note} {accidental}",
"Make an {note} {accidental}",
"Put on an {note} {accidental}",
"Sing at an {note} {accidental}",
"Sing at a {note} {accidental}",
"Create an {note} {accidental}",
"Lets hear an {note} {accidental}",
"Play an {note} {accidental}",
"Lets hear a {note} {accidental}",
"Sing at {note} {accidental}",
"Create a {note} {accidental}",
"Make a {note} {accidental}",
"Put on a {note} {accidental}",
"Initiate a {note} {accidental}",
"Give me a {note} {accidental}",
"Start a {note} {accidental}",
"Play a {note} {accidental}"
]
}
],
"types": [
{
"name": "Note",
"values": [
{
"name": {
"value": "B"
}
},
{
"name": {
"value": "A#"
}
},
{
"name": {
"value": "A"
}
},
{
"name": {
"value": "G#"
}
},
{
"name": {
"value": "G"
}
},
{
"name": {
"value": "F"
}
},
{
"name": {
"value": "E"
}
},
{
"name": {
"value": "D#"
}
},
{
"name": {
"value": "D"
}
},
{
"name": {
"value": "C#"
}
},
{
"name": {
"value": "C"
}
}
]
},
{
"name": "accidental",
"values": [
{
"name": {
"value": "natural"
}
},
{
"name": {
"value": "flat"
}
},
{
"name": {
"value": "sharp"
}
}
]
}
]
}
}
}
The code was working initially, but after I made a couple of edits to the Lambda code, I started getting the same reply again and again, which made no sense. I think the problem may lie in the fact that I added an extra value to the accidental slot type, and added a natural value, before rebuilding my Lambda code on the skillinator.io site. Any help would be much appreciated, as I have been struggling with this code all day.
"There was a problem with the requested skill's response" means that there is something wrong with the response JSON. It might be null or invalid.
In your case, this line throws an error because your string concatenation is not right.
speechOutput = "Ok. I will play " + noteSlot + accidental + <audio src="' + audioSrc + '" />;
Change it to:
speechOutput = "Ok. I will play " + noteSlot + accidental + "<audio src=\"" + audioSrc + "\" />";