Not getting any answer from a Get Artist request - spotipy

Trying to run a very basic scenario:
import requests
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
#import matplotlib.pyplot as plt
import pandas as pd
CLIENT_ID = 'xxxxx'
CLIENT_SECRET = 'xxxxx'
client_credentials_manager = SpotifyClientCredentials(client_id=CLIENT_ID, client_secret=CLIENT_SECRET)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu'
artist = sp.artist(urn)
print(artist)
And I am not getting any response. Other queries (track, for instance) work well, but I cannot retrieve anything from the Get Artist one.
Any ideas?

Flask app is being run locally instead of on heroku

So I've deployed my Flask app with a React front end to Heroku, but there seems to be some problem where Flask is running on my localhost instead of on the Heroku server.
I've read tons of stackoverflow posts on this but to no resolution. Here is my flask code:
from flask import Flask, request
import flask
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_cors import CORS

app = Flask(__name__, static_folder="./build", static_url_path="/")
app.config['SQLALCHEMY_DATABASE_URI'] = 'my database url'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.secret_key = 'secret string'
CORS(app)
db = SQLAlchemy(app)

class Feature_votes(db.Model):
    feature = db.Column(db.String(500), primary_key=True)
    votes = db.Column(db.Integer, nullable=False)
    date = db.Column(db.DateTime, nullable=False)

    def __init__(self, feature, votes, date):
        self.feature = feature
        self.votes = votes
        self.date = date

# Serve the react app
@app.route("/")
def index():
    return app.send_static_file("index.html")

# Retrieve currently polled features from Feature_votes
@app.route("/getVotes", methods=['GET'])
def getVotes():
    rows = Feature_votes.query.filter().order_by(Feature_votes.date)
    response = []
    for row in rows:
        response.append(
            {"feature": row.feature,
             "votes": row.votes
             })
    return flask.jsonify(response)

# Add a new feature to the db with votes set to 0
@app.route("/featureAdd", methods=['POST'])
def featureAdd():
    feature = request.get_json()["feature"]
    featureEntry = Feature_votes(feature, 0, datetime.utcnow())
    db.session.add(featureEntry)
    db.session.commit()
    response = {"feature": featureEntry.feature,
                "votes": 0,
                "date": featureEntry.date
                }
    return response

@app.route("/featureModifyVotes", methods=['POST'])
def featureUnvote():
    feature = request.get_json()["feature"]
    direction = request.get_json()["direction"]
    featureEntry = Feature_votes.query.filter_by(feature=feature).first()
    if (direction == "increase"):
        featureEntry.votes += 1
    else:
        featureEntry.votes -= 1
    db.session.commit()
    response = {featureEntry.feature: featureEntry.votes}
    return response

if __name__ == '__main__':
    app.run()
and here is my Procfile
web: gunicorn --bind 0.0.0.0:$PORT server:app
Also here is a snip I took from inspect element to show that this request is being served locally.
I am relatively new to web development so it is possible I made a lot of mistakes. Please let me know if you can help or need any more info from me. Thanks.
So apparently that screenshot I posted in the question didn't mean that my server was running on localhost, but rather that my request was being made to localhost. Turns out I had fetch("http://localhost...) in my build files. After switching to a relative path, rebuilding, and pushing to Heroku, everything is working.

Sagemaker model deployment failing due to custom endpoint name

AWS Sagemaker model deployment fails when the endpoint_name argument is specified. Any thoughts?
Without the endpoint_name argument in deploy, model deployment works successfully.
Model training and saving to the S3 location are successful either way.
import boto3
import os
import numpy as np
import sagemaker
from sagemaker import get_execution_role
from sagemaker.predictor import csv_serializer
from sagemaker.amazon.amazon_estimator import get_image_uri

bucket = 'Y'
prefix = 'Z'
role = get_execution_role()

train_data, validation_data, test_data = np.split(df.sample(frac=1, random_state=100), [int(0.5 * len(df)), int(0.8 * len(df))])
train_data.to_csv('train.csv', index=False, header=False)
validation_data.to_csv('validation.csv', index=False, header=False)
test_data.to_csv('test.csv', index=False)

boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/X/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/X/validation.csv')).upload_file('validation.csv')

container = get_image_uri(boto3.Session().region_name, 'xgboost')
#print(container)

s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train/{}'.format(bucket, prefix, suffix), content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/validation/{}/'.format(bucket, prefix, suffix), content_type='csv')

sess = sagemaker.Session()
output_loc = 's3://{}/{}/output'.format(bucket, prefix)

xgb = sagemaker.estimator.Estimator(container,
                                    role,
                                    train_instance_count=1,
                                    train_instance_type='ml.m4.xlarge',
                                    output_path=output_loc,
                                    sagemaker_session=sess,
                                    base_job_name='X')
#print('Model output to: {}'.format(output_location))

xgb.set_hyperparameters(eta=0.5,
                        objective='reg:linear',
                        eval_metric='rmse',
                        max_depth=3,
                        min_child_weight=1,
                        gamma=0,
                        early_stopping_rounds=10,
                        subsample=0.8,
                        colsample_bytree=0.8,
                        num_round=1000)

# Model fitting
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})

# Deploy model with a custom endpoint name
xgb_predictor_X = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge', endpoint_name='X')
xgb_predictor_X.content_type = 'text/csv'
xgb_predictor_X.serializer = csv_serializer
xgb_predictor_X.deserializer = None
INFO:sagemaker:Creating endpoint with name delaymins
ClientError: An error occurred (ValidationException) when calling the CreateEndpoint operation: Could not find model "arn:aws:sagemaker:us-west-2::model/X-2019-01-08-18-17-42-158".
Figured it out! If a custom-named endpoint is not ended (deleted) before redeploying with the same name, it gets blacklisted (not sure if this is temporary). Therefore a different endpoint name must be used if this mistake is made. Moral of the story: always end an endpoint before redeploying.
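For what it's worth, a minimal sketch of what ending the endpoint first could look like with the same v1 Python SDK used above. The predictor object and the endpoint name 'X' come from the question; treating delete_endpoint() as the teardown step is my assumption, not something stated in the original post:
# Tear down the existing endpoint before deploying again under the same name.
xgb_predictor_X.delete_endpoint()

# A redeploy with the same custom name should then no longer collide.
xgb_predictor_X = xgb.deploy(initial_instance_count=1,
                             instance_type='ml.m4.xlarge',
                             endpoint_name='X')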

Error in fetching messages from the Gmail API with multiple queries

I want to get unread mails from yesterday, so I had to combine multiple queries in the messages.list function, which gives me an invalid syntax error. How do I do it? Can someone help me? And will internalDate help me in any way?
from __future__ import print_function
import httplib2
import os
from email.utils import parsedate_tz, mktime_tz, formatdate
from requests.adapters import HTTPAdapter
import datetime
from datetime import date, timedelta
import time
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
import json

try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = 'client_server.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'

def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'gmail-python-quickstart.json')

    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials

def main():
    da = date.fromordinal(730920)
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('gmail', 'v1', http=http)
    today = date.today()
    print(today)
    yesterday = today - timedelta(1)
    print(yesterday)
    response = service.users().messages().list(userId='me', q='{in:inbox is:unread} AND {after: {0}.format(yesterday.strftime('%Y/%m/%d'))}').execute()
    messages = []
    store = []
    message1 = []
    test2 = []
    da = []
    if 'messages' in response:
        messages.extend(response['messages'])
    fo = open("foo.txt", "wb")
    for i in range(len(messages)):
        store = messages[i]['id']
        message = service.users().messages().get(userId='me', id=store, format='metadata', metadataHeaders=['from', 'date']).execute()
        fo.write(store + " ")
        #print(message['payload']['headers'][0])
        fo.write(message['snippet'].encode('utf-8') + " ")
        if message['payload']['headers'][0]['name'].lower() == "from":
            From = message['payload']['headers'][0]['value']
            fo.write(From + " ")
        elif message['payload']['headers'][0]['name'].lower() == "date":
            da = message['payload']['headers'][0]['value']
            fo.write(da + "\n")
    for line in open("foo.txt"):
        print(line)
    # Open a file
    # Close opend file
    fo.close()

if __name__ == '__main__':
    main()
Use:
q='in:inbox is:unread newer_than:3d'
as the query. Gmail queries don't have a concept of timezones, so if you try to get exactly one day's worth of email you'll end up with some overlap. Just use local filtering to narrow those down. See advanced Gmail search for more help. The API and the Gmail UI use the same query syntax and should show the same results, so you can do your testing in the UI.
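A minimal sketch of how that query slots into the code from the question (the service object is the one built in main(); newer_than:3d is the window suggested above, and narrowing down to exactly "yesterday" is left to local filtering):
# Unread inbox messages from roughly the last three days; filter down to
# "yesterday" locally afterwards to deal with the timezone overlap.
response = service.users().messages().list(
    userId='me',
    q='in:inbox is:unread newer_than:3d').execute()
messages = response.get('messages', [])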

upload text file to google drive using python every 5 seconds

How can I modify the following Python code to upload ecg.txt to Google Drive every 5 seconds, as I keep updating this text file? The code currently asks for OAuth every time it uploads the file; I want it to ask for authentication only the first time.
#!/usr/bin/python
import httplib2
import pprint
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from oauth2client.client import OAuth2WebServerFlow
# Copy your credentials from the console
CLIENT_ID = 'XXXXXX'
CLIENT_SECRET = 'XXXXX'
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
# Redirect URI for installed apps
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
# Path to the file to upload
FILENAME = 'ecg.txt'
# Run through the OAuth flow and retrieve credentials
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print 'Go to the following link in your browser: ' + authorize_url
code = raw_input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = build('drive', 'v2', http=http)
# Insert a file
media_body = MediaFileUpload(FILENAME, mimetype='text/plain', resumable=True)
body = {
    'title': 'My document',
    'description': 'A test document',
    'mimeType': 'text/plain'
}
file = drive_service.files().insert(body=body, media_body=media_body).execute()
pprint.pprint(file)
If you use the decorator as seen in the video below, it stores your access token in the db for you and handles refreshing when needed.
https://www.youtube.com/watch?v=HoUdWBzUZ-M
https://developers.google.com/api-client-library/python/guide/google_app_engine
import webapp2
from oauth2client.appengine import OAuth2Decorator

decorator = OAuth2Decorator(client_id=CLIENT_ID,
                            client_secret=CLIENT_SECRET,
                            scope=OAUTH_SCOPE,
                            callback_path=REDIRECT_URI)

class MainPage(webapp2.RequestHandler):
    @decorator.oauth_required  # Simply place this above any function that requires login.
    def get(self):
        # Handler body goes here (e.g. kick off the Drive upload).
        pass
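One detail the snippet above leaves out is that the decorator's OAuth callback route has to be registered with the application. A minimal sketch of that wiring, following the usual oauth2client App Engine pattern (the '/' route and the debug flag are assumptions, not part of the original answer):
# Register the page handler plus the decorator's own OAuth2 callback route.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    (decorator.callback_path, decorator.callback_handler()),
], debug=True)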

Exceeded soft memory limit with basic SELECT

I have a datastore with a kind named MyUsers(db.Model) that currently contains about 30 entities.
I have written a script that prints all the entities' "name" attribute to the screen (separated by the '#' char), using the following code:
def get(self):
    q_1 = MyUsers.all().order('name')
    for user in q_1:
        self.response.out.write(user.name)
        self.response.out.write("#")
The script works just fine, but the problem is that I always get a critical message in the App Engine log:
12-12 12:45AM 22.691
Exceeded soft memory limit with 220.043 MB after servicing 1 requests total

I 12-12 12:45AM 22.691
This request caused a new process to be started for your application, and thus caused your application code to be loaded for the first time. This request may thus take longer and use more CPU than a typical request for your application.

W 12-12 12:45AM 22.691
After handling this request, the process that handled this request was found to be using too much memory and was terminated. This is likely to cause a new process to be used for the next request to your application. If you see this message frequently, you may have a memory leak in your application.
It seems like this is a very straightforward, basic operation that shouldn't exceed any memory limits, so what can I do to improve it?
Thanks,
Joel
EDIT:
As for the imports, these are the ones I use:
from models.model import *
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import profiler.appengine.request
import profiler.appengine.datastore
I used a profiler to try and understand what is wrong; maybe you can help.
Thanks!
Joel
EDIT 2
This is the full version of the code (the problem also occurred before I imported the profiler; I added it afterwards to try and debug):
from models.model import MyUsers
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import profiler.appengine.request
import profiler.appengine.datastore

class PrintAll(webapp.RequestHandler):
    def get(self):
        q_1 = MyUsers.all().order('name')
        for user in q_1:
            self.response.out.write(user.name)
            self.response.out.write("#")

application = webapp.WSGIApplication(
    [('/print', PrintAll)
     ],
    debug=True)

def main():
    profiler.appengine.request.activate()
    profiler.appengine.datastore.activate()
    run_wsgi_app(application)
    profiler.appengine.request.show_summary()
    profiler.appengine.datastore.show_summary()
    profiler.appengine.datastore.dump_requests()  # optional

if __name__ == "__main__":
    main()
As for the MyUsers() model class:
class MyUsers(db.Model):
    user = db.UserProperty()
    points = db.FloatProperty()
    bonus = db.FloatProperty(default=0.0)
    joindate = db.DateTimeProperty(auto_now_add=True)
    lastEntry = db.DateTimeProperty(auto_now_add=True)
    name = db.StringProperty()
    last_name = db.StringProperty()
    homepage = db.StringProperty()
    hobbies = db.ListProperty(str)
    other = db.StringProperty()
    calculate1 = db.FloatProperty()
    calculate2 = db.FloatProperty()
    calculate3 = db.IntegerProperty(default=0)
    history = db.ListProperty(str)
    history2 = db.ListProperty(str)
    title = db.IntegerProperty(default=0)
    title_string = db.StringProperty()
    updateDate = db.DateTimeProperty(auto_now_add=True)
    level = db.IntegerProperty(default=0)
    debug_helper = db.IntegerProperty(default=0)
    debug_list = db.ListProperty(str)
As it stands, there's not really any way that this could cause the error you're seeing. Can you provide a complete reproduction case? It's likely that something other than the code snippet you've included is the cause of this issue.
