I am attempting to use data from a json file which has information stored by the user. But when I print from it, it displays all the information stored in it. How do I extract only 1 part? Here is the code:
# NOTE(review): Stack Overflow mangles the "@" of decorators into "#"; restored here.
# Fixed: aliases was ["shib, shibaku, Shib"] — a single string, not three aliases.
@client.command(aliases=["shib", "shibaku", "Shib"])
async def Shibaku(ctx, number=0):
    """Send only the stored oslink for the invoking user.

    The stored record is (coin1..coin6, shibakunumber, oslink), so the
    oslink is the last element of the list loaded from the JSON file.
    The parameter was originally named ``int``, which shadows the builtin.
    """
    if number == 1:
        with open('Shibaku1.json') as f:
            coins_data = json.load(f)
        # Send only the last element (the oslink) instead of looping over
        # every stored item — this is what the question asked for.
        await ctx.send(coins_data[str(ctx.author.id)][-1])
Here is the code for storing information in "Shibaku1.json"
@client.command()
async def shibaku1(ctx, coin1, coin2, coin3, coin4, coin5, coin6, shibakunumber, oslink):
    """Store the invoking user's six coins, shibaku number and oslink.

    NOTE(review): this command reads/writes 'Shibaku10.json' while the
    reader command opens 'Shibaku1.json' — confirm the filenames are
    really meant to differ.
    """
    # Remove the invoking message so the arguments are not left in chat.
    await ctx.message.delete()
    with open('Shibaku10.json', 'r') as f:
        coins_data = json.load(f)
    # Key by the author's id so each user gets their own record.
    coins_data[str(ctx.author.id)] = (coin1, coin2, coin3, coin4, coin5, coin6, shibakunumber, oslink)
    with open('Shibaku10.json', 'w') as f:
        json.dump(coins_data, f)
Sample JSON file:
{"331971067788787733": ["\ud83d\ude04", "\ud83d\ude06", "\ud83d\ude00", "\ud83d\ude01", "\ud83d\ude05", "\ud83e\udd0f", "1", "1"]}
I want to display only the "oslink" part.
# Quoted from the question: the snippet that sends everything, kept as-is.
#client.command(aliases = ["shib, shibaku, Shib"])
async def Shibaku(ctx, int = 0):
# (indentation was lost in the paste; the lines below belong inside Shibaku)
if int == 1:
with open('Shibaku1.json') as f:
coins_data = json.load(f)
# This loop iterates EVERY element stored for the user, so all the coins,
# the shibakunumber, AND the oslink get sent — not just the oslink.
for oslink in coins_data[str(ctx.author.id)]:
await ctx.send(oslink)
In this for loop block, you are iterating every single element of the value of coins_data[author_id].
And from what I can tell, your coins_data is structured like:
{
author_id1: (list of coins, shibakunumber, oslink),
author_id2: (list of coins, shibakunumber, oslink),
...
}
Because the value for key of author_id is only a list, you are sending all of coins, shibakunumber, and the oslink.
If you only want to send the oslink, you need to structure the data so that you can specifically call for the oslink.
For example, a nested dictionary will work:
{
author_id1: {
coin1: value,
coin2: value,
...,
oslink: somevalue
},
author_id2: {
coin1: value,
coin2: value,
...,
oslink: somevalue
},
...
}
This way, you can specifically look it up like coins_data[str(ctx.author.id)]["oslink"]
So you want every coin to be sent one by one; here is my answer:
# Fixed: aliases was ["shib, shibaku, Shib"] — one string instead of three aliases.
@client.command(aliases=["shib", "shibaku", "Shib"])
async def Shibaku(ctx, number=0):
    """Send every stored item for the invoking user, one message at a time.

    The parameter was originally named ``int``, which shadows the builtin;
    renamed to ``number`` (discord.py passes command arguments positionally).
    """
    if number == 1:
        with open('Shibaku1.json') as f:
            coins_data = json.load(f)
        # coins_data[...] is a list, so iterate it and send items one by one.
        user_coin_list = coins_data[str(ctx.author.id)]
        for oslink in user_coin_list:
            await ctx.send(oslink)
The code above works because coins_data[str(ctx.author.id)] returns a list, and you then run the for loop over that list — whereas before you were looping over the JSON data directly.
Related
The code I have at the moment doesn't accompany for all users as the json file which it stores the "coins" in only saves it as userid. This is the code for storing the coins in the json file:
@client.command()
async def Shibaku1(ctx, coin1, coin2, coin3, coin4, coin5, coin6):
    """Store six coins in Shibaku1.json.

    NOTE(review): the literal key 'userid' means every user overwrites the
    same record — use str(ctx.author.id) as the key to keep one set per
    user (this is exactly the defect the question is about).
    """
    with open('Shibaku1.json', 'r') as f:
        coins_data = json.load(f)
    coins_data['userid'] = (coin1, coin2, coin3, coin4, coin5, coin6)
    with open('Shibaku1.json', 'w') as f:
        json.dump(coins_data, f)
Example of what gets stored in the json file:
{"userid": [":Helicopter:", ":Skateboard1:", ":swords:", ":mace:", ":mace:", ":mangosteen:"]}
How do I make it so it stores a different set for each user?
Replace coins_data['userid'] with coins_data[str(ctx.author.id)].
Then your json file will look like this:
{"123456789": [":Helicopter:", ":Skateboard1:", ":swords:", ":mace:", ":mace:", ":mangosteen:"]}
I have a json file with 6 "coins" to each user (which the user can change), I am trying to get this data saved in the json file and match it with another 6 coin pattern in order to check how many matches are made in total. The json file format is:
{"331971067788787733": [":Helicopter:", ":Skateboard1:", ":swords:", ":mace:", ":Helicopter:", ":Skateboard1:"]}
With each "coin" being a discord emoji.
The code for the user to save it is:
@client.command()
async def Shibaku1(ctx, coin1, coin2, coin3, coin4, coin5, coin6):
    """Save the invoking user's six coins, keyed by their Discord id."""
    with open('Shibaku1.json', 'r') as f:
        coins_data = json.load(f)
    # One record per user: the author's id is the JSON key.
    coins_data[str(ctx.author.id)] = (coin1, coin2, coin3, coin4, coin5, coin6)
    with open('Shibaku1.json', 'w') as f:
        json.dump(coins_data, f)
How do I extract the string value of each coin from the json file with the users 6 coins? in order to compare them with the main pattern to find the number of matches.
So you can use a for loop over the list to compare the values; an example is given below:
# Compare each of the user's stored coins against the target pattern value.
for coin in coins_data[str(ctx.author.id)]:
    if coin != new_coin:
        pass  # handle the mismatch here (e.g. count it)
I cant find Alchemy Language API in IBM Watson.
Can I do this with natural-language-understanding service and how?
When I add
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
import Features, EntitiesOptions, KeywordsOptions
It shows an error related to the combined keyword.
# In[]:
import tweepy
import re
import time
import math
import pandas as pd
from watson_developer_cloud import AlchemyLanguageV1
def initAlchemy():
    """Create and return the AlchemyLanguage client.

    SECURITY(review): the API key is hard-coded and has been published —
    move it to an environment variable / config file and rotate the key.
    """
    al = AlchemyLanguageV1(api_key='GRYVUMdBbOtJXxNOIs1aopjjaiyOmLG7xJBzkAnvvwLh')
    return al
def initTwitterApi():
    """Authenticate with Twitter via OAuth and return a tweepy API handle.

    SECURITY(review): consumer/access secrets are hard-coded and published —
    load them from the environment and revoke/rotate these credentials.
    """
    consumer_key = 'OmK1RrZCVJSRmKxIuQqkBExvw'
    consumer_secret = 'VWn6OR4rRgSi7qGnZHCblJMhrSvj1QbJmf0f62uX6ZQWZUUx5q'
    access_token = '4852231552-adGooMpTB3EJYPHvs6oGZ40qlo3d2JbVjqUUWkJ'
    access_token_secret = 'm9hgeM9p0r1nn8IoQWJYBs5qUQu56XmrAhsDSYKjuiVA4'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    return api
'''This function is implemented to handle tweepy exception errors
because search is rate limited at 180 queries per 15 minute window by twitter'''
def limit(cursor):
    """Yield items from a tweepy cursor, sleeping through rate-limit errors.

    FIX: RateLimitError is a subclass of TweepError, so catching TweepError
    first (as the original did) made the RateLimitError handler unreachable.
    The more specific exception is now checked first.
    """
    while True:
        try:
            yield cursor.next()
        except tweepy.RateLimitError:
            print("Rate Limit Error occurred Sleeping for 16 minutes")
            time.sleep(16 * 60)
        except tweepy.TweepError as error:
            print(repr(error))
            print("Twitter Request limit error reached sleeping for 15 minutes")
            time.sleep(16 * 60)
def retrieveTweets(api, search, lim):
    """Search tweets and return a DataFrame with Tweet/Sentiment/Score columns.

    ``lim == ""`` means no limit (math.inf); otherwise it is parsed as an int.
    """
    lim = math.inf if lim == "" else int(lim)
    # Hoisted and compiled once; raw string — the original '\s+' relied on
    # Python's lax handling of unknown escapes.
    whitespace = re.compile(r'\s+')
    text = []
    for tweet in limit(tweepy.Cursor(api.search, q=search).items(limit=lim)):
        # Collapse all runs of whitespace (incl. newlines) to single spaces.
        text.append(whitespace.sub(' ', tweet.text))
    data = {"Tweet": text,
            "Sentiment": "",
            "Score": ""}
    dataFrame = pd.DataFrame(data, columns=["Tweet", "Sentiment", "Score"])
    return dataFrame
def analyze(al, dataFrame):
    """Run document-level sentiment analysis over every tweet in *dataFrame*.

    Fills the "Sentiment" and "Score" columns in place and returns the frame.
    Neutral documents are recorded with a score of 0.
    """
    sentiment = []
    score = []
    # Iterate the column directly instead of range(0, df["Tweet"].__len__()).
    for tweet in dataFrame["Tweet"]:
        res = al.combined(text=tweet,
                          extract="doc-sentiment",
                          sentiment=1)
        doc = res["docSentiment"]
        sentiment.append(doc["type"])
        score.append(0 if doc["type"] == "neutral" else doc["score"])
    dataFrame["Sentiment"] = sentiment
    dataFrame["Score"] = score
    return dataFrame
def main():
    """Fetch tweets, run sentiment analysis, and save the results to CSV."""
    # Initialise Twitter API
    api = initTwitterApi()
    # Retrieve tweets
    dataFrame = retrieveTweets(
        api,
        input("Enter the search query (e.g. #hillaryclinton ) : "),
        input("Enter limit for number of tweets to be searched or else just hit enter : "))
    # Initialise IBM Watson Alchemy Language API
    al = initAlchemy()
    # Do document sentiment analysis
    dataFrame = analyze(al, dataFrame)
    # Save tweets, sentiment, and score data frame in a CSV file
    dataFrame.to_csv(input("Enter the name of the file (with .csv extension) : "))
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-   (stray coding cookie that was fused onto main() in
# the paste; a coding declaration only has effect on a file's first two lines)
The Watson Natural Language Understanding only has a combined call, but since it is the only call, it isn't called combined, its actually analyze. Best place to go for details would be the API documentation - https://www.ibm.com/watson/developercloud/natural-language-understanding/api/v1/?python#post-analyze
Simply I need three conditions.
1) Log-in
2) Multiple request
3) Synchronous request ( sequential like 'C' )
I realized 'yield' should be used for multiple request.
But I think 'yield' works differently with 'C' and not sequential.
So I want to use request without 'yield' like below.
But the crawl method wasn't called as expected.
How can I call crawl method sequentially like C ?
class HotdaySpider(scrapy.Spider):
    """Spider that logs in, then requests a batch of view pages.

    FIXES vs. the pasted original: the markdown ``**…**`` around Request was
    stripped; ``after_login`` now *returns* the requests it builds (the
    original created them and dropped them, so ``crawl`` was never called);
    the bogus ``global maxnum/runcnt`` statements were replaced with class
    attribute access (they are class attributes, not module globals).
    """

    name = "hotday"
    allowed_domains = ["test.com"]
    login_page = "http://www.test.com"
    start_urls = ["http://www.test.com"]

    maxnum = 27982   # idx of the page to fetch (class-level counter)
    runcnt = 10      # number of requests to schedule after login

    def parse(self, response):
        # Log in first; real crawling starts in after_login.
        return [FormRequest.from_response(
            response,
            formname='login_form',
            formdata={'id': 'id', 'password': 'password'},
            callback=self.after_login)]

    def after_login(self, response):
        # Build runcnt requests and RETURN them so Scrapy schedules them.
        # (All use the current maxnum, matching the original's behavior.)
        requests = []
        for _ in range(self.runcnt):
            requests.append(Request(
                url="http://www.test.com/view.php?idx=" + str(HotdaySpider.maxnum) + "/",
                callback=self.crawl))
        return requests

    def crawl(self, response):
        filename = 'hotday.html'
        with open(filename, 'wb') as f:
            # NOTE(review): ``unicode`` exists only in Python 2; on Python 3
            # this would be response.body.decode(response.encoding).encode('utf-8').
            f.write(unicode(response.body.decode(response.encoding)).encode('utf-8'))
        HotdaySpider.maxnum += 1
When you return a list of requests (that's what you do when you yield many of them) Scrapy will schedule them and you can't control the order in which the responses will come.
If you want to process one response at a time and in order, you would have to return only one request in your after_login method and construct the next request in your crawl method.
def after_login(self, response):
    # Start the sequential chain with a single request; crawl() constructs
    # the next request after each response, giving C-like ordering.
    return Request(url="http://www.test.com/view.php?idx=0/", callback=self.crawl)
def crawl(self, response):
    """Save the page, then issue the next request so pages load in order.

    FIXES: the original ``if < runcnt:`` was missing its left operand, and
    ``"..." + next_page + "/"`` concatenated an int to a str (TypeError).
    """
    global maxnum
    global runcnt
    filename = 'hotday.html'
    with open(filename, 'wb') as f:
        f.write(unicode(response.body.decode(response.encoding)).encode('utf-8'))
    maxnum = maxnum + 1
    # Next page index = the idx of the URL we just fetched, plus one.
    next_page = int(re.search(r'\?idx=(\d*)', response.request.url).group(1)) + 1
    if next_page < runcnt:
        return Request(url="http://www.test.com/view.php?idx=" + str(next_page) + "/",
                       callback=self.crawl)
I try to make query for tag search.
tags: how many tags ex.3
q: array of tags ex.['foo','hoo','poo']
def queryByTags(cls, tags, q):
    """Return a Card query matching the first *tags* values in *q*.

    Generalizes the original one()/two()/three() dispatch table: any number
    of tags now works, with identical behavior for 1–3 tags.

    Args:
        tags: how many tags to use, e.g. 3.
        q: list of tag values, e.g. ['foo', 'hoo', 'poo'].
    """
    filters = [models.Card.tags_value == value for value in q[:tags]]
    if len(filters) == 1:
        # Single filter: no AND node needed (matches the original one()).
        return models.Card.query(filters[0])
    return models.Card.query(ndb.AND(*filters))
This method can use up to 3 tags. I can copy code myself and extend it until 7,8,9...
It is a very tedious approach...
Is there any smart way?
In pseudo python-ndb (I didn't run my code but you'll get it) I would say that a way would be to do:
# Chain one filter per tag and count the matches.
# NOTE(review): pseudo-ndb as the answer says — real ndb filters are written
# against the model property (Card.tags_value == ...), not a bare name.
cards_count = Card.query().filter(tags_value == q[0])\
                          .filter(tags_value == q[1])\
                          .filter(tags_value == q[2]).count()
or if iterating dynamic array (unknown length)
# FIX: the original reused the name ``q`` for both the tag list being
# iterated and the query being built (``q = q.filter(...)``), and assigned
# the base query to ``cards_count`` — so the filters were applied to the
# wrong object. Use a separate name for the query under construction.
query = Card.query()
for value in q:
    query = query.filter(tags_value == value)
cards_count = query.count()