Other commands are delayed when a command error occurs in discord py - discord

#bot.command()
async def timer(ctx,minutes:int):
import datetime
start_time = datetime.datetime.now()
end_time = start_time + datetime.timedelta(minutes=minutes)
previous_time = datetime.datetime.now()
message = await ctx.channel.send('start timer!')
try:
while datetime.datetime.now() <= end_time:
current_time = datetime.datetime.now()
if current_time.minute != previous_time.minute:
previous_time = current_time
diff = end_time - datetime.datetime.now()
diff_seconds = diff.seconds
end_time_formatted = end_time.strftime('%I:%M:%S %p')
await message.edit(
content=f'> timer\n```⏰end time:{end_time_formatted}\n⏰{diff_seconds // 60}minute {diff_seconds%60}second left```'
)
except:
await ctx.channel.send('timer reset')
await message.edit(content='> time out!')
await ctx.channel.send('on time now')
The current timer is using message deletion because there is no other way to end it. However, when deleting the message, an error message appears after a one-minute interval. However, other commands are not available until error message appears.
i looking for way to solve it.

Related

How do I create a timestamp in an embed with pycord?

I would like my sent msg log which is an embedded msg to have a timestamp, so it would have a footing like bot_name • Today at 10:48 PM
Here is my current code,
#bot.event
async def on_message(ctx):
if ctx.author.bot: return
else:
log_msg = bot.get_channel(1023451687857442828)
embed = discord.Embed(
title = "Sent Message",
description = f"""This message was sent by{ctx.author.mention}, in {ctx.channel}\n**Message:** "{ctx.content}" """,
color = discord.Colour.green(),
)
embed.set_author(name=ctx.author)
embed.set_footer(text="bot_name")
await log_msg.send(embed=embed)```
You could do the following:
(remaining of your code)
now = datetime.now() # Gets your current time as a datetime object
embed.set_footer(text=f'bot_name • Today at {now.strftime("%H:%M")}') # Set your embed footer with the formatted time
await log_msg.send(embed=embed) # Sends your footer (already present in your original code)
If you don't have it imported already, you'll have to import datetime from datetime, or import datetime and do datetime.datetime.now() instead of just datetime.now()
You can use ctx.message.created_at to add a timestamp. That would look like this in the context of your code:
#bot.event
async def on_message(ctx):
if ctx.author.bot: return
else:
log_msg = bot.get_channel(1023451687857442828)
embed = discord.Embed(
title = "Sent Message",
description = f"""This message was sent by{ctx.author.mention}, in {ctx.channel}\n**Message:** "{ctx.content}" """,
color = discord.Colour.green(),
# message created at code below
timestamp = ctx.message.created_at
)
embed.set_author(name=ctx.author)
embed.set_footer(text="bot_name")
await log_msg.send(embed=embed)
Hope this helps

How do i make my discord.py bot go through a text file and if a user says a word in that file it will delete that message + how to make uptime command

I want my discord.py bot to go through a text file and if a user says a word in that file it will delete that message
elif msg.content == '***':
await msg.channel.send("Dont curse")
await msg.delete()
that is my code but i want to replace *** with a text document
And also another question i have some issues with an uptime command
async def on_ready():
print("Bot is running")
activity = discord.Game(name="!test", type=3)
await client.change_presence(status=discord.Status.idle, activity=activity)
channel = client.get_channel(944665750453497886)
myid = '<#516236429689618467>'
await channel.send("I am online "+myid)
#Uptime code starts
global startdate
startdate = datetime.now()
#client.command()
async def uptime(ctx):
now = datetime.now()
uptime = startdate - now
uptime = uptime.strftime('%d/%h/%M')
await ctx.send(f'Uptime: {uptime}')

Command raised an exception: TypeError: Object of type File is not JSON serializable but no other solution working for me

The Function of discord command named server
#bot.command(name = 'server')
async def _minecraftserverinfo(ctx, ip:str, port:str, servername:str):
if servername == "survival":
svname = "Survival"
elif servername == "lobby":
svname = "Lobby"
#serverembed = createEmbed(ip, port)
#value = f"The server has {status.players.online} players and replied in {status.latency} ms\nThe server replied in {latency} ms\nThe server has the following players online: {','.join(query.players.names)}"
server = MinecraftServer.lookup(ip+':'+port)
#=======================================
#=======================================
# 'status' is supported by all Minecraft servers that are version 1.7 or higher.
status = server.status()
latency = server.ping()
query = server.query()
serverembed = discord.Embed(title = ip, url = "https://www.zer02infinity.com/",description = f"Zero To Infinity Minecraft {svname} Server")#\nThe server has the following players online: {','.join(query.players.names)}")
serverembed.set_thumbnail(url="https://cdn.discordapp.com/attachments/833829693588373534/920017091325747220/2021-12-13_11.png")
serverembed.add_field(name="Players Count", value=f"{status.players.online}/{query.players.max}", inline=True)
serverembed.add_field(name="Direct Connect:", value=f"```mc.zer02infinity.com```", inline=True)
#serverembed.add_field(name="Names:", value=f"{latency} ms", inline=True)
#nl = '\n'
#serverembed.add_field(name="Names:", value=f"{nl}{nl.join(query.players.names)}", inline=False)
#serverembed.add_field(name="Names:", value={'\n'.join(query.players.ping)}, inline=True)
#sending embed
embed = await ctx.send(embed=serverembed)
await embed.add_reaction(emoji)
message_id = embed.id
while True:
server = MinecraftServer.lookup(ip+':'+port)
# 'status' is supported by all Minecraft servers that are version 1.7 or higher.
status = server.status()
latency = server.ping()
query = server.query()
serverembed = discord.Embed(title = ip, url = "https://www.zer02infinity.com/",description = f"Zero To Infinity Minecraft {svname} Server")#\nThe server has the following players online: {','.join(query.players.names)}")
serverembed.set_thumbnail(url="https://cdn.discordapp.com/attachments/833829693588373534/920017091325747220/2021-12-13_11.png")
serverembed.add_field(name="Players Count", value=f"{status.players.online}/{query.players.max}", inline=True)
serverembed.add_field(name="Direct Connect:", value=f"```mc.zer02infinity.com```", inline=True)
#serverembed.add_field(name="Names:", value=f"{latency} ms", inline=True)
#nl = '\n'
problem started after i added a image from local file
file = discord.File("./foo.png", filename="foo.png")
serverembed.add_field(name="Names:", value={','.join(query.players.names)}, inline=False)
serverembed.set_image(url="attachment://foo.png")
#print('Winners are:', *names, sep='\n')
serverembed = await embed.edit(file=file, embed = serverembed)
await asyncio.sleep(60)
pass
So this is the recent code i added where foo.png is maintained by graph.py given below
import discord
from discord.ext import commands
import matplotlib.pyplot as plt
class Equations(commands.Cog):
def __init__(self, bot):
self.bot = bot
def plotlineareq(self, a, b, clr):
x = [0, 10]
y = [(a * i + b) for i in x]
plt.figure(figsize=(10, 10)) # #Size of Graph
plt.xlim(x) # #X Range [-6,6]
plt.ylim(x) # #Y Range [-6,6]
axis = plt.gca() # #Get Current Axis
plt.plot(axis.get_xlim(), [0, 0], 'k--') # #X Axis Plots Line Across
plt.plot([0, 0], axis.get_ylim(), 'k--') # #Y Axis Plots Line Across
plt.locator_params(axis="x", nbins=20)
plt.locator_params(axis="y", nbins=20)
plt.plot(x, y, label='linear', linestyle='solid', color=clr)
plt.ylabel('Players')
plt.xlabel('Time')
mm = str(a)
bb = str(b)
plt.title('Live Status')
#plt.grid()
plt.savefig("foo.png")
#commands.command()
async def linear(self, ctx, equation):
try:
equation = equation.replace(" ", "")
mx = equation.split("x")[0]
mx = equation.replace("x", "").replace("y=", "")
bx = equation.split("+")[1]
self.plotlineareq(mx, bx, 'b')
file = discord.File("foo.png", filename='foo.png')
embed = discord.Embed(color=0xff0000)
embed = embed.set_image(url="attachment://foo.png")
await ctx.send(file=file, embed=embed)
except Exception as e:
await ctx.send(f"An error occured: {e}")
def setup(bot):
bot.add_cog(Equations(bot))
After executing it gives the following error in error log-
Command raised an exception: TypeError: Object of type File is not JSON serializable

cron job throwing DeadlineExceededError

I am currently working on a google cloud project in free trial mode. I have cron job to fetch the data from a data vendor and store it in the data store. I wrote the code to fetch the data couple of weeks ago and it was all working fine but all of sudden , i started receiving error "DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded" for last two days. I believe cron job is supposed to timeout only after 60 minutes any idea why i am getting the error?.
cron task
def run():
try:
config = cron.config
actual_data_source = config['xxx']['xxxx']
original_data_source = actual_data_source
company_list = cron.rest_client.load(config, "companies", '')
if not company_list:
logging.info("Company list is empty")
return "Ok"
for row in company_list:
company_repository.save(row,original_data_source, actual_data_source)
return "OK"
Repository code
def save( dto, org_ds , act_dp):
try:
key = 'FIN/%s' % (dto['ticker'])
company = CompanyInfo(id=key)
company.stock_code = key
company.ticker = dto['ticker']
company.name = dto['name']
company.original_data_source = org_ds
company.actual_data_provider = act_dp
company.put()
return company
except Exception:
logging.exception("company_repository: error occurred saving the company
record ")
raise
RestClient
def load(config, resource, filter):
try:
username = config['xxxx']['xxxx']
password = config['xxxx']['xxxx']
headers = {"Authorization": "Basic %s" % base64.b64encode(username + ":"
+ password)}
if filter:
from_date = filter['from']
to_date = filter['to']
ticker = filter['ticker']
start_date = datetime.strptime(from_date, '%Y%m%d').strftime("%Y-%m-%d")
end_date = datetime.strptime(to_date, '%Y%m%d').strftime("%Y-%m-%d")
current_page = 1
data = []
while True:
if (filter):
url = config['xxxx']["endpoints"][resource] % (ticker, current_page, start_date, end_date)
else:
url = config['xxxx']["endpoints"][resource] % (current_page)
response = urlfetch.fetch(
url=url,
deadline=60,
method=urlfetch.GET,
headers=headers,
follow_redirects=False,
)
if response.status_code != 200:
logging.error("xxxx GET received status code %d!" % (response.status_code))
logging.error("error happend for url: %s with headers %s", url, headers)
return 'Sorry, xxxx API request failed', 500
db = json.loads(response.content)
if not db['data']:
break
data.extend(db['data'])
if db['total_pages'] == current_page:
break
current_page += 1
return data
except Exception:
logging.exception("Error occured with xxxx API request")
raise
I'm guessing this is the same question as this, but now with more code:
DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded
I modified your code to write to the database after each urlfetch. If there are more pages, then it relaunches itself in a deferred task, which should be well before the 10 minute timeout.
Uncaught exceptions in a deferred task cause it to retry, so be mindful of that.
It was unclear to me how actual_data_source & original_data_source worked, but I think you should be able to modify that part.
crontask
def run(current_page=0):
try:
config = cron.config
actual_data_source = config['xxx']['xxxx']
original_data_source = actual_data_source
data, more = cron.rest_client.load(config, "companies", '', current_page)
for row in data:
company_repository.save(row, original_data_source, actual_data_source)
# fetch the rest
if more:
deferred.defer(run, current_page + 1)
except Exception as e:
logging.exception("run() experienced an error: %s" % e)
RestClient
def load(config, resource, filter, current_page):
try:
username = config['xxxx']['xxxx']
password = config['xxxx']['xxxx']
headers = {"Authorization": "Basic %s" % base64.b64encode(username + ":"
+ password)}
if filter:
from_date = filter['from']
to_date = filter['to']
ticker = filter['ticker']
start_date = datetime.strptime(from_date, '%Y%m%d').strftime("%Y-%m-%d")
end_date = datetime.strptime(to_date, '%Y%m%d').strftime("%Y-%m-%d")
url = config['xxxx']["endpoints"][resource] % (ticker, current_page, start_date, end_date)
else:
url = config['xxxx']["endpoints"][resource] % (current_page)
response = urlfetch.fetch(
url=url,
deadline=60,
method=urlfetch.GET,
headers=headers,
follow_redirects=False,
)
if response.status_code != 200:
logging.error("xxxx GET received status code %d!" % (response.status_code))
logging.error("error happend for url: %s with headers %s", url, headers)
return [], False
db = json.loads(response.content)
return db['data'], (db['total_pages'] != current_page)
except Exception as e:
logging.exception("Error occured with xxxx API request: %s" % e)
return [], False
I would prefer to write this as a comment, but I need more reputation to do that.
What happens when you run the actual data fetch directly instead of
through the cron job?
Have you tried measuring a time delta from the start to the end of
the job?
Has the number of companies being retrieved increased dramatically?
You appear to be doing some form of stock quote aggregation - is it
possible that the provider has started blocking you?

Benchmarking retrieval in cloudant

I want to retrieve a set of keys from a cloudant db. I tried a few ways. However, custom_result performs lightning fast as compared to other methods. Can someone explain why?
from cloudant import cloudant
import json
import time
from cloudant.result import Result,ResultByKey
with open('credentials.json') as f:
cred = json.load(f)
with cloudant(str(cred['credentials']['username']),str(cred['credentials']['password']),url=str(cred['credentials']['url'])) as client:
my_database = client['my_database']
#Using POST
payload = {"keys": ["012","345"]}
end_point = '{0}/{1}'.format(client.server_url, 'my_database/_all_docs')
params = {'include_docs': 'true'}
start = time.time()
response = client.r_session.post(end_point, data=json.dumps(payload), params=params)
end = time.time()
print end-start
#using custom_result
start = time.time()
result = my_database.custom_result(include_docs=True,keys=["012","345"])
end = time.time()
print end-start
#using all_doc
start = time.time()
result = my_database.all_docs(include_docs=True,keys=["012","345"])
end = time.time()
print end-start
#using iteration
keys=["012","345"]
start = time.time()
result=[]
result_collection = Result(my_database.all_docs, include_docs=True)
for i in range(len(keys)):
result.append(result_collection[ResultByKey(i)])
end = time.time()
print end-start
My output is as follows:
0.426064968109
4.10079956055e-05
0.409541845322
0.819295167923
Can someone tell me why custom_result performs so fast?
Function database.custom_result() only creates the cloudant.result.Result instance, no results are returned from Cloudant. You can try test it this way, end-start will be the whole time you get response from Cloudant.
start = time.time()
with database.custom_result(include_docs=True) as result:
data = result[:]
end = time.time()
print end-start

Resources