I am trying to add a record of the Transaction kind and then update the value of balance in the User entity. Here is the code:
class Transaction(db.Model):
    user_ID = db.IntegerProperty(required=True)
    amount = db.FloatProperty(default=0.0)
    remark = db.StringProperty(required=True)
    proof = db.IntegerProperty(required=True)
    time = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def add(cls, userID, amount, remark, proof):
        user = User.get_by_id(int(userID))
        logging.debug("amount : " + str(amount) + " before adding balance : " + str(user.balance))
        user.balance += amount
        logging.debug("after adding balance : " + str(user.balance))
        user.put()
        logging.debug("after putting balance : " + str(User.get_by_id(userID).balance))
        t = cls(user_ID=userID, amount=amount, remark=remark, proof=proof)
        t.put()
Log:

D 2014-12-19 00:53:23.413 amount : 0.47 before adding balance : 0.0
D 2014-12-19 00:53:23.413 after adding balance : 0.47
D 2014-12-19 00:53:23.462 after putting balance : 0.47
The logs look right, but this is not working as it should. It should:
Add a record of Transaction, and
update the balance value of User.
Instead I am getting a weird result:
the Transaction record is added, but
the User's balance is not updated.
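For what it's worth, a read-modify-write on balance like this is normally wrapped in a datastore transaction so two concurrent requests cannot clobber each other. A minimal sketch against the same old db API (the helper name _add is made up here; xg=True is needed because User and the new Transaction entity live in different entity groups):

from google.appengine.ext import db

def _add(cls, userID, amount, remark, proof):
    # the read-modify-write below runs atomically inside the transaction
    user = User.get_by_id(int(userID))
    user.balance += amount
    user.put()
    cls(user_ID=userID, amount=amount, remark=remark, proof=proof).put()

# usage: a cross-group (xg) transaction, since two entity groups are touched
xg_opts = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_opts, _add, Transaction, userID, amount, remark, proof)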
I am new to Python and I have been stuck trying out a "simple banking program". I have got everything right except for this bit:
If the user types S, then:
Ask the user for the account number.
Search the accountNumbers array for that account number and find its position.
Display the name and balance at the position found during the above search.
Originally it was just supposed to handle accounts 1-5, but now I am having trouble coming up with a way to search the account numbers when they can be any number, not just 1-5. For example, the user makes his account numbers 34, 445, 340, 2354 and 3245: completely random account numbers with no order.
Here is what I have so far:
names = []
accountNumbers = []
balance = []

def displaymenu():
    print("**** MENU OPTIONS ****")
    print("Type P to populate accounts")
    print("Type S to search for account")
    print("Type E to exit")
    choiceInput()

def choiceInput():
    choice = str(input("Please enter your choice: "))
    if (choice == "P"):
        populateAccount()
    elif (choice == "S"):
        accountNumb = int(input("Please enter the account number to search: "))
        if (accountNumb > 0) and (accountNumb < 6):
            print("Name is: " + str(names[accountNumb - 1]))
            print(names[accountNumb - 1] + " account has the balance of : $" + str(balance[accountNumb - 1]))
        elif (accountNumb == accountNumbers):
            index = names.index(accountNumb)
            accountNumb = index
            print(names[accountNumb - 1] + " account has the balance of : $" + str(balance[accountNumb - 1]))
        else:
            print("The account number not found!")
    elif (choice == "E"):
        print("Thank you for using the program.")
        print("Bye")
        raise SystemExit
    else:
        print("Invalid choice. Please try again!")
    displaymenu()

def populateAccount():
    name = 0
    for name in range(5):
        Names = str(input("Please enter a name: "))
        names.append(Names)
        account()
        name = name + 1

def account():
    accountNumber = int(input("Please enter an account number: "))
    accountNumbers.append(accountNumbers)
    balances()

def balances():
    balances = int(input("Please enter a balance: "))
    balance.append(balances)

displaymenu()
I have tried to use indexes and have not been able to find a solution.
Replace the following line of code:

if (accountNumb > 0) and (accountNumb < 6):

with:

if (accountNumb > 0) and (accountNumb < len(accountNumbers)):
My mistake. I messed up when appending the account number:

def account():
    accountNumber = int(input("Please enter an account number: "))
    accountNumbers.append(accountNumbers)
    balances()

I appended accountNumbers, not accountNumber. The code should be:

def account():
    accountNumber = int(input("Please enter an account number: "))
    accountNumbers.append(accountNumber)
    balances()
Also, the searchArray function I made was:

def searchArray(accountNumbers):
    x = int(input("Please enter an account number to search: "))
    y = accountNumbers.index(x)
    print("Name is: " + str(names[y]))
    print(str(names[y]) + " account has a balance of: " + str(balance[y]))

Rookie mistake; I shouldn't be using such similar object names.
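One more caveat about that searchArray function: list.index() raises ValueError when the value is not in the list, so a lookup that can miss is usually wrapped in try/except. A minimal sketch with the same list names as above:

def searchArray(accountNumbers):
    x = int(input("Please enter an account number to search: "))
    try:
        y = accountNumbers.index(x)
    except ValueError:
        # the number is not in the list at all
        print("The account number not found!")
        return
    print("Name is: " + str(names[y]))
    print(str(names[y]) + " account has a balance of: " + str(balance[y]))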
I am currently working on a Google Cloud project in free trial mode. I have a cron job to fetch data from a data vendor and store it in the datastore. I wrote the code to fetch the data a couple of weeks ago and it was all working fine, but all of a sudden I started receiving the error "DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded" for the last two days. I believe a cron job is supposed to time out only after 60 minutes; any idea why I am getting this error?
Cron task:
def run():
    try:
        config = cron.config
        actual_data_source = config['xxx']['xxxx']
        original_data_source = actual_data_source
        company_list = cron.rest_client.load(config, "companies", '')
        if not company_list:
            logging.info("Company list is empty")
            return "Ok"
        for row in company_list:
            company_repository.save(row, original_data_source, actual_data_source)
        return "OK"
    except Exception:
        # except clause restored so the snippet parses; mirrors the other handlers
        logging.exception("Error occurred running the cron task")
        raise
Repository code:

def save(dto, org_ds, act_dp):
    try:
        key = 'FIN/%s' % (dto['ticker'])
        company = CompanyInfo(id=key)
        company.stock_code = key
        company.ticker = dto['ticker']
        company.name = dto['name']
        company.original_data_source = org_ds
        company.actual_data_provider = act_dp
        company.put()
        return company
    except Exception:
        logging.exception("company_repository: error occurred saving the company record")
        raise
RestClient:

def load(config, resource, filter):
    try:
        username = config['xxxx']['xxxx']
        password = config['xxxx']['xxxx']
        headers = {"Authorization": "Basic %s" % base64.b64encode(username + ":" + password)}
        if filter:
            from_date = filter['from']
            to_date = filter['to']
            ticker = filter['ticker']
            start_date = datetime.strptime(from_date, '%Y%m%d').strftime("%Y-%m-%d")
            end_date = datetime.strptime(to_date, '%Y%m%d').strftime("%Y-%m-%d")
        current_page = 1
        data = []
        while True:
            if filter:
                url = config['xxxx']["endpoints"][resource] % (ticker, current_page, start_date, end_date)
            else:
                url = config['xxxx']["endpoints"][resource] % (current_page)
            response = urlfetch.fetch(
                url=url,
                deadline=60,
                method=urlfetch.GET,
                headers=headers,
                follow_redirects=False,
            )
            if response.status_code != 200:
                logging.error("xxxx GET received status code %d!" % (response.status_code))
                logging.error("error happened for url: %s with headers %s", url, headers)
                return 'Sorry, xxxx API request failed', 500
            db = json.loads(response.content)
            if not db['data']:
                break
            data.extend(db['data'])
            if db['total_pages'] == current_page:
                break
            current_page += 1
        return data
    except Exception:
        logging.exception("Error occurred with xxxx API request")
        raise
I'm guessing this is the same question as this one, but now with more code:
DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded
I modified your code to write to the database after each urlfetch. If there are more pages, it relaunches itself in a deferred task, which should happen well before the 10-minute timeout.
Uncaught exceptions in a deferred task cause it to retry, so be mindful of that.
It was unclear to me how actual_data_source and original_data_source worked, but I think you should be able to modify that part.
Cron task:
from google.appengine.ext import deferred

def run(current_page=0):
    try:
        config = cron.config
        actual_data_source = config['xxx']['xxxx']
        original_data_source = actual_data_source
        data, more = cron.rest_client.load(config, "companies", '', current_page)
        for row in data:
            company_repository.save(row, original_data_source, actual_data_source)
        # fetch the rest in a fresh request, so we never hit the deadline
        if more:
            deferred.defer(run, current_page + 1)
    except Exception as e:
        logging.exception("run() experienced an error: %s" % e)
RestClient:
def load(config, resource, filter, current_page):
    try:
        username = config['xxxx']['xxxx']
        password = config['xxxx']['xxxx']
        headers = {"Authorization": "Basic %s" % base64.b64encode(username + ":" + password)}
        if filter:
            from_date = filter['from']
            to_date = filter['to']
            ticker = filter['ticker']
            start_date = datetime.strptime(from_date, '%Y%m%d').strftime("%Y-%m-%d")
            end_date = datetime.strptime(to_date, '%Y%m%d').strftime("%Y-%m-%d")
            url = config['xxxx']["endpoints"][resource] % (ticker, current_page, start_date, end_date)
        else:
            url = config['xxxx']["endpoints"][resource] % (current_page)
        response = urlfetch.fetch(
            url=url,
            deadline=60,
            method=urlfetch.GET,
            headers=headers,
            follow_redirects=False,
        )
        if response.status_code != 200:
            logging.error("xxxx GET received status code %d!" % (response.status_code))
            logging.error("error happened for url: %s with headers %s", url, headers)
            return [], False
        db = json.loads(response.content)
        return db['data'], (db['total_pages'] != current_page)
    except Exception as e:
        logging.exception("Error occurred with xxxx API request: %s" % e)
        return [], False
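One note on deferred: the library's task handler has to be enabled before deferred.defer() calls will execute; on the python27 standard runtime that is done with a builtin in app.yaml (a minimal sketch, assuming that runtime):

builtins:
- deferred: on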
I would prefer to write this as a comment, but I need more reputation to do that.
What happens when you run the actual data fetch directly instead of through the cron job?
Have you tried measuring a time delta from the start to the end of the job?
Has the number of companies being retrieved increased dramatically?
You appear to be doing some form of stock quote aggregation; is it possible that the provider has started blocking you?
I have an API endpoint which only performs read requests. Usually, the entire operation completes in under 100 ms:
[03/Aug/2015:19:35:53 -0700] "GET /query?email=xxx%40aol.com&hash=xxx HTTP/1.1" 200 186 - "myapp-1.0.6o" "xxx.appspot.com" ms=74 cpu_ms=27 cpm_usd=0.000021 instance=00c61b117c55f2b00cdd73904665675ced040765 app_engine_release=1.9.24
However, it is not uncommon to see a request surge up to 20 seconds:
[03/Aug/2015:18:59:50 -0700] "GET /query?email=yyy%40gmail.com&hash=yyy HTTP/1.1" 200 193 - "myapp-1.0.6o" "xxx.appspot.com" ms=18288 cpu_ms=61 cpm_usd=0.000022 instance=00c61b117c55f2b00cdd73904665675ced040765 app_engine_release=1.9.24
Now, the server load is still pretty light. The endpoint gets 1-5 requests per minute, and we still fall under the free quota. The API Python code is pretty straightforward; I don't think it is the culprit behind such slow operations:
class User(ndb.Model):
    email = ndb.StringProperty(required=True)
    timestamp = ndb.DateTimeProperty(required=True)

class QueryHandler(webapp2.RequestHandler):
    def get(self):
        email = self.request.get('email')
        hash = self.request.get('hash')
        expected_hash = Utils.hash(email)
        result = {
            'email': email,
            'user_timestamp': 0,
            'server_timestamp': 0,
            'free_trial_duration': _FREE_TRIAL_DURATION
        }
        user_timestamp = 0
        if hash == expected_hash:
            user_timestamp = memcache.get(email)
            if user_timestamp is None:
                user = User.get_by_id(email)
                if user is not None:
                    user_timestamp = int(time.mktime(user.timestamp.timetuple()))
                    result['user_timestamp'] = user_timestamp
                    memcache.add(email, user_timestamp, _MEMCACHE_DURATION)
            else:
                result['user_timestamp'] = user_timestamp
        else:
            logging.debug('QueryHandler, email = ' + email + ', hash = ' + hash + ', expected_hash = ' + expected_hash)
        server_timestamp = max(int(time.time()), user_timestamp)
        result['server_timestamp'] = server_timestamp
        self.response.headers['Content-Type'] = 'application/json'
        json_result = json.dumps(result)
        self.response.out.write(json_result)
        logging.debug('QueryHandler, result = ' + json_result)

app = webapp2.WSGIApplication([
    ('/query', QueryHandler),
], debug=False)
It seems to be more about Google App Engine server quality. Is there anything we can do to avoid such unreasonable slowness in API consumption?
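One pattern that produces exactly this profile (tiny cpu_ms, huge ms, light traffic) is a loading request: at 1-5 requests per minute App Engine may shut your only instance down between calls, and the next request then pays the full startup and import cost. A hedged sketch of an app.yaml tweak that keeps an idle instance around (assuming a modules-style app.yaml with automatic scaling; resident instances can eat into the free quota):

automatic_scaling:
  min_idle_instances: 1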
String loc_expr = "distance(location, geopoint(" + userLatitude + ", " + userLongitude + "))";

// Build the SortOptions
SortOptions sortOptions = SortOptions.newBuilder()
        .addSortExpression(SortExpression.newBuilder()
                .setExpression(loc_expr)
                .setDirection(SortExpression.SortDirection.ASCENDING)
                .setDefaultValueNumeric(0))
        .setLimit(200)
        .build();

// Build the QueryOptions
QueryOptions options = QueryOptions.newBuilder()
        .addExpressionToReturn(FieldExpression.newBuilder()
                .setExpression(loc_expr)
                .setName("distance"))
        .setLimit(limit)
        .setCursor(cursor)
        .setSortOptions(sortOptions)
        .build();

String queryString = loc_expr + " < " + searchRadius * 1000;

// Build the Query and run the search
Query query = Query.newBuilder().setOptions(options).build(queryString);
IndexSpec indexSpec = IndexSpec.newBuilder().setName("restaurants").build();
Index index = SearchServiceFactory.getSearchService().getIndex(indexSpec);
Results<ScoredDocument> result = index.search(query);

if (result.getNumberFound() > 0) {
    Collection<ScoredDocument> coll = result.getResults();
    for (ScoredDocument sd : coll) {
        Key<Restaurant> key = Key.create(String.valueOf(sd.getId()));
        Restaurant rest = ofy().load().key(key).now();
        Field f = sd.getExpressions().get(0);
        log.info("distance in meter : " + f.getNumber());
    }
}
I am using the above code to get restaurants in a nearby area. Following are my observations:
Case 1: searchRadius = 0.5 km --- maximum value of distance = 0.9 km
Case 2: searchRadius = 1 km --- maximum value of distance = 1.8 km
Case 3: searchRadius = 2 km --- maximum value of distance = 2.8 km
Case 4: searchRadius = 3 km --- maximum value of distance = 4.8 km
Why am I getting distance values greater than the radius specified?
Note: I am not calculating the distance myself; the distance is returned by the Search API.
It's a known issue. Until Google fixes it, you have to filter the results in your own code.
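A minimal sketch of that post-filter, continuing the Java snippet from the question (it assumes, as above, that the returned "distance" expression is in meters):

// client-side guard against the known Search API issue:
// skip documents whose returned distance exceeds the requested radius
double radiusMeters = searchRadius * 1000;
for (ScoredDocument sd : result.getResults()) {
    Field f = sd.getExpressions().get(0); // the "distance" expression
    if (f.getNumber() > radiusMeters) {
        continue; // outside the real radius - ignore it
    }
    // ...process the restaurant as before...
}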
I'm building a web app with Django, using PostgreSQL for the DB. The app code is getting really messy (my beginner skills being a big factor) and slow, even when I run the app locally. This is an excerpt of my models.py file:
REPEATS_CHOICES = (
    (NEVER, 'Never'),
    (DAILY, 'Daily'),
    (WEEKLY, 'Weekly'),
    (MONTHLY, 'Monthly'),
    ...some more...
)

class Transaction(models.Model):
    name = models.CharField(max_length=30)
    type = models.IntegerField(max_length=1, choices=TYPE_CHOICES)  # 0 = 'Income', 1 = 'Expense'
    amount = models.DecimalField(max_digits=12, decimal_places=2)
    date = models.DateField(default=date.today)
    frequency = models.IntegerField(max_length=2, choices=REPEATS_CHOICES)
    ends = models.DateField(blank=True, null=True)
    active = models.BooleanField(default=True)
    category = models.ForeignKey(Category, related_name='transactions', blank=True, null=True)
    account = models.ForeignKey(Account, related_name='transactions')
The problem is with date, frequency and ends. With this info I can know all the dates on which a transaction occurs, and I use that to fill a cashflow table. Doing things this way involves creating a lot of structures (dictionaries, lists and tuples) and iterating over them a lot. Maybe there is a very simple way of solving this with the current schema, but I couldn't figure out how.
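For instance, expanding a monthly transaction into its occurrence dates can be delegated to dateutil's rrule instead of hand-rolled month arithmetic (a minimal sketch; start and ends stand in for the model's date and ends fields):

from datetime import date
from dateutil import rrule

start, ends = date(2015, 1, 15), date(2015, 6, 30)
# one datetime per monthly occurrence, start date included
occurrences = list(rrule.rrule(rrule.MONTHLY, dtstart=start, until=ends))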
I think the app would be easier to code if, at the creation of a transaction, I could save all of its dates in the DB. I don't know if that's possible or if it's a good idea.
I'm reading a book about Google App Engine and the datastore's multivalued properties. What do you think about those as a way of solving my problem?
Edit: I didn't know about PickleField. I'm now reading about it; maybe I could use it to store all the transaction's datetime objects.
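For reference, a minimal sketch of that idea, assuming the third-party django-picklefield package (its PickledObjectField stores an arbitrary pickled Python object in a single column):

from picklefield.fields import PickledObjectField

class Transaction(models.Model):
    # ...fields as above...
    # hypothetical extra field: the precomputed occurrence dates, pickled
    occurrence_dates = PickledObjectField(blank=True, null=True)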
Edit 2: This is an excerpt of my cashflow2 view (sorry for the horrible code):
def cashflow2(request, account_name="Initial"):
    if account_name == "Initial":
        uri = "/cashflow/new_account"
        return HttpResponseRedirect(uri)
    month_info = {}
    cat_info = {}
    m_y_list = []  # [(month, year), ]
    trans = []
    min, max = [], []  # (shadows the min/max builtins)
    account = Account.objects.get(name=account_name, user=request.user)
    categories = account.categories.all()
    for year in range(2006, 2017):
        for month in range(1, 13):
            month_info[(month, year)] = [0, 0, 0]
            for cat in categories:
                cat_info[(cat, month, year)] = 0
    previous_months = 1  # previous months from the current one
    next_months = 5
    dates_list = month_year_list(previous_months, next_months)  # returns [(month, year)] for the requested range
    m_y_list = [(date.month, date.year) for date in month_year_list(1, 5)]
    min, max = dates_list[0], dates_list[-1]
    INCOME = 0
    EXPENSE = 1
    ONHAND = 2
    transacs_in_dates = []
    txs = account.transactions.order_by('date')
    for tx in txs:
        monthyear = (tx.date.month, tx.date.year)
        if tx.frequency == 0:
            if tx.type == 0:
                month_info[monthyear][INCOME] += tx.amount
                if tx.category:
                    cat_info[(tx.category, monthyear[0], monthyear[1])] += tx.amount
            else:
                month_info[monthyear][EXPENSE] += tx.amount
                if tx.category:
                    cat_info[(tx.category, monthyear[0], monthyear[1])] += tx.amount
            if monthyear in m_y_list:  # (the original used an undefined name, lista_m_a; m_y_list assumed)
                if tx not in transacs_in_dates:
                    transacs_in_dates.append(tx)
        elif tx.frequency == 4:  # frequency = 'Monthly'
            months_dif = relativedelta.relativedelta(tx.ends, tx.date).months
            if tx.ends.day < tx.date.day:
                months_dif += 1
            years_dif = relativedelta.relativedelta(tx.ends, tx.date).years
            dif = months_dif + (years_dif * 12)
            dates_range = dif + 1
            for i in range(dates_range):
                dt = tx.date + relativedelta.relativedelta(months=+i)
                if (dt.month, dt.year) in m_y_list:
                    if tx not in transacs_in_dates:
                        transacs_in_dates.append(tx)
                # (the original used an undefined name, fch, below; dt assumed)
                if tx.type == 0:
                    month_info[(dt.month, dt.year)][INCOME] += tx.amount
                    if tx.category:
                        cat_info[(tx.category, dt.month, dt.year)] += tx.amount
                else:
                    month_info[(dt.month, dt.year)][EXPENSE] += tx.amount
                    if tx.category:
                        cat_info[(tx.category, dt.month, dt.year)] += tx.amount
    import operator
    thelist = []
    thelist = sorted((my + tuple(v) for my, v in month_info.iteritems()),
                     key=operator.itemgetter(1, 0))
    thelistlist = []
    for atuple in thelist:
        thelistlist.append(list(atuple))
    for i in range(len(thelistlist)):
        if i != 0:
            thelistlist[i][4] = thelistlist[i-1][2] - thelistlist[i-1][3] + thelistlist[i-1][4]
    list = []  # (shadows the list builtin)
    for el in thelistlist:
        if (el[0], el[1]) in m_y_list:
            list.append(el)
    transactions = account.transactions.all()
    cats_in_dates_income = []
    cats_in_dates_expense = []
    for t in transacs_in_dates:
        if t.category and t.type == 0:
            if t.category not in cats_in_dates_income:
                cats_in_dates_income.append(t.category)
        elif t.category and t.type == 1:
            if t.category not in cats_in_dates_expense:
                cats_in_dates_expense.append(t.category)
    cat_infos = []
    for k, v in cat_info.items():
        cat_infos.append((k[0], k[1], k[2], v))
It depends on how relevant App Engine is here. P.S. If you'd like to store pickled objects as well as JSON objects in the Google Datastore, check out these two code snippets:
http://kovshenin.com/archives/app-engine-json-objects-google-datastore/
http://kovshenin.com/archives/app-engine-python-objects-in-the-google-datastore/
Also note that the Google Datastore is a non-relational database, so you might run into other trouble refactoring your code to switch to it.
Cheers and good luck!