The following example script can pick out CRITICAL services, but it selects CRITICAL services in both SOFT and HARD state. I have read through the R1 doc many times and still cannot work out how to select only services in a HARD CRITICAL state.
#!/usr/bin/python
import socket

socket_path = '/var/spool/nagios/cmd/livestatus'

def sendQuery(query):
    """
    INPUT : LQL string.
    OUTPUT: query result as a string.
    REF   : http://stackoverflow.com/questions/24730883/python-errno-23-socket-livestatus
    """
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(socket_path)
    s.send(query)
    s.shutdown(socket.SHUT_WR)
    answer = ''
    while True:
        data = s.recv(1024)
        answer += data
        if len(data) < 1024:
            break
    s.close()
    return answer

LQL = 'GET services\n' + \
      'Columns: host_name description state last_state_change last_time_critical\n' + \
      'Filter: scheduled_downtime_depth = 0\n' + \
      'Filter: host_scheduled_downtime_depth = 0\n' + \
      'Filter: in_notification_period = 1\n' + \
      'Filter: state = 2\n'

print sendQuery(LQL)
R1: https://mathias-kettner.de/checkmk_livestatus.html
The answer was provided by the Livestatus community; see R1.
Filter on the state_type column: state_type = 1 selects HARD service alerts, 0 selects SOFT.
R1: http://lists.mathias-kettner.de/pipermail/checkmk-en/2016-January/018483.html
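Putting that together, here is a sketch of the query from above with the extra state_type filter added (assuming the Livestatus state_type column, where 1 = HARD and 0 = SOFT):

LQL = 'GET services\n' + \
      'Columns: host_name description state state_type last_state_change last_time_critical\n' + \
      'Filter: scheduled_downtime_depth = 0\n' + \
      'Filter: host_scheduled_downtime_depth = 0\n' + \
      'Filter: in_notification_period = 1\n' + \
      'Filter: state = 2\n' + \
      'Filter: state_type = 1\n'  # assumption: state_type 1 = HARD, 0 = SOFT

print sendQuery(LQL)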
I am trying to fetch data from a db database in batches and copy it to an ndb database, using a cursor. My code copies the first batch successfully, but it does not fetch any further records. I did not find much information on cursors; please help me here.
Here is my code snippet:

def post(self):
    a = 0
    chunk_size = 2
    next_cursor = self.request.get("cursor")
    query = db.GqlQuery("select * from BooksPost")
    while a == 0:
        if next_cursor:
            query.with_cursor(start_cursor=next_cursor)
        else:
            a = 1
        results = query.fetch(chunk_size)
        for result in results:
            nbook1 = result.bookname
            nauthor1 = result.authorname
            nbook1 = nBooksPost(nbookname=nbook1, nauthorname=nauthor1)
            nbook1.put()
        next_cursor = self.request.get("cursor")
Basically, how do I set the next cursor to iterate over?
def post(self):
    chunk_size = 10
    has_more_results = True
    query = db.GqlQuery("select * from Post")
    cursor = self.request.get('cursor', None)
    #cursor = query.cursor()
    if cursor:
        query.with_cursor(cursor)
    while has_more_results == True:
        results = query.fetch(chunk_size)
        new_cursor = query.cursor()
        print("count: %d, results %d" % (query.count(), len(results)))
        if query.count(1) == 1:
            has_more_results = True
        else:
            has_more_results = False
        for result in results:
            pass  # process each result here
        query.with_cursor(new_cursor)
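For the original copy task, a minimal sketch of the same cursor loop that also writes each batch into ndb (it reuses the BooksPost / nBooksPost models and field names from the question, which I have not verified):

def copy_books_in_batches(chunk_size=100):
    query = db.GqlQuery("select * from BooksPost")
    cursor = None
    while True:
        if cursor:
            query.with_cursor(start_cursor=cursor)
        results = query.fetch(chunk_size)
        if not results:
            break  # nothing left to copy
        for result in results:
            # copy each db entity into the ndb model
            nBooksPost(nbookname=result.bookname,
                       nauthorname=result.authorname).put()
        cursor = query.cursor()  # remember where this batch ended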
According to the Snowflake docs, when a user executes a COPY command it will return one of three status values:
loaded
load failed
partially loaded
My question: if I use the Python Snowflake Connector (see example code below) to execute a COPY command, is an exception raised when the returned status is load failed or partially loaded?
Thank you!
copy_dml = 'copy into database.schema.table ' \
           'from @fully_qualified_stage pattern = \'.*' + table_name + '.*[.]json\' ' \
           'file_format = (format_name = fully_qualified_json_format) ' \
           'force = true;'
try:
    import snowflake.connector
    #---------------------------------------------------------------------------
    # snowflake variables
    snowflake_warehouse = credentials.iloc[0]['snowflake_warehouse']
    snowflake_account = credentials.iloc[0]['snowflake_account']
    snowflake_role = credentials.iloc[0]['snowflake_role']
    snowflake_username = credentials.iloc[0]['Username']
    snowflake_password = credentials.iloc[0]['Password']

    snowflake_connection = ''
    cs = ''  # snowflake connection cursor
    exec_copy_dml = ''
    copy_result_field_metadata = ''
    copy_result = ''
    snowflake_copy_result_df = ''
    #---------------------------------------------------------------------------
    # load JSON file(s) into Snowflake
    snowflake_connection = snowflake.connector.connect(
        user = snowflake_username,
        password = snowflake_password,
        account = snowflake_account,
        warehouse = snowflake_warehouse,
        role = snowflake_role)
    cs = snowflake_connection.cursor()

    exec_copy_dml = cs.execute(copy_dml)
    copy_result = exec_copy_dml.fetchall()
    copy_result_field_metadata = cs.description
    snowflake_copy_result_df = snowflake_results_df(copy_result_field_metadata, copy_result)
except snowflake.connector.errors.ProgrammingError as copy_error:
    copy_exception_message = "There was a problem loading JSON files to Snowflake, " + \
                             "a snowflake.connector.errors.ProgrammingError exception was raised."
    print(copy_exception_message)
    raise
except Exception as error_message:
    raise
finally:
    snowflake_connection.close()
I believe it won't raise an exception based on the load status; you have to check the load status yourself and take action if required.
After you issue your COPY INTO DML, you can run the following query -
SELECT * FROM TABLE(VALIDATE(TABLE_NAME, job_id => '_last'))
This will give you details on the files that you were trying to load. It normally returns an empty result, unless you encountered issues during the load.
You can save these results in an object and make the necessary control-flow adjustments.
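As a concrete illustration, a small sketch of checking the per-file status column that COPY INTO returns, reusing a connector cursor like the one in the question. The 'file' and 'status' column names are assumptions based on the documented COPY output, so verify them against your connector version:

def check_copy_status(cursor, copy_sql):
    # Execute a COPY INTO and raise if any file did not report LOADED.
    # Assumption: the result set exposes 'file' and 'status' columns.
    cursor.execute(copy_sql)
    rows = cursor.fetchall()
    cols = [d[0].lower() for d in cursor.description]
    file_idx, status_idx = cols.index('file'), cols.index('status')
    bad = [r[file_idx] for r in rows if r[status_idx].upper() != 'LOADED']
    if bad:
        # the connector itself did not raise, so do it explicitly
        raise RuntimeError("COPY reported a non-LOADED status for: %s" % bad)
    return rows

You could call this in place of the plain cs.execute(copy_dml) line in the try block above, or run the VALIDATE query afterwards as suggested.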
I am currently working on a Google Cloud project in free-trial mode. I have a cron job that fetches data from a data vendor and stores it in the datastore. I wrote the code a couple of weeks ago and it was all working fine, but all of a sudden, for the last two days, I have been receiving the error "DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded". I believe a cron job is supposed to time out only after 60 minutes; any idea why I am getting this error?
cron task

def run():
    try:
        config = cron.config
        actual_data_source = config['xxx']['xxxx']
        original_data_source = actual_data_source

        company_list = cron.rest_client.load(config, "companies", '')
        if not company_list:
            logging.info("Company list is empty")
            return "Ok"

        for row in company_list:
            company_repository.save(row, original_data_source, actual_data_source)

        return "OK"
Repository code

def save(dto, org_ds, act_dp):
    try:
        key = 'FIN/%s' % (dto['ticker'])
        company = CompanyInfo(id=key)
        company.stock_code = key
        company.ticker = dto['ticker']
        company.name = dto['name']
        company.original_data_source = org_ds
        company.actual_data_provider = act_dp
        company.put()
        return company
    except Exception:
        logging.exception("company_repository: error occurred saving the company record")
        raise
RestClient

def load(config, resource, filter):
    try:
        username = config['xxxx']['xxxx']
        password = config['xxxx']['xxxx']
        headers = {"Authorization": "Basic %s" % base64.b64encode(username + ":" + password)}
        if filter:
            from_date = filter['from']
            to_date = filter['to']
            ticker = filter['ticker']
            start_date = datetime.strptime(from_date, '%Y%m%d').strftime("%Y-%m-%d")
            end_date = datetime.strptime(to_date, '%Y%m%d').strftime("%Y-%m-%d")
        current_page = 1
        data = []
        while True:
            if filter:
                url = config['xxxx']["endpoints"][resource] % (ticker, current_page, start_date, end_date)
            else:
                url = config['xxxx']["endpoints"][resource] % (current_page)
            response = urlfetch.fetch(
                url=url,
                deadline=60,
                method=urlfetch.GET,
                headers=headers,
                follow_redirects=False,
            )
            if response.status_code != 200:
                logging.error("xxxx GET received status code %d!" % (response.status_code))
                logging.error("error happened for url: %s with headers %s", url, headers)
                return 'Sorry, xxxx API request failed', 500
            db = json.loads(response.content)
            if not db['data']:
                break
            data.extend(db['data'])
            if db['total_pages'] == current_page:
                break
            current_page += 1
        return data
    except Exception:
        logging.exception("Error occurred with xxxx API request")
        raise
I'm guessing this is the same question as this, but now with more code:
DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded
I modified your code to write to the database after each urlfetch. If there are more pages, then it relaunches itself in a deferred task, which should be well before the 10 minute timeout.
Uncaught exceptions in a deferred task cause it to retry, so be mindful of that.
It was unclear to me how actual_data_source & original_data_source worked, but I think you should be able to modify that part.
cron task

from google.appengine.ext import deferred  # needed for deferred.defer below

def run(current_page=0):
    try:
        config = cron.config
        actual_data_source = config['xxx']['xxxx']
        original_data_source = actual_data_source

        data, more = cron.rest_client.load(config, "companies", '', current_page)

        for row in data:
            company_repository.save(row, original_data_source, actual_data_source)

        # fetch the rest
        if more:
            deferred.defer(run, current_page + 1)
    except Exception as e:
        logging.exception("run() experienced an error: %s" % e)
RestClient

def load(config, resource, filter, current_page):
    try:
        username = config['xxxx']['xxxx']
        password = config['xxxx']['xxxx']
        headers = {"Authorization": "Basic %s" % base64.b64encode(username + ":" + password)}
        if filter:
            from_date = filter['from']
            to_date = filter['to']
            ticker = filter['ticker']
            start_date = datetime.strptime(from_date, '%Y%m%d').strftime("%Y-%m-%d")
            end_date = datetime.strptime(to_date, '%Y%m%d').strftime("%Y-%m-%d")
            url = config['xxxx']["endpoints"][resource] % (ticker, current_page, start_date, end_date)
        else:
            url = config['xxxx']["endpoints"][resource] % (current_page)
        response = urlfetch.fetch(
            url=url,
            deadline=60,
            method=urlfetch.GET,
            headers=headers,
            follow_redirects=False,
        )
        if response.status_code != 200:
            logging.error("xxxx GET received status code %d!" % (response.status_code))
            logging.error("error happened for url: %s with headers %s", url, headers)
            return [], False
        db = json.loads(response.content)
        return db['data'], (db['total_pages'] != current_page)
    except Exception as e:
        logging.exception("Error occurred with xxxx API request: %s" % e)
        return [], False
I would prefer to write this as a comment, but I need more reputation to do that.

What happens when you run the actual data fetch directly instead of through the cron job?

Have you tried measuring a time delta from the start to the end of the job? (A small timing sketch follows this list.)

Has the number of companies being retrieved increased dramatically?

You appear to be doing some form of stock quote aggregation - is it possible that the provider has started blocking you?
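For the timing suggestion above, a minimal sketch that logs how long the job takes end to end; run_timed is a hypothetical wrapper around the run() cron entry point from the question:

import logging
import time

def run_timed():
    start = time.time()
    try:
        run()  # the question's cron entry point
    finally:
        # log the elapsed time so you can see how close it gets to the deadline
        logging.info("cron run() took %.1f seconds", time.time() - start)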
Simply put, I need three things:
1) Log-in
2) Multiple requests
3) Synchronous requests (sequential, like in C)
I realized 'yield' should be used for multiple requests, but I think 'yield' behaves differently from C and is not sequential. So I want to issue requests without 'yield', as below, but the crawl method is never called. How can I call the crawl method sequentially, like in C?
class HotdaySpider(scrapy.Spider):
    name = "hotday"
    allowed_domains = ["test.com"]
    login_page = "http://www.test.com"
    start_urls = ["http://www.test.com"]
    maxnum = 27982
    runcnt = 10

    def parse(self, response):
        return [FormRequest.from_response(response,
                                          formname='login_form',
                                          formdata={'id': 'id', 'password': 'password'},
                                          callback=self.after_login)]

    def after_login(self, response):
        global maxnum
        global runcnt
        i = 0
        while i < runcnt:
            Request(url="http://www.test.com/view.php?idx=" + str(maxnum) + "/", callback=self.crawl)
            i = i + 1

    def crawl(self, response):
        global maxnum
        filename = 'hotday.html'
        with open(filename, 'wb') as f:
            f.write(unicode(response.body.decode(response.encoding)).encode('utf-8'))
        maxnum = maxnum + 1
When you return a list of requests (which is effectively what you do when you yield many of them), Scrapy will schedule them and you can't control the order in which the responses arrive.
If you want to process one response at a time and in order, you have to return only one request from your after_login method and construct the next request in your crawl method.
import re

def after_login(self, response):
    return Request(url="http://www.test.com/view.php?idx=0/", callback=self.crawl)

def crawl(self, response):
    global maxnum
    global runcnt
    filename = 'hotday.html'
    with open(filename, 'wb') as f:
        f.write(unicode(response.body.decode(response.encoding)).encode('utf-8'))
    maxnum = maxnum + 1
    next_page = int(re.search('\?idx=(\d*)', response.request.url).group(1)) + 1
    if next_page < runcnt:
        return Request(url="http://www.test.com/view.php?idx=" + str(next_page) + "/", callback=self.crawl)
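A variant sketch of the same chaining idea that carries the index in request.meta instead of module-level globals; the URLs and runcnt are the question's placeholders, and the per-index file name is an assumption:

def after_login(self, response):
    return Request(url="http://www.test.com/view.php?idx=0/",
                   meta={'idx': 0}, callback=self.crawl)

def crawl(self, response):
    idx = response.meta['idx']
    with open('hotday_%d.html' % idx, 'wb') as f:
        f.write(response.body)  # save the raw page for this index
    if idx + 1 < self.runcnt:
        # chain the next request only after this response has been processed
        return Request(url="http://www.test.com/view.php?idx=%d/" % (idx + 1),
                       meta={'idx': idx + 1}, callback=self.crawl)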
I'm building a web app with Django and using PostgreSQL for the database. The app code is getting really messy (my beginner skills being a big factor) and slow, even when I run it locally.
This is an excerpt of my models.py file:
REPEATS_CHOICES = (
    (NEVER, 'Never'),
    (DAILY, 'Daily'),
    (WEEKLY, 'Weekly'),
    (MONTHLY, 'Monthly'),
    ...some more...
)

class Transaction(models.Model):
    name = models.CharField(max_length=30)
    type = models.IntegerField(max_length=1, choices=TYPE_CHOICES)  # 0 = 'Income', 1 = 'Expense'
    amount = models.DecimalField(max_digits=12, decimal_places=2)
    date = models.DateField(default=date.today)
    frequency = models.IntegerField(max_length=2, choices=REPEATS_CHOICES)
    ends = models.DateField(blank=True, null=True)
    active = models.BooleanField(default=True)
    category = models.ForeignKey(Category, related_name='transactions', blank=True, null=True)
    account = models.ForeignKey(Account, related_name='transactions')
The problem is with date, frequency and ends. With this info I can work out all the dates on which a transaction occurs and use them to fill a cashflow table. Doing things this way involves creating a lot of structures (dictionaries, lists and tuples) and iterating over them a lot. Maybe there is a very simple way of solving this with the current schema, but I couldn't figure it out.
I think the app would be easier to code if, at the creation of a transaction, I could save all of its dates in the db. I don't know if that's possible or if it's a good idea.
I'm reading a book about Google App Engine and the datastore's multivalued properties. What do you think about using those to solve my problem?
Edit: I didn't know about the PickleField. I'm now reading about it; maybe I could use it to store all the transaction's datetime objects.
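A hedged sketch of that idea: precompute the occurrence dates when the transaction is saved and store them on the row. It assumes django-picklefield's PickledObjectField and the frequency codes from the model above (0 = Never, 4 = Monthly); any serialized field, or a separate occurrence table, would work just as well:

from dateutil import relativedelta
from picklefield.fields import PickledObjectField  # assumption: django-picklefield is installed

class Transaction(models.Model):
    # ... existing fields from the model above ...
    occurrence_dates = PickledObjectField(default=list, editable=False)

    def build_occurrences(self):
        # 0 = 'Never' repeats, or no end date: just the start date
        if self.frequency == 0 or not self.ends:
            return [self.date]
        step = relativedelta.relativedelta(months=1) if self.frequency == 4 else None
        if step is None:
            return [self.date]  # other frequencies omitted in this sketch
        dates, current = [], self.date
        while current <= self.ends:
            dates.append(current)
            current = current + step
        return dates

    def save(self, *args, **kwargs):
        # recompute the stored dates every time the transaction changes
        self.occurrence_dates = self.build_occurrences()
        super(Transaction, self).save(*args, **kwargs)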
Edit2: This is an excerpt of my cashflow2 view (sorry for the horrible code):
def cashflow2(request, account_name="Initial"):
    if account_name == "Initial":
        uri = "/cashflow/new_account"
        return HttpResponseRedirect(uri)

    month_info = {}
    cat_info = {}
    m_y_list = []  # [(month, year), ]
    trans = []
    min, max = [], []

    account = Account.objects.get(name=account_name, user=request.user)
    categories = account.categories.all()

    for year in range(2006, 2017):
        for month in range(1, 13):
            month_info[(month, year)] = [0, 0, 0]
            for cat in categories:
                cat_info[(cat, month, year)] = 0

    previous_months = 1  # previous months from the current one
    next_months = 5
    dates_list = month_year_list(previous_months, next_months)  # returns [(month, year)] for the requested range
    m_y_list = [(date.month, date.year) for date in month_year_list(1, 5)]
    min, max = dates_list[0], dates_list[-1]

    INCOME = 0
    EXPENSE = 1
    ONHAND = 2

    transacs_in_dates = []
    txs = account.transactions.order_by('date')
    for tx in txs:
        monthyear = (tx.date.month, tx.date.year)
        if tx.frequency == 0:
            if tx.type == 0:
                month_info[monthyear][INCOME] += tx.amount
                if tx.category:
                    cat_info[(tx.category, monthyear[0], monthyear[1])] += tx.amount
            else:
                month_info[monthyear][EXPENSE] += tx.amount
                if tx.category:
                    cat_info[(tx.category, monthyear[0], monthyear[1])] += tx.amount
            if monthyear in m_y_list:
                if tx not in transacs_in_dates:
                    transacs_in_dates.append(tx)
        elif tx.frequency == 4:  # frequency = 'Monthly'
            months_dif = relativedelta.relativedelta(tx.ends, tx.date).months
            if tx.ends.day < tx.date.day:
                months_dif += 1
            years_dif = relativedelta.relativedelta(tx.ends, tx.date).years
            dif = months_dif + (years_dif * 12)
            dates_range = dif + 1
            for i in range(dates_range):
                dt = tx.date + relativedelta.relativedelta(months=+i)
                if (dt.month, dt.year) in m_y_list:
                    if tx not in transacs_in_dates:
                        transacs_in_dates.append(tx)
                if tx.type == 0:
                    month_info[(dt.month, dt.year)][INCOME] += tx.amount
                    if tx.category:
                        cat_info[(tx.category, dt.month, dt.year)] += tx.amount
                else:
                    month_info[(dt.month, dt.year)][EXPENSE] += tx.amount
                    if tx.category:
                        cat_info[(tx.category, dt.month, dt.year)] += tx.amount

    import operator
    thelist = sorted((my + tuple(v) for my, v in month_info.iteritems()),
                     key=operator.itemgetter(1, 0))
    thelistlist = []
    for atuple in thelist:
        thelistlist.append(list(atuple))
    for i in range(len(thelistlist)):
        if i != 0:
            thelistlist[i][4] = thelistlist[i-1][2] - thelistlist[i-1][3] + thelistlist[i-1][4]

    list = []
    for el in thelistlist:
        if (el[0], el[1]) in m_y_list:
            list.append(el)

    transactions = account.transactions.all()
    cats_in_dates_income = []
    cats_in_dates_expense = []
    for t in transacs_in_dates:
        if t.category and t.type == 0:
            if t.category not in cats_in_dates_income:
                cats_in_dates_income.append(t.category)
        elif t.category and t.type == 1:
            if t.category not in cats_in_dates_expense:
                cats_in_dates_expense.append(t.category)

    cat_infos = []
    for k, v in cat_info.items():
        cat_infos.append((k[0], k[1], k[2], v))
It depends on how relevant App Engine is here. P.S. If you'd like to store pickled objects as well as JSON objects in the Google Datastore, check out these two code snippets:
http://kovshenin.com/archives/app-engine-json-objects-google-datastore/
http://kovshenin.com/archives/app-engine-python-objects-in-the-google-datastore/
Also note that the Google Datastore is a non-relational database, so you might run into other trouble refactoring your code to switch to it.
Cheers and good luck!
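As a small illustration of that point, a sketch of storing a serialized schedule on an App Engine entity using ndb's built-in JsonProperty and PickleProperty; the entity and field names here are hypothetical, not from the question's code:

from google.appengine.ext import ndb

class TransactionEntity(ndb.Model):  # hypothetical entity, not from the question
    name = ndb.StringProperty()
    amount = ndb.FloatProperty()
    occurrence_dates = ndb.JsonProperty()   # e.g. a list of ISO date strings
    schedule = ndb.PickleProperty()         # an arbitrary pickled Python object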