I'm a beginner in Python. I have many PCs connected through a network switch: one is the manager PC, the others are client PCs. I want to write a Python batch program that remotely copies/deletes files, much like a DOS batch command.
Any help on how to start?
The DOS batch command:
xcopy E:\Share_main\A\*.* \\124.122.11.101\A\ /e /h /k /Y
Here is some sample code:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# based on Carnival http://ask.python.kr/users/6970/carnival/
import os, sys, csv, re, datetime
from multiprocessing import Process

class Server:
    def __init__(self, addr, path):
        self.addr = addr
        self.path = path

def multi_distribute_from_buffers(server, dirpath, filenames, subdir, buffers):
    # Rebuild the remote target path (//addr/share/...) by appending the
    # components of dirpath that lie below the source folder.
    l = re.findall(r"[\w']+", subdir)
    m = re.findall(r"[\w']+", dirpath)
    cnt_l = len(l)
    cnt_m = len(m)
    remotepath = "//%s/%s" % (server.addr, server.path)
    if cnt_m > cnt_l:
        for j in range(cnt_m - cnt_l):
            remotepath += "/%s" % m[cnt_l + j]
    for index, filename in enumerate(filenames):
        remotepathfile = "%s/%s" % (remotepath, filename)
        with open(remotepathfile, 'wb') as outFile:
            outFile.write(buffers[index])

def make_dir(server_list, subdir, dirpath):
    # Mirror the local directory layout on every client share.
    for server in server_list:
        l = re.findall(r"[\w']+", subdir)
        m = re.findall(r"[\w']+", dirpath)
        cnt_l = len(l)
        cnt_m = len(m)
        path = "//%s/%s" % (server.addr, server.path)
        if cnt_m > cnt_l:
            for j in range(cnt_m - cnt_l):
                path += "/%s" % m[cnt_l + j]
        d = os.path.dirname(path)
        if not os.path.exists(d):
            print("{}, Make dir {}".format(datetime.datetime.now(), d))
            os.makedirs(d)
        if not os.path.exists(path):
            print("{}, Make dir {}".format(datetime.datetime.now(), path))
            os.makedirs(path)

def dist_mems(server_list, subdir):
    filecount = 0
    for dirpath, dirnames, filenames in os.walk(subdir):
        make_dir(server_list, subdir, dirpath)
        buffers = []
        for filename in filenames:
            pathname = os.path.join(dirpath, filename)
            filecount += 1  # note: ++filecount is a no-op in Python
            print("{}, {} : Read from {}".format(filecount, datetime.datetime.now(), pathname))
            with open(pathname, 'rb') as inFile:
                buffers.append(inFile.read())
        # One writer process per client, all fed from the same in-memory buffers.
        for server in server_list:
            Process(target=multi_distribute_from_buffers,
                    args=(server, dirpath, filenames, subdir, buffers)).start()

def get_server_list(filename):
    # CSV rows are "addr,share" pairs, one client per row.
    server_list = []
    with open(filename, "r") as f:
        for row in csv.reader(f):
            server_list.append(Server(row[0], row[1]))
    return server_list

if __name__ == '__main__':
    start = datetime.datetime.now()
    clientListfile = sys.argv[1] if len(sys.argv) > 1 else 'C:\\Users\\Public\\client_list.csv'
    sourceFolder = sys.argv[2] if len(sys.argv) > 2 else 'C:\\Users\\Public\\'
    server_list = get_server_list(clientListfile)
    dist_mems(server_list, sourceFolder)
    end = datetime.datetime.now()
    print(" XCopying Start to {} clients : {}".format(len(server_list), start))
    print(" XCopying finished : {}".format(end))
    print(" XCopying Total time span : {}".format(end - start))
Here I am simply calling a 3rd-party API through multiprocessing to get stock prices. I call this function several times because I want the stock data at multiple timeframes (5 min, 10 min, 30 min). But when I run it, it does not wait for the previous call to finish and instead moves on to the last one. How do I run each call in order?
import pickle
import pandas as pd
import datetime
import multiprocessing
import time
import subprocess, os

def historical_data(timeframe):
    global prices
    today_date = datetime.date.today()  # was missing in the original; used below

    def split_dict_equally(input_dict, chunks=2):
        "Splits dict by keys. Returns a list of dictionaries."
        # prep with empty dicts
        return_list = [dict() for idx in range(chunks)]
        idx = 0
        for k, v in input_dict.items():
            return_list[idx][k] = v
            if idx < chunks - 1:  # indexes start at 0
                idx += 1
            else:
                idx = 0
        return return_list

    with open('zerodha_login.pkl', 'rb') as file:
        # Call load method to deserialize
        login_credentials = pickle.load(file)
    with open('zerodha_instruments.pkl', 'rb') as file:
        # Call load method to deserialize
        inst_dict = pickle.load(file)

    csv = pd.read_csv('D:\\Business\\Website\\Trendlines\\FO Stocks.csv')
    csv['Stocks'] = csv['Stocks'].str.replace(' ', '')
    fo_stocks = csv['Stocks'].to_list()
    inst = pd.DataFrame(inst_dict)
    filtered_inst = inst[(inst['segment'] == 'NSE') & (inst['name'] != '') & (inst['tick_size'] == 0.05)]
    filtered_inst = filtered_inst[filtered_inst['tradingsymbol'].isin(fo_stocks)]
    tickers_dict = dict(zip(filtered_inst['instrument_token'], filtered_inst['tradingsymbol']))
    number_process = 16
    tickers_dict_list = split_dict_equally(tickers_dict, number_process)

    def prices(stock):
        print('inside_function', os.getpid())
        for x, y in stock.items():
            print('inside_stock_loop')
            while True:
                try:
                    print('Timeframe::', timeframe, y)
                    data = login_credentials['kite'].historical_data(
                        instrument_token=x,
                        from_date=today_date - datetime.timedelta(days=1000),
                        interval=str(timeframe),
                        to_date=today_date)
                    df = pd.DataFrame(data)
                    g = [e for e in df.columns if 'Un' not in e]
                    df = df[g]
                    df['date'] = df['date'].astype(str)
                    df['date'] = df['date'].str.split('+')
                    df['Date'] = df['date'].str[0]
                    df = df[['Date', 'open', 'high', 'low', 'close', 'volume']]
                    df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d %H:%M:%S')
                    df['Time'] = df['Date'].dt.time
                    df['Date'] = df['Date'].dt.date
                    df.rename(columns={'open': 'Open', 'high': 'High', 'low': 'Low',
                                       'close': 'Close', 'volume': 'Volume'}, inplace=True)
                    df.to_csv('D:\\Business\\Website\\Trendlines\\4th Cut\\Historical data\\'
                              + str(timeframe) + '\\' + str(y) + '.csv')
                    break
                except:
                    print('Issue ::', y)

    new_list = []
    if __name__ == '__main__':  # note: this guard sits inside historical_data
        for process in tickers_dict_list:
            p = multiprocessing.Process(target=prices, args=(process,))
            p.start()
            new_list.append(p)
        for p in new_list:
            print('joining_', p)
            p.join()

historical_data('5minute')
historical_data('10minute')
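For reference, a minimal sketch of the start/join pattern in question: the worker is defined at module level (so Windows can pickle it), and each batch of processes is started and then joined before the next call begins. Names here are illustrative, not from the code above.

import multiprocessing
import time

def worker(chunk):
    # stand-in for the real per-chunk download
    time.sleep(1)
    print('done', chunk)

def run_batch(chunks):
    procs = [multiprocessing.Process(target=worker, args=(c,)) for c in chunks]
    for p in procs:
        p.start()
    for p in procs:
        p.join()  # blocks here until every process in this batch exits

if __name__ == '__main__':
    run_batch([1, 2])  # first timeframe
    run_batch([3, 4])  # runs only after the first batch has joined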
A company gave me an assignment to implement a FileSystem class that runs shell-like commands in Python without using any libraries. Does anyone have suggestions on how to get started? I'm not quite sure how to tackle this problem.
Problem:
Implement a FileSystem class using Python.
Root path is '/'.
Path separator is '/'.
Parent directory is addressable as '..'.
Directory names consist only of English alphabet letters (A-Z and a-z).
All functions should support both relative and absolute paths.
All function parameters are the minimum required/recommended parameters.
Any additional class/function can be added.
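One way to read this spec is to keep the directory tree as nested dicts and the current directory as a list of path components. A minimal sketch under that reading (all names and structure are mine, not part of the assignment):

class FileSystem:
    def __init__(self):
        self.root = {}   # nested dicts: directory name -> children
        self.cwd = []    # path components of the current directory

    def _resolve(self, path):
        # absolute paths restart at root; relative paths start from cwd
        parts = [] if path.startswith('/') else self.cwd[:]
        for part in path.split('/'):
            if part in ('', '.'):
                continue
            if part == '..':
                if parts:
                    parts.pop()
            else:
                parts.append(part)
        return parts

    def _node(self, parts):
        node = self.root
        for p in parts:
            node = node[p]   # KeyError if a component doesn't exist
        return node

    def mkdir(self, path):
        parts = self._resolve(path)
        self._node(parts[:-1]).setdefault(parts[-1], {})

    def cd(self, path):
        parts = self._resolve(path)
        self._node(parts)    # raises if the target doesn't exist
        self.cwd = parts

    def pwd(self):
        return '/' + '/'.join(self.cwd)

# e.g. mkdir('usr'); cd('usr'); mkdir('local'); cd('local'); pwd() -> '/usr/local'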
What I've worked on so far:
class Path:
    def __init__(self, path):
        self.current_path = path.split("/")

    def cd(self, new_path):
        new_split = new_path.split("/")
        for i in new_split:
            if i == "..":
                new_split.pop(0)
                self.current_path = self.current_path[:-1]
        self.current_path += new_split

    def getString(self):
        return "/".join(self.current_path)

    def pwd(self):  # the stray path parameter was unused
        return self.current_path

    def mkdir(self, folder):  # stubs; self and the name parameter were missing
        pass

    def rmdir(self, folder):
        pass

# driver code
fs = Path("/")       # __init__ requires a starting path
fs.mkdir('usr')
fs.cd('usr')
fs.mkdir('local')
fs.cd('local')
print(fs.pwd())      # 'return' is only valid inside a function
So, this is what I came up with. I know I need to clean it up:
class Path:
    dir_stack = []  # class-level, so it is shared by all instances

    def __init__(self):
        print("started")
        main_dir = {'/': {}}
        self.dir_stack.append(main_dir)

    def getCurrentMap(self):  # self was missing in the original
        return self.dir_stack[-1]

    def cd(self, folder):
        if folder == '../':
            self.dir_stack.pop()
            return  # the original fell through and printed "no existing folder"
        current_Level = self.dir_stack[-1]
        current_Map = current_Level[list(current_Level.keys())[0]]
        print('lev', current_Map)
        if folder in current_Map:
            print('here')
            # push just the named subtree (the original pushed the whole children map)
            self.dir_stack.append({folder: current_Map[folder]})
        else:
            print("no existing folder")

    def pwd(self):
        path = ''
        print(self.dir_stack)
        for x in self.dir_stack:
            path += list(x.keys())[0] + '/'
        print(path)

    def ls(self):
        current_Level = self.dir_stack[-1]
        current_Map = current_Level[list(current_Level.keys())[0]]
        print(current_Map)

    def mkdir(self, folder_Name):
        current_Level = self.dir_stack[-1]
        newDir = {folder_Name: {}}
        current_Map = current_Level[list(current_Level.keys())[0]]
        if folder_Name in current_Map:
            print(folder_Name + ' already exists in directory')
        else:
            current_Map.update(newDir)

    def rmdir(self, folder_Name):
        current_Level = self.dir_stack[-1]
        current_Map = current_Level[list(current_Level.keys())[0]]
        if folder_Name in current_Map:
            del current_Map[folder_Name]
        else:
            print("folder doesn't exist")

# driver code
fs = Path()
fs.mkdir('usr')
fs.mkdir('new')
fs.mkdir('files')
fs.cd('usr')
fs.mkdir('local')
fs.cd('new')
fs.pwd()
fs.cd('../')
fs.ls()
# fs.mkdir('local')
# fs.cd('local')
fs.pwd()
I have a trained, frozen graph that I am trying to run on an ARM device. Basically, I am using contrib/pi_examples/label_image, but with my network instead of Inception. My network was trained with dropout, which is now causing me trouble:
Invalid argument: No OpKernel was registered to support Op 'Switch' with these attrs. Registered kernels:
device='CPU'; T in [DT_FLOAT]
device='CPU'; T in [DT_INT32]
device='GPU'; T in [DT_STRING]
device='GPU'; T in [DT_BOOL]
device='GPU'; T in [DT_INT32]
device='GPU'; T in [DT_FLOAT]
[[Node: l_fc1_dropout/cond/Switch = Switch[T=DT_BOOL](is_training_pl, is_training_pl)]]
One solution I can see is to build a TF static library that includes the corresponding operation. On the other hand, it might be better to eliminate the dropout ops from the network to make it simpler and faster. Is there a way to do that?
Thanks.
#!/usr/bin/env python2
import argparse
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2

def print_graph(input_graph):
    for node in input_graph.node:
        print("{0} : {1} ( {2} )".format(node.name, node.op, node.input))

def strip(input_graph, drop_scope, input_before, output_after, pl_name):
    # Drop every node under drop_scope (the dropout cond) and the is_training
    # placeholder, then rewire output_after to read from input_before.
    input_nodes = input_graph.node
    nodes_after_strip = []
    for node in input_nodes:
        print("{0} : {1} ( {2} )".format(node.name, node.op, node.input))
        if node.name.startswith(drop_scope + '/'):
            continue
        if node.name == pl_name:
            continue
        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(node)
        if new_node.name == output_after:
            new_input = []
            for node_name in new_node.input:
                if node_name == drop_scope + '/cond/Merge':
                    new_input.append(input_before)
                else:
                    new_input.append(node_name)
            del new_node.input[:]
            new_node.input.extend(new_input)
        nodes_after_strip.append(new_node)
    output_graph = graph_pb2.GraphDef()
    output_graph.node.extend(nodes_after_strip)
    return output_graph

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-graph', action='store', dest='input_graph')
    parser.add_argument('--input-binary', action='store_true', default=True, dest='input_binary')
    parser.add_argument('--output-graph', action='store', dest='output_graph')
    parser.add_argument('--output-binary', action='store_true', dest='output_binary', default=True)
    args = parser.parse_args()
    input_graph = args.input_graph
    input_binary = args.input_binary
    output_graph = args.output_graph
    output_binary = args.output_binary
    if not tf.gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        return
    input_graph_def = tf.GraphDef()
    mode = "rb" if input_binary else "r"
    with tf.gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            text_format.Merge(f.read().decode("utf-8"), input_graph_def)
    print("Before:")
    print_graph(input_graph_def)
    output_graph_def = strip(input_graph_def, u'l_fc1_dropout', u'l_fc1/Relu', u'prediction/MatMul', u'is_training_pl')
    print("After:")
    print_graph(output_graph_def)
    if output_binary:
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
    else:
        with tf.gfile.GFile(output_graph, "w") as f:
            f.write(text_format.MessageToString(output_graph_def))
    print("%d ops in the final graph." % len(output_graph_def.node))

if __name__ == "__main__":
    main()
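A hypothetical invocation, assuming the script above is saved as strip_dropout.py (the graph file names are placeholders):

python strip_dropout.py --input-graph=model_frozen.pb --output-graph=model_stripped.pb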
How about this as a more general solution (node_name_from_input and node_from_map are, I believe, the helpers from tensorflow.python.tools.optimize_for_inference_lib, and input_node_map maps node names to NodeDefs):
for node in temp_graph_def.node:
    for idx, i in enumerate(node.input):
        input_clean = node_name_from_input(i)
        if input_clean.endswith('/cond/Merge') and input_clean.split('/')[-3].startswith('dropout'):
            identity = node_from_map(input_node_map, i).input[0]
            assert identity.split('/')[-1] == 'Identity'
            parent = node_from_map(input_node_map, node_from_map(input_node_map, identity).input[0])
            pred_id = parent.input[1]
            assert pred_id.split('/')[-1] == 'pred_id'
            good = parent.input[0]
            node.input[idx] = good
I am trying to download files from an FTP server and import the data into Django. So I created a dict containing the server address, login details, remote path, file-name pattern, and the local path the file should be downloaded to, and passed it to a function that does the downloading. It works fine on my system, but when I move it to the client's server it shows an error like:
error downloading C_VAR1_31012014_1.DAT - [Errno 20] Not a directory: 'common/VARRate/C_VAR1_31012014_1.DAT'
This is how the dict looks:
self.fileDetails = {
    'NSE FO VAR RATE FILE': ('ftp.xxx.com', username, passwd, 'common/VARRate',
                             r'C_VAR1_\d{4}201[45]_\d{1}.DAT',
                             'Data/samba/Ftp/Capex10/NSECM/VAR RATE'),
}
for fileType in self.fileDetails:
    self.ftpDownloadFiles(fileType)
The details are passed to the following function:
def ftpDownloadFiles(self, fileType):
    logging.info('Started ' + str(fileType))
    try:
        ftpclient = ftplib.FTP(self.fileDetails[fileType][FDTL_SRV_POS],
                               self.fileDetails[fileType][FDTL_USR_POS],
                               self.fileDetails[fileType][FDTL_PSWD_POS],
                               timeout=120)
        # ftpclient.set_debuglevel(2)
        ftpclient.set_pasv(True)
        logging.info('Logged in to ' + self.fileDetails[fileType][FDTL_SRV_POS] +
                     time.asctime())
        logging.info('\tfor type: ' + fileType)
    except BaseException as e:
        print(e)
        return
    remotepath = self.fileDetails[fileType][FDTL_PATH_POS]
    # matched, unmatched, downloaded = 0
    try:
        ftpclient.cwd(remotepath)
        ftpclient.dir(filetimestamps.append)  # filetimestamps is defined elsewhere in the class
    except BaseException as e:
        logging.info('\tchange dir error : ' + remotepath + ' ' + e.__str__())
    self.walkTree(ftpclient, remotepath, fileType)
    # logging.info('\n\tMatched %d, Unmatched %d, Downloaded %d'
    #              % (matched, unmatched, downloaded))
    ftpclient.close()
From here it calls the next function, where the download process starts:
def walkTree(self, ftpclient, remotepath, fileType):
    # process files inside remotepath; cwd already done
    # remotepath to be created if it doesn't exist locally
    copied = matched = downloaded = imported = 0
    files = ftpclient.nlst()
    localpath = self.fileDetails[fileType][FDTL_DSTPATH_POS]
    rexpCompiled = re.compile(self.fileDetails[fileType][FDTL_PATRN_POS])
    for eachFile in files:
        try:
            ftpclient.cwd(remotepath + '/' + eachFile)
            self.walkTree(ftpclient, remotepath + '/' + eachFile + '/', fileType)
        except ftplib.error_perm:  # not a folder, process the file
            # every file to be saved in same local folder as on ftp srv
            saveFolder = remotepath
            saveTo = remotepath + '/' + eachFile
            if not os.path.exists(saveFolder):
                try:
                    os.makedirs(saveFolder)
                    print("directory created")
                except BaseException as e:
                    logging.info('\tcreating %s : %s' % (saveFolder, e.__str__()))
            if not os.path.exists(saveTo):
                try:
                    ftpclient.retrbinary('RETR ' + eachFile, open(saveTo, 'wb').write)
                    # logging.info('\tdownloaded ' + saveTo)
                    downloaded += 1
                except BaseException as e:
                    logging.info('\terror downloading %s - %s' % (eachFile, e.__str__()))
                except ftplib.error_perm:
                    # unreachable: the BaseException clause above already catches error_perm
                    logging.info('\terror downloading %s - %s' % (eachFile, ftplib.error_perm))
            elif fileType == 'NSE CASH CLOSING FILE':  # spl case if file exists
                try:
                    # rename the existing file with last year's suffix, then re-download
                    yr = int(time.strftime('%Y')) - 1
                    os.rename(saveTo, saveTo + str(yr))
                    ftpclient.retrbinary('RETR ' + eachFile, open(saveTo, 'wb').write)
                    downloaded += 1
                except BaseException as e:
                    logging.info('\terror rename/ download %s - %s' % (eachFile, e.__str__()))
Can anyone help me resolve this problem?
Try using os.path.join() instead of hardcoded slashes as path separators for the local download path; whether / or \ is correct depends on the local OS.
e.g. in your code:
saveTo = remotepath + '/' + eachFile
would become:
saveTo = os.path.join(remotepath, eachFile)
See https://docs.python.org/2/library/os.path.html
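Separately, note that walkTree computes localpath from the file details but never uses it: the save path is built from the FTP remotepath, which probably doesn't exist as a directory tree on the client machine (hence the [Errno 20]). A sketch of what was likely intended, assuming files should land under the configured destination:

saveFolder = localpath  # the configured destination, not the FTP path
saveTo = os.path.join(saveFolder, eachFile)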
In Waf, how can I create multiple custom tasks that can run in parallel (with --jobs=JOBS)?
Sources = ["C:\\src1.c", "C:\\Mod1\\src2.c", ... 30pcs]  # one per call
Incl_Paths = ["Mod1", "Mod2"]  # list all of them in every call
INCL_ST = "-I%s"  # how to format an include path as an argument
Ext_out = "_loc"  # output file extension
The goal:
C:\\LOC.exe -IMod1 -IMod2 C:\\src1.c > build\\src1.c_loc //or better src1_loc
C:\\LOC.exe -IMod1 -IMod2 C:\\Mod1\\src2.c > build\\src2.c_loc //or better src2_loc
...
I couldn't get this to work:
def build(bld):
    for i in Sources:
        bld.new_task_gen(
            source = i,
            rule = 'C:\\LOC.exe ${INCL_ST:Incl_Paths} ${SRC} > ' + i + Ext_out,
        )
I also couldn't get the exe picked up via find_program:
# find_program(self, filename, path_list=[], var=None, environ=None, exts=''):
cfg.find_program("C:\\LOC.exe", var='LOC')
To change from:
rule='C:\\LOC.exe ...'
To:
rule='${LOC} ...'
Something like this should work with waf 1.7:
from waflib.Task import Task
from waflib.TaskGen import extension

Ext_out = "_loc"  # output file extension

def configure(conf):
    # loc.exe must be in the system path for this to work
    conf.find_program(
        'loc',
        var = "LOC",
    )
    conf.env.Incl_Paths = ["Mod1", "Mod2"]
    conf.env.INCL_ST = "-I%s"

@extension('.c')
def process_loc(self, node):
    out_node = node.change_ext(Ext_out)
    tsk = self.create_task('loc')
    tsk.set_inputs(node)
    tsk.set_outputs(out_node)

class loc_task(Task):
    # the trailing "_task" is stripped, so create_task('loc') finds this class
    ext_in = ['.c']
    ext_out = ['_loc']
    run_str = "${LOC} ${INCL_ST:Incl_Paths} ${SRC} > ${TGT}"

def build(bld):
    bld(source = ["src1.c", "src2.c"])
Well, it works for me on Linux with a fake loc ...
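Since each generated task declares its own input and output nodes, waf's dependency scheduler is free to run the loc tasks concurrently, so --jobs=JOBS parallelism should come along for free.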