Implement FileSystem

A company gave me an assignment to implement a FileSystem class that runs shell-like commands in Python without using any libraries. Does anyone have suggestions on how to get started? I'm not quite sure how to tackle this problem.
Problem:
Implement a FileSystem class using Python.
Root path is '/'.
Path separator is '/'.
Parent directory is addressable as '..'.
Directory names consist only of English alphabet letters (A-Z and a-z).
All functions should support both relative and absolute paths.
All function parameters are the minimum required/recommended parameters.
Any additional class/function can be added.
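One way to get started: keep the current directory as a list of name components and resolve every incoming path against it before touching any directory tree. A minimal sketch of such a resolver (illustrative names, not part of the assignment):

def resolve(current_parts, path):
    # Resolve an absolute or relative path against the current directory.
    # current_parts is a list of components, e.g. ['usr', 'local'] for /usr/local.
    parts = [] if path.startswith('/') else list(current_parts)
    for piece in path.split('/'):
        if piece == '':
            continue              # skip empty segments from '//' or a trailing '/'
        elif piece == '..':
            if parts:             # '..' at the root stays at the root
                parts.pop()
        else:
            parts.append(piece)
    return parts

# resolve(['usr', 'local'], '../bin') -> ['usr', 'bin']
# resolve(['usr', 'local'], '/etc')   -> ['etc']

With a resolver like that, cd/mkdir/rmdir only ever see a normalized component list, which makes relative paths, absolute paths, and '..' uniform.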
What I've worked on so far:
class Path:
    def __init__(self, path):
        self.current_path = path.split("/")

    def cd(self, new_path):
        new_split = new_path.split("/")
        # Consume leading ".." components; each one drops the last
        # component of the current path.
        while new_split and new_split[0] == "..":
            new_split.pop(0)
            self.current_path = self.current_path[:-1]
        self.current_path += new_split

    def getString(self):
        return "/".join(self.current_path)

    def pwd(self):
        return self.current_path

    def mkdir(self, folder):
        pass

    def rmdir(self, folder):
        pass

# driver code
fs = Path("/")
fs.mkdir('usr')   # mkdir/rmdir are still stubs
fs.cd('usr')
fs.mkdir('local')
fs.cd('local')
print(fs.pwd())

So, this is what I came up with. I know I need to clean it up:
class Path:
    def __init__(self):
        # Stack of {name: children} dicts; the top of the stack is the
        # current working directory.
        self.dir_stack = [{'/': {}}]

    def _current_map(self):
        # Children of the current directory (the dict behind the top name).
        current_level = self.dir_stack[-1]
        return current_level[list(current_level.keys())[0]]

    def cd(self, folder):
        if folder == '../':
            if len(self.dir_stack) > 1:  # never pop the root
                self.dir_stack.pop()
            return
        current_map = self._current_map()
        if folder in current_map:
            # Push the subdirectory as its own {name: children} entry.
            self.dir_stack.append({folder: current_map[folder]})
        else:
            print("no existing folder")

    def pwd(self):
        names = [list(level.keys())[0] for level in self.dir_stack]
        print('/' + '/'.join(names[1:]))

    def ls(self):
        print(list(self._current_map().keys()))

    def mkdir(self, folder_name):
        current_map = self._current_map()
        if folder_name in current_map:
            print(folder_name + ' already exists in directory')
        else:
            current_map[folder_name] = {}

    def rmdir(self, folder_name):
        current_map = self._current_map()
        if folder_name in current_map:
            del current_map[folder_name]
        else:
            print("folder doesn't exist")

# driver code
fs = Path()
fs.mkdir('usr')
fs.mkdir('new')
fs.mkdir('files')
fs.cd('usr')
fs.mkdir('local')
fs.cd('new')   # prints "no existing folder": 'new' lives under '/', not '/usr'
fs.pwd()
fs.cd('../')
fs.ls()
# fs.mkdir('local')
# fs.cd('local')
fs.pwd()


Wipe out dropout operations from TensorFlow graph

I have a trained frozen graph that I am trying to run on an ARM device. Basically, I am using contrib/pi_examples/label_image, but with my network instead of Inception. My network was trained with dropout, which now causes me trouble:
Invalid argument: No OpKernel was registered to support Op 'Switch' with these attrs. Registered kernels:
device='CPU'; T in [DT_FLOAT]
device='CPU'; T in [DT_INT32]
device='GPU'; T in [DT_STRING]
device='GPU'; T in [DT_BOOL]
device='GPU'; T in [DT_INT32]
device='GPU'; T in [DT_FLOAT]
[[Node: l_fc1_dropout/cond/Switch = Switch[T=DT_BOOL](is_training_pl, is_training_pl)]]
One solution I can see is to build a TF static library that includes the corresponding operation. On the other hand, it might be a better idea to eliminate the dropout ops from the network altogether, to make it simpler and faster. Is there a way to do that?
Thanks.
#!/usr/bin/env python2
import argparse
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2

def print_graph(input_graph):
    for node in input_graph.node:
        print "{0} : {1} ( {2} )".format(node.name, node.op, node.input)

def strip(input_graph, drop_scope, input_before, output_after, pl_name):
    # Drop every node under drop_scope (the dropout subgraph) and the
    # pl_name placeholder, then rewire output_after to read from
    # input_before instead of the dropout's Merge node.
    input_nodes = input_graph.node
    nodes_after_strip = []
    for node in input_nodes:
        print "{0} : {1} ( {2} )".format(node.name, node.op, node.input)
        if node.name.startswith(drop_scope + '/'):
            continue
        if node.name == pl_name:
            continue
        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(node)
        if new_node.name == output_after:
            new_input = []
            for node_name in new_node.input:
                if node_name == drop_scope + '/cond/Merge':
                    new_input.append(input_before)
                else:
                    new_input.append(node_name)
            del new_node.input[:]
            new_node.input.extend(new_input)
        nodes_after_strip.append(new_node)
    output_graph = graph_pb2.GraphDef()
    output_graph.node.extend(nodes_after_strip)
    return output_graph

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-graph', action='store', dest='input_graph')
    parser.add_argument('--input-binary', action='store_true', default=True, dest='input_binary')
    parser.add_argument('--output-graph', action='store', dest='output_graph')
    parser.add_argument('--output-binary', action='store_true', dest='output_binary', default=True)
    args = parser.parse_args()

    input_graph = args.input_graph
    input_binary = args.input_binary
    output_graph = args.output_graph
    output_binary = args.output_binary

    if not tf.gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        return

    input_graph_def = tf.GraphDef()
    mode = "rb" if input_binary else "r"
    with tf.gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            text_format.Merge(f.read().decode("utf-8"), input_graph_def)

    print "Before:"
    print_graph(input_graph_def)

    output_graph_def = strip(input_graph_def, u'l_fc1_dropout', u'l_fc1/Relu', u'prediction/MatMul', u'is_training_pl')

    print "After:"
    print_graph(output_graph_def)

    if output_binary:
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
    else:
        with tf.gfile.GFile(output_graph, "w") as f:
            f.write(text_format.MessageToString(output_graph_def))
    print("%d ops in the final graph." % len(output_graph_def.node))

if __name__ == "__main__":
    main()
How about this as a more general solution:
for node in temp_graph_def.node:
    for idx, i in enumerate(node.input):
        input_clean = node_name_from_input(i)
        if input_clean.endswith('/cond/Merge') and input_clean.split('/')[-3].startswith('dropout'):
            identity = node_from_map(input_node_map, i).input[0]
            assert identity.split('/')[-1] == 'Identity'
            parent = node_from_map(input_node_map, node_from_map(input_node_map, identity).input[0])
            pred_id = parent.input[1]
            assert pred_id.split('/')[-1] == 'pred_id'
            good = parent.input[0]
            node.input[idx] = good
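For context, the fragment above assumes an input_node_map from node name to NodeDef plus two lookup helpers; minimal stand-ins could look like this (a sketch that mirrors, as far as I can tell, the same-named helpers in TensorFlow's optimize_for_inference_lib):

def node_name_from_input(node_name):
    # Strip the control-dependency marker (^) and the output port (:0).
    if node_name.startswith("^"):
        node_name = node_name[1:]
    return node_name.split(":")[0]

def build_input_node_map(graph_def):
    # Map node name -> NodeDef so a node's producers can be looked up by name.
    node_map = {}
    for node in graph_def.node:
        if node.name in node_map:
            raise ValueError("Duplicate node name: " + node.name)
        node_map[node.name] = node
    return node_map

def node_from_map(node_map, name):
    return node_map[node_name_from_input(name)]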

How does field renaming work in Django migrations?

Django migrations can detect when a field was renamed and ask you about it (instead of the old-fashioned delete/create).
Even if multiple fields are changed, it seems to find the corresponding match. For example:
Before:
class DirectoryMirror(models.Model):
directory_origin = models.ForeignKey(TapeDirectory)
machine_target = models.ForeignKey(GenericMachine)
directory_target = models.CharField(max_length=255, blank=False)
After (changing field names):
class DirectoryMirror(models.Model):
source_directory = models.ForeignKey(TapeDirectory)
target_machine = models.ForeignKey(GenericMachine)
target_directory = models.CharField(max_length=255, blank=False)
Generating migration:
$ ./manage.py makemigrations
Did you rename directorymirror.directory_origin to directorymirror.source_directory (a ForeignKey)? [y/N] y
Did you rename directorymirror.directory_target to directorymirror.target_directory (a CharField)? [y/N] y
Did you rename directorymirror.machine_target to directorymirror.target_machine (a ForeignKey)? [y/N] y
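Answering y produces RenameField operations in the generated migration file, roughly like this (a sketch; the app label and file name are hypothetical):

# myapp/migrations/0002_auto.py -- roughly what makemigrations writes
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('myapp', '0001_initial')]
    operations = [
        migrations.RenameField(model_name='directorymirror', old_name='directory_origin', new_name='source_directory'),
        migrations.RenameField(model_name='directorymirror', old_name='directory_target', new_name='target_directory'),
        migrations.RenameField(model_name='directorymirror', old_name='machine_target', new_name='target_machine'),
    ]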
How does it manage to detect the renaming and find the correct match?
Here is the algorithm: https://github.com/django/django/blob/bc77eb6d0858652e197c08c299efaeb06c51efee/django/db/migrations/autodetector.py#L757
Copying it here:
def generate_renamed_fields(self):
    """
    Works out renamed fields
    """
    self.renamed_fields = {}
    for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
        # Scan to see if this is actually a rename!
        field_dec = self.deep_deconstruct(field)
        for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
            if rem_app_label == app_label and rem_model_name == model_name:
                old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
                    old_rel_to = old_field_dec[2]['to']
                    if old_rel_to in self.renamed_models_rel:
                        old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                if old_field_dec == field_dec:
                    if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                        self.add_operation(
                            app_label,
                            operations.RenameField(
                                model_name=model_name,
                                old_name=rem_field_name,
                                new_name=field_name,
                            )
                        )
                        self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                        self.old_field_keys.add((app_label, model_name, field_name))
                        self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                        break
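Stripped down, the idea is: a removed field and an added field on the same model are treated as a rename when everything about them except the name deconstructs identically. A simplified sketch (not Django's actual code):

def find_rename_candidates(old_fields, new_fields, deconstruct):
    # old_fields / new_fields map field name -> field instance.
    removed = set(old_fields) - set(new_fields)
    added = set(new_fields) - set(old_fields)
    pairs = []
    for new_name in sorted(added):
        new_dec = deconstruct(new_fields[new_name])
        for old_name in sorted(removed):
            # Identical (path, args, kwargs) => same field under a new name.
            if deconstruct(old_fields[old_name]) == new_dec:
                pairs.append((old_name, new_name))
                removed.discard(old_name)   # each old field can match once
                break
    return pairs

Django additionally rewrites the old field's 'to' reference when the related model itself was renamed (the renamed_models_rel lookup above) before comparing, and only emits the operation after the questioner confirms.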

Waf: Create custom parallel tasks

In Waf, how can I create multiple custom tasks that can run in parallel (with --jobs=JOBS)?
Sources = ["C:\\src1.c", "C:\\Mod1\\src2.c", ...]  # ~30 sources, one per call
Incl_Paths = ["Mod1", "Mod2"]  # list all of them in every call
INCL_ST = "-I%s"  # how to format an include path as an argument
Ext_out = "_loc"  # output file extension
The goal:
C:\\LOC.exe -IMod1 -IMod2 C:\\src1.c > build\\src1.c_loc //or better src1_loc
C:\\LOC.exe -IMod1 -IMod2 C:\\Mod1\src2.c > build\\src2.c_loc //or better src2_loc
...
I couldn't get it to work:
def build(bld):
    for i in Sources:
        bld.new_task_gen(
            source = i,
            rule = 'C:\\LOC.exe ${INCL_ST:Incl_Paths} ${SRC} > ' + i + Ext_out,
        )
I also couldn't register the exe:
# find_program(self, filename, path_list=[], var=None, environ=None, exts=''):
cfg.find_program("C:\\LOC.exe", var='LOC')
to change from:
rule='C:\\LOC.exe ...'
To:
rule='${LOC} ...'
Something like this should work with waf 1.7:
from waflib.Task import Task
from waflib.TaskGen import extension

Ext_out = "_loc"  # output file extension

def configure(conf):
    # loc.exe must be in the system path for this to work
    conf.find_program(
        'loc',
        var = "LOC",
    )
    conf.env.Incl_Paths = ["Mod1", "Mod2"]
    conf.env.INCL_ST = "-I%s"

@extension('.c')
def process_loc(self, node):
    out_node = node.change_ext(Ext_out)
    tsk = self.create_task('loc')
    tsk.set_inputs(node)
    tsk.set_outputs(out_node)

class loc_task(Task):
    ext_in = ['.c']
    ext_out = ['_loc']
    run_str = "${LOC} ${INCL_ST:Incl_Paths} ${SRC} > ${TGT}"

def build(bld):
    bld(source = ["src1.c", "src2.c"])
Well, it works for me on Linux, faking loc ...
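On the find_program part of the question: going by the signature quoted above, a path_list argument can point it at a specific directory instead of relying on the system PATH (untested sketch):

def configure(conf):
    # Look for LOC(.exe) in C:\ explicitly rather than on the PATH.
    conf.find_program('LOC', var='LOC', path_list=['C:\\'])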

'Model is not immutable' TypeError

I am getting this traceback:
--- Trimmed parts ---
File "C:\Users\muhammed\Desktop\gifdatabase\gifdatabase.py", line 76, in maketransaction
gif.tags = list(set(gif.tags + tags))
File "C:\Program Files (x86)\Google\google_appengine\google\appengine\ext\ndb\model.py", line 2893, in __hash__
raise TypeError('Model is not immutable')
TypeError: Model is not immutable
Here are the related parts of my code:
class Gif(ndb.Model):
    author = ndb.UserProperty()
    #tags = ndb.StringProperty(repeated=True)
    tags = ndb.KeyProperty(repeated=True)

    @classmethod
    def get_by_tag(cls, tag_name):
        return cls.query(cls.tags == ndb.Key(Tag, tag_name)).fetch()

class Tag(ndb.Model):
    gif_count = ndb.IntegerProperty()

class PostGif(webapp2.RequestHandler):
    def post(self):
        user = users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url("/static/submit.html"))
            return

        link = self.request.get('gif_link')
        tag_names = shlex.split(self.request.get('tags').lower())

        @ndb.transactional(xg=True)
        def maketransaction():
            tags = [Tag.get_or_insert(tag_name) for tag_name in tag_names]
            gif = Gif.get_or_insert(link)
            if not gif.author:  # first time submission
                gif.author = user
            gif.tags = list(set(gif.tags + tags))
            gif.put()
            for tag in tags:
                tag.gif_count += 1
                tag.put()

        if validate_link(link) and tag_names:
            maketransaction()
            self.redirect('/static/submit_successful.html')
        else:
            self.redirect('/static/submit_fail.html')
What is the problem with the gif.tags = list(set(gif.tags + tags)) line?
You are inserting tags (model instances) instead of keys. set() has to hash its elements, and ndb model instances raise TypeError from __hash__ (that is exactly the 'Model is not immutable' error in your traceback); keys are hashable. You need to access the key:
tags = [Tag.get_or_insert(tag_name).key .....]
But you can also make this a single network hop, like this:
futures = [Tag.get_or_insert_async(tag_name) for tag_name in tag_names]
futures.append(Gif.get_or_insert_async(link))
ndb.Future.wait_all(futures)
gif = futures.pop().get_result()
tags = [future.get_result() for future in futures]
But that's not really the question, just a suggestion ^. The clearer answer, using .key, is:
gif.tags = gif.tags + [tag.key for tag in tags]
# or
gif.tags.extend([tag.key for tag in tags])
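If the set()-based deduplication from the original line is still wanted: ndb.Key is hashable (unlike model instances, whose __hash__ raises the TypeError above), so it works once keys are used:

# Keys are hashable, so the dedup can happen at the key level:
gif.tags = list(set(gif.tags) | {tag.key for tag in tags})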

How to make a remote file xcopy/delete batch program with Python

I'm a Python beginner. I have many PCs connected through a network switch. One is the manager PC; the others are client PCs. I want to write a remote file xcopy/delete batch program in Python, like the DOS batch command below.
Any help on how to start?
DOS batch command:
xcopy E:\Share_main\A*.* \\124.122.11.101\A\ /e /h /k /Y
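For comparison, a one-shot equivalent of that xcopy using only the standard library might look like this (a sketch; it assumes the client share \\124.122.11.101\A is reachable and writable, and Python 3.8+ for dirs_exist_ok):

import shutil

# Recursively copy the source tree onto the client's shared folder,
# overwriting existing files (a rough equivalent of xcopy /e /Y).
shutil.copytree(r"E:\Share_main\A", r"\\124.122.11.101\A",
                dirs_exist_ok=True)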
Here is some sample code:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# based on Carnival http://ask.python.kr/users/6970/carnival/
import os, sys, csv, re, datetime
from multiprocessing import Process

class Server:
    def __init__(self, addr, path):
        self.addr = addr
        self.path = path

def multi_distribute_from_buffers(server, dirpath, filenames, subdir, buffers):
    # Rebuild the sub-path below subdir on the remote share.
    l = re.findall(r"[\w']+", subdir)
    m = re.findall(r"[\w']+", dirpath)
    cnt_l = len(l)
    cnt_m = len(m)
    remotepath = "//%s/%s" % (server.addr, server.path)
    if cnt_m > cnt_l:
        for j in range(cnt_m - cnt_l):
            remotepath += "/%s" % m[cnt_l + j]
    for index, filename in enumerate(filenames):
        remotepathfile = "%s/%s" % (remotepath, filename)
        with open(remotepathfile, 'wb') as outFile:
            outFile.write(buffers[index])

def make_dir(server_list, subdir, dirpath):
    for server in server_list:
        l = re.findall(r"[\w']+", subdir)
        m = re.findall(r"[\w']+", dirpath)
        cnt_l = len(l)
        cnt_m = len(m)
        path = "//%s/%s" % (server.addr, server.path)
        if cnt_m > cnt_l:
            for j in range(cnt_m - cnt_l):
                path += "/%s" % m[cnt_l + j]
        d = os.path.dirname(path)
        if not os.path.exists(d):
            print("{}, Make dir {}".format(datetime.datetime.now(), d))
            os.makedirs(d)
        if not os.path.exists(path):
            print("{}, Make dir {}".format(datetime.datetime.now(), path))
            os.makedirs(path)

def dist_mems(server_list, subdir):
    filecount = 0
    for dirpath, dirnames, filenames in os.walk(subdir):
        make_dir(server_list, subdir, dirpath)
        buffers = []
        for filename in filenames:
            pathname = os.path.join(dirpath, filename)
            filecount += 1  # Python has no ++ operator; increment explicitly
            print("{}, {} : Read from {}".format(filecount, datetime.datetime.now(), pathname))
            with open(pathname, 'rb') as inFile:
                buffers.append(inFile.read())
        # One writer process per server; the buffers are shared read-only.
        for server in server_list:
            Process(target=multi_distribute_from_buffers,
                    args=(server, dirpath, filenames, subdir, buffers)).start()

def get_server_list(filename):
    server_list = []
    with open(filename, "r") as f:
        for row in csv.reader(f):
            server_list.append(Server(row[0], row[1]))
    return server_list

if __name__ == '__main__':
    start = datetime.datetime.now()
    clientListfile = 'C:\\Users\\Public\\client_list.csv'
    if len(sys.argv) > 1 and sys.argv[1] != '':
        clientListfile = sys.argv[1]
    sourceFolder = 'C:\\Users\\Public\\'
    if len(sys.argv) > 2 and sys.argv[2] != '':
        sourceFolder = sys.argv[2]
    server_list = get_server_list(clientListfile)
    dist_mems(server_list, sourceFolder)
    end = datetime.datetime.now()
    diff = end - start
    print(" XCopying Start to {} clients : {}".format(len(server_list), start))
    print(" XCopying finished : {}".format(end))
    print(" XCopying Total time span : {}".format(diff))
