def __init__(self):
    self.bok = {'peter': 123, 'maria': 321, 'harry': 888}

def save(self):
    file = open('test.txt', 'w')
    file.write(str(self.bok))
    file.close()
    print "File was successfully saved."

def load(self):
    try:
        file = open('test.txt', 'r')
        newdict = eval(file.read())
        self.bok = newdict
        file.close()
        print "File was successfully loaded."
    except IOError:
        print "No file was found."
        return
How do I make it look like this inside the text file:
value1:key1
value2:key2
==>
123:peter
321:maria
Currently it looks like a normal dictionary:
{'peter': 123, 'maria': 321, 'harry': 888}
But then the issue is loading the file back, since its contents no longer look like a dictionary.
(Load = loads the dictionary from a .txt file to self.bok{})
(Save = saves the dictionary to a .txt file)
import csv
import os

def save(self, filepath):
    with open(filepath, 'w') as fout:
        outfile = csv.writer(fout, delimiter=":")
        for k, v in self.bok.iteritems():
            outfile.writerow([v, k])

def load(self, filepath):
    if not os.path.isfile(filepath):
        print "No file was found"
        return
    with open(filepath) as infile:
        for v, k in csv.reader(infile, delimiter=":"):
            self.bok[k] = v
Without any help from csv:
def save(self, filepath):
    with open(filepath, 'w') as outfile:
        for k, v in self.bok.iteritems():
            outfile.write("{}:{}\n".format(v, k))

def load(self, filepath):
    if not os.path.isfile(filepath):
        print "No file was found"
        return
    with open(filepath) as infile:
        for line in infile:
            v, k = line.strip().split(":")
            self.bok[k] = v
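One caveat with both versions: everything read back from a text file is a string, so the integer values come back as e.g. '123' rather than 123. A minimal sketch of the load converting values back, assuming every value in self.bok is an integer:

def load(self, filepath):
    if not os.path.isfile(filepath):
        print "No file was found"
        return
    with open(filepath) as infile:
        for line in infile:
            v, k = line.strip().split(":")
            self.bok[k] = int(v)  # values were written as text; assumes they are all integers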
I have the following class for loading and converting my images into train and test arrays for a deep learning model in Tensorflow 2.
The images are in three folders, named 'Car', 'Cat' and 'Man', which are within the Train and Test main folders. Each image is of 300 x 400 pixels.
import os
import pickle
import random  # used by random.shuffle in process_image below
import cv2
import numpy as np
os.getcwd()
out: 'C:\\Users\\me\\Jupiter_Notebooks\\Dataset_Thermal\\SeekThermal'
path_train = "../SeekThermal/Train"
path_test = "../SeekThermal/Test"
class MasterImage(object):

    def __init__(self, PATH='', IMAGE_SIZE=50):
        self.PATH = PATH
        self.IMAGE_SIZE = IMAGE_SIZE
        self.image_data = []
        self.x_data = []
        self.y_data = []
        self.CATEGORIES = []
        # This will get List of categories
        self.list_categories = []

    def get_categories(self):
        for path in os.listdir(self.PATH):
            if '.DS_Store' in path:
                pass
            else:
                self.list_categories.append(path)
        print("Found Categories ", self.list_categories, '\n')
        return self.list_categories

    def process_image(self):
        try:
            """
            Return Numpy array of image
            :return: X_Data, Y_Data
            """
            self.CATEGORIES = self.get_categories()
            for categories in self.CATEGORIES:                            # Iterate over categories
                train_folder_path = os.path.join(self.PATH, categories)   # Folder Path
                class_index = self.CATEGORIES.index(categories)           # this will get index for classification
                for img in os.listdir(train_folder_path):                 # This will iterate in the Folder
                    new_path = os.path.join(train_folder_path, img)       # image Path
                    try:                                                  # if any image is corrupted
                        image_data_temp = cv2.imread(new_path)            # Read Image as numbers
                        image_temp_resize = cv2.resize(image_data_temp, (self.IMAGE_SIZE, self.IMAGE_SIZE))
                        self.image_data.append([image_temp_resize, class_index])
                        random.shuffle(self.image_data)
                    except:
                        pass
            data = np.asanyarray(self.image_data)  # or, data = np.asanyarray(self.image_data, dtype=object)
            # Iterate over the Data
            for x in data:
                self.x_data.append(x[0])  # Get the X_Data
                self.y_data.append(x[1])  # get the label
            X_Data = np.asarray(self.x_data) / (255.0)  # Normalize Data
            Y_Data = np.asarray(self.y_data)
            # reshape x_Data
            X_Data = X_Data.reshape(-1, self.IMAGE_SIZE, self.IMAGE_SIZE, 3)
            return X_Data, Y_Data
        except:
            print("Failed to run Function Process Image ")

    def pickle_image(self):
        """
        :return: None Creates a Pickle Object of DataSet
        """
        # Call the Function and Get the Data
        X_Data, Y_Data = self.process_image()
        # Write the Entire Data into a Pickle File
        pickle_out = open('X_Data', 'wb')
        pickle.dump(X_Data, pickle_out)
        pickle_out.close()
        # Write the Y Label Data
        pickle_out = open('Y_Data', 'wb')
        pickle.dump(Y_Data, pickle_out)
        pickle_out.close()
        print("Pickled Image Successfully ")
        return X_Data, Y_Data

    def load_dataset(self):
        try:
            # Read the Data from Pickle Object
            X_Temp = open('..\SeekThermal\X_Data', 'rb')
            X_Data = pickle.load(X_Temp)
            Y_Temp = open('..\SeekThermal\Y_Data', 'rb')
            Y_Data = pickle.load(Y_Temp)
            print('Reading Dataset from Pickle Object')
            return X_Data, Y_Data
        except:
            print('Could not Found Pickle File ')
            print('Loading File and Dataset ..........')
            X_Data, Y_Data = self.pickle_image()
            return X_Data, Y_Data
I don't understand what the problem is with the pickle file, because just last week I was able to create these arrays successfully with the same code.
Is there an easier way to load images in TensorFlow than through a custom-defined class?
a = MasterImage(PATH=path_train, IMAGE_SIZE=224)
a.process_image()
out:
it produces an array with a warning.
VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
data = np.asanyarray(self.image_data)
a.pickle_image()
out:
TypeError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_1692\507657192.py in <cell line: 1>()
----> 1 a.pickle_image()
~\AppData\Local\Temp\ipykernel_1692\1410849712.py in pickle_image(self)
71 """
72 # Call the Function and Get the Data
---> 73 X_Data,Y_Data = self.process_image()
74
75 # Write the Entire Data into a Pickle File
TypeError: cannot unpack non-iterable NoneType object
a.load_dataset()
out:
Could not Found Pickle File
Loading File and Dataset ..........
Found Categories ['Car', 'Cat', 'Man', 'Car', 'Cat', 'Man']
Pickled Image Successfully
I'm running Python 3.8.8 via anaconda on Windows 10. Thank you for any advice.
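On the second question: if the Train and Test folders keep the one-subfolder-per-class layout described above, TensorFlow's built-in loader can replace the custom class entirely. A minimal sketch, assuming the paths from the question and TF 2.3 or newer (label_mode='int' mirrors the class_index labels used in the class):

import tensorflow as tf

# Builds a tf.data.Dataset straight from the folder structure;
# class names are inferred from the subfolder names ('Car', 'Cat', 'Man').
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "../SeekThermal/Train",
    label_mode="int",        # integer labels, like class_index above
    image_size=(224, 224),   # resized on the fly, like cv2.resize above
    batch_size=32)

test_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "../SeekThermal/Test",
    label_mode="int",
    image_size=(224, 224),
    batch_size=32)

# The / 255.0 normalization step can be applied inside the pipeline:
train_ds = train_ds.map(lambda x, y: (x / 255.0, y))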
The code I have at the moment doesn't account for different users, because the json file it stores the "coins" in only saves them under the literal key "userid". This is the code for storing the coins in the json file:
@client.command()
async def Shibaku1(ctx, coin1, coin2, coin3, coin4, coin5, coin6):
    with open('Shibaku1.json', 'r') as f:
        coins_data = json.load(f)
    coins_data['userid'] = (coin1, coin2, coin3, coin4, coin5, coin6)
    with open('Shibaku1.json', 'w') as f:
        json.dump(coins_data, f)
Example of what gets stored in the json file:
{"userid": [":Helicopter:", ":Skateboard1:", ":swords:", ":mace:", ":mace:", ":mangosteen:"]}
How do I make it so it stores a different set for each user?
Replace coins_data['userid'] with coins_data[str(ctx.author.id)].
Then your json file will look like this:
{"123456789": [":Helicopter:", ":Skateboard1:", ":swords:", ":mace:", ":mace:", ":mangosteen:"]}
I'm adapting a script.py to achieve transfer learning. I found many scripts for retraining a model from TFRecord files, but none of them worked for me because of TF 2.0's removal of contrib, so I'm trying to convert a script to work with TF2 and with my model.
This is my script at the moment:
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

keras = tf.keras

EPOCHS = 1

# Data preprocessing
import pathlib

#data_dir = tf.keras.utils.get_file(origin="/home/pi/venv/raccoon_dataset/", fname="raccoons_dataset")
#data_dir = pathlib.Path(data_dir)
data_dir = "/home/pi/.keras/datasets/ssd_mobilenet_v1_coco_2018_01_28/saved_model/saved_model.pb"

######################
# Read the TFRecords #
######################

def imgs_input_fn(filenames, perform_shuffle=False, repeat_count=1, batch_size=1):
    def _parse_function(serialized):
        features = \
            {
                'image': tf.io.FixedLenFeature([], tf.string),
                'label': tf.io.FixedLenFeature([], tf.int64)
            }
        # Parse the serialized data so we get a dict with our data.
        parsed_example = tf.io.parse_single_example(serialized=serialized,
                                                    features=features)
        print("\nParsed example:\n", parsed_example, "\nEnd of parsed example:\n")
        # Get the image as raw bytes.
        image_shape = tf.stack([300, 300, 3])
        image_raw = parsed_example['image']
        label = tf.cast(parsed_example['label'], tf.float32)
        # Decode the raw bytes so it becomes a tensor with type.
        image = tf.io.decode_raw(image_raw, tf.uint8)
        image = tf.cast(image, tf.float32)
        image = tf.reshape(image, image_shape)
        #image = tf.subtract(image, 116.779) # Zero-center by mean pixel
        #image = tf.reverse(image, axis=[2]) # 'RGB'->'BGR'
        d = dict(zip(["image"], [image])), [label]
        return d

    dataset = tf.data.TFRecordDataset(filenames=filenames)
    # Parse the serialized data in the TFRecords files.
    # This returns TensorFlow tensors for the image and labels.
    #print("\nDataset before parsing:\n",dataset,"\n")
    dataset = dataset.map(_parse_function)
    #print("\nDataset after parsing:\n",dataset,"\n")
    if perform_shuffle:
        # Randomizes input using a window of 256 elements (read into memory)
        dataset = dataset.shuffle(buffer_size=256)
    dataset = dataset.repeat(repeat_count)  # Repeats dataset this # times
    dataset = dataset.batch(batch_size)     # Batch size to use
    print("\nDataset batched:\n", dataset, "\nEnd dataset\n")
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    print("\nIterator shape:\n", tf.compat.v1.data.get_output_shapes(iterator), "\nEnd\n")
    #print("\nIterator:\n",iterator.get_next(),"\nEnd Iterator\n")
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels

raw_train = tf.compat.v1.estimator.TrainSpec(input_fn=imgs_input_fn(
    "/home/pi/venv/raccoon_dataset/data/train.record",
    perform_shuffle=True,
    repeat_count=5,
    batch_size=20),
    max_steps=1)
and this is the resulting output:
Parsed example:
{'image': <tf.Tensor 'ParseSingleExample/ParseSingleExample:0' shape=() dtype=string>, 'label': <tf.Tensor 'ParseSingleExample/ParseSingleExample:1' shape=() dtype=int64>}
End of parsed example:
Dataset batched:
<BatchDataset shapes: ({image: (None, 300, 300, 3)}, (None, 1)), types: ({image: tf.float32}, tf.float32)>
End dataset
Iterator shape:
({'image': TensorShape([None, 300, 300, 3])}, TensorShape([None, 1]))
End
2019-11-20 14:01:14.493817: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at example_parsing_ops.cc:240 : Invalid argument: Feature: image (data type: string) is required but could not be found.
2019-11-20 14:01:14.495019: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at iterator_ops.cc:929 : Invalid argument: {{function_node __inference_Dataset_map__parse_function_27}} Feature: image (data type: string) is required but could not be found.
[[{{node ParseSingleExample/ParseSingleExample}}]]
Traceback (most recent call last):
File "transfer_learning.py", line 127, in <module>
batch_size=20),
File "transfer_learning.py", line 107, in imgs_input_fn
batch_features, batch_labels = iterator.get_next()
File "/home/pi/venv/lib/python3.7/site-packages/tensorflow_core/python/data/ops/iterator_ops.py", line 737, in get_next
return self._next_internal()
File "/home/pi/venv/lib/python3.7/site-packages/tensorflow_core/python/data/ops/iterator_ops.py", line 651, in _next_internal
output_shapes=self._flat_output_shapes)
File "/home/pi/venv/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py", line 2673, in iterator_get_next_sync
_six.raise_from(_core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: {{function_node __inference_Dataset_map__parse_function_27}} Feature: image (data type: string) is required but could not be found.
[[{{node ParseSingleExample/ParseSingleExample}}]] [Op:IteratorGetNextSync]
I don't know what I'm doing wrong.
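For what it's worth, that error means parse_single_example is looking for a feature literally named 'image' that the records don't contain; TFRecords written by the TF object-detection tooling (as in raccoon_dataset) typically use keys like 'image/encoded' instead. A quick way to check, sketched under the assumption that the train.record path from the script is correct:

import tensorflow as tf

# Print the feature keys actually stored in the first record, so that
# _parse_function's feature spec can be adjusted to match them.
dataset = tf.data.TFRecordDataset("/home/pi/venv/raccoon_dataset/data/train.record")
for raw_record in dataset.take(1):
    example = tf.train.Example()
    example.ParseFromString(raw_record.numpy())
    print(list(example.features.feature.keys()))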
Good day,
I'm getting the following error: is not JSON serializable
but I'm not sure why I'm getting it. Everything was working fine until I decided to start using sessions for adding and removing items in my user cart.
This is my view:
def add_or_update_cart(request, slug):
    request.session.set_expiry(180)
    new_total = 0.00
    try:
        # check that session exists
        the_cart_id = request.session['cart_id']
    except:
        new_cart_id = Cart()
        new_cart_id.save()
        request.session['cart_id'] = new_cart_id
        the_cart_id = new_cart_id.id
    cart = Cart.objects.get(id=the_cart_id)
    try:
        product = Product.objects.get(slug=slug)
    except Product.DoesNotExist:
        pass
    except:
        pass
    if not product in cart.products.all():
        cart.products.add(product)
    else:
        cart.products.remove(product)
    for item in cart.products.all():
        new_total += float(item.price)
    request.session['items_total'] = cart.products.count()
    cart.total = new_total
    cart.save()
    print(cart.products.count())
    return HttpResponseRedirect(reverse('cart:cart'))
Models:
class Cart(models.Model):
    products = models.ManyToManyField(Product, null=True, blank=True)
    total = models.DecimalField(max_digits=100, decimal_places=2, default=0.00)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    cart_status = models.BooleanField(default=False)

    def __str__(self):
        return '%s' % self.id

    def item_name(self):
        return " ".join([str(p) for p in self.product.all()])
and in my template:
<li role="presentation">Cart <span class="badge">{{ request.session.items_total }}</span></li>
The traceback:
Internal Server Error: /my-cart/puma/
Traceback (most recent call last):
File "/home/drcongo/.virtualenvs/eCommerce/lib/python3.4/site-packages/django/core/handlers/base.py", line 235, in get_response
response = middleware_method(request, response)
File "/home/drcongo/.virtualenvs/eCommerce/lib/python3.4/site-packages/django/contrib/sessions/middleware.py", line 50, in process_response
request.session.save()
File "/home/drcongo/.virtualenvs/eCommerce/lib/python3.4/site-packages/django/contrib/sessions/backends/db.py", line 82, in save
obj = self.create_model_instance(data)
File "/home/drcongo/.virtualenvs/eCommerce/lib/python3.4/site-packages/django/contrib/sessions/backends/db.py", line 68, in create_model_instance
session_data=self.encode(data),
File "/home/drcongo/.virtualenvs/eCommerce/lib/python3.4/site-packages/django/contrib/sessions/backends/base.py", line 88, in encode
serialized = self.serializer().dumps(session_dict)
File "/home/drcongo/.virtualenvs/eCommerce/lib/python3.4/site-packages/django/core/signing.py", line 95, in dumps
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
File "/usr/lib/python3.4/json/__init__.py", line 237, in dumps
**kw).encode(obj)
File "/usr/lib/python3.4/json/encoder.py", line 192, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python3.4/json/encoder.py", line 250, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python3.4/json/encoder.py", line 173, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: <Cart: 31> is not JSON serializable
I will appreciate any help on this.
The error arises when Django tries to serialize a model instance here:
request.session['cart_id'] = new_cart_id
new_cart_id is a model instance and cannot be serialized.
It seems you wanted to assign the primary key of the instance to the key cart_id.
request.session['cart_id'] = new_cart_id.id
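Applied to the view, a sketch of the corrected branch (only the session assignment changes; KeyError is the exception a missing session key actually raises, though the bare except from the question works too):

    except KeyError:
        new_cart_id = Cart()
        new_cart_id.save()
        # Store the primary key (an int, which is JSON serializable), not the instance
        request.session['cart_id'] = new_cart_id.id
        the_cart_id = new_cart_id.id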
Whenever I attempt to save using this code:
def openfile(self):
    self.filename = askopenfilename(filetypes=(("Extinction Save files", "*.exis"), ("All files", "*.*")))

def save(self):
    try:
        if not self.filename:
            self.filename = asksaveasfile(mode='w', defaultextension=".exis", filetypes=(("Extinction Save files", "*.exis"), ("All files", "*.*")))
            # if not '.exis' in self.filename:
            #     self.filename += ".exis"
            self.filename.close()
        with open(self.filename, "w") as file:
            file.write(self.compiledata)
    except Exception as error:
        print(str(error))

def saveas(self):
    try:
        self.filename = asksaveasfile(mode='w', defaultextension=".exis", filetypes=(("Extinction Save files", "*.exis"), ("All files", "*.*")))
        # if not '.exis' in self.filename:
        #     self.filename += ".exis"
        self.filename.close()
        with open(self.filename, "w") as file:
            file.write(self.compiledata)
    except Exception as error:
        print(str(error))

def compiledata(self):
    char = comboBox.currentText()
    health = lineEdit_2.text()
    level = lineEdit_3.text()
    XP = lineEdit_4.text()
    inv = []
    for index in range(self.listWidget.count()):
        inv.append(self.listWidget.item(index))
    return char + '\ninv[' + ', '.split(inv) + '\n]' + health + '\n' + level + '\n' + XP
I get the following error message:
invalid file: <_io.TextIOWrapper name='C:/Users/Joan/Desktop/file.exis' mode='w' encoding='cp1252'>
I have seen one instance on this site where someone had a somewhat similar error message, but the solution did not help me. I just don't understand what I am supposed to do at this point.
Because you use asksaveasfile(), self.filename is not a filename, but an opened file. You then use self.filename.close(), so self.filename becomes a closed file, but open(...) expects a filename.
To get the filename from the file, you can use self.filename.name, so:
with open(self.filename.name, "w") as file:
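Alternatively, asksaveasfilename() returns a path string rather than an open file, which avoids the problem entirely. A sketch of save() rewritten that way (an assumption, not the original code; note that compiledata is a method, so it has to be called to get the string to write):

from tkinter.filedialog import asksaveasfilename

def save(self):
    try:
        if not self.filename:
            # Returns the chosen path as a string ('' if the dialog is cancelled),
            # so it can be passed straight to open() with no close() needed
            self.filename = asksaveasfilename(defaultextension=".exis",
                                              filetypes=(("Extinction Save files", "*.exis"), ("All files", "*.*")))
        if not self.filename:
            return  # user cancelled the dialog
        with open(self.filename, "w") as file:
            file.write(self.compiledata())  # call the method to get the data string
    except Exception as error:
        print(str(error))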