I have three arrays of data. I want to loop over them and save the values to a temporary file if some condition is met. If the condition is not met, I would like to open the temporary file, find the index of the maximum value, and then save it to another file. When I try the code below I get the error shown. This is my first time using tempfile.NamedTemporaryFile(), so I may very well not be using it correctly. Thanks.
Traceback (most recent call last):
File "<ipython-input-19-7c44ca7dcbd6>", line 1, in <module>
runfile('C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes/Find V_max.py', wdir='C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes')
File "C:\Anaconda3\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 685, in runfile
execfile(filename, namespace)
File "C:\Anaconda3\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 85, in execfile
exec(compile(open(filename, 'rb').read(), filename, 'exec'), namespace)
File "C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes/Find V_max.py", line 222, in <module>
formation_def()
File "C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes/Find V_max.py", line 129, in formation_def
FT = np.loadtxt(TF,skiprows=0)
File "C:\Anaconda3\lib\site-packages\numpy\lib\npyio.py", line 770, in loadtxt
first_line = next(fh)
UnsupportedOperation: not readable
My code
import numpy as np
import tempfile
#large arrays of data
ID = np.array([some size])
MASS = np.array([some size])
V = np.array([some size])
def filesave(MAS, V):  # Function to write and save values to file
    Mc = str(MAS)
    Vel = str(V)
    w.write(Mc)
    w.write('\t')
    w.write(Vel)
    w.write('\n')
    return()
def formation_def():
    count = 1
    l = len(ID)
    for i in range(l):
        if ID[i] == count:
            for j in range(i, l):
                TF = tempfile.NamedTemporaryFile(mode='a')
                if ID[j] <= ID[i]:
                    T = str(ID[j])
                    M = str(MASS[j])
                    Vel = str(V[j])
                    TF.write(T)
                    TF.write('\t')
                    TF.write(M)
                    TF.write('\t')
                    TF.write(Vel)
                    TF.write('\n')
                elif ID[j] > ID[i]:  # if ID[j] > ID[i] then we are in the next halo in the list
                    FT = np.loadtxt(TF, skiprows=0)
                    MASS2 = FT[:, 0]
                    V2 = FT[:, 2]
                    vel_max = np.argmax(V2)
                    filesave(MASS2[vel_max], V2[vel_max])
                    TF.close()
                    count += 1  # and must increment the counter and break out of the loop
                    break
        else:
            count = ID[i] + 1
    return()
When you created your temporary file, you opened it in append mode, which only supports write operations.
TF = tempfile.NamedTemporaryFile(mode='a')
Where it seems to be failing, you are trying to read data from the file and put it in FT.
FT = np.loadtxt(TF,skiprows=0)
Change TF to mode='r' and you should have better luck.
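As a minimal sketch of the write-then-read-back pattern (assuming a mode such as 'w+' that permits both writing and reading, and remembering to rewind the file with seek(0) before reading):

import numpy as np
import tempfile

TF = tempfile.NamedTemporaryFile(mode='w+')   # 'w+' can both write and read
TF.write('1\t10.0\t200.0\n')
TF.write('1\t20.0\t350.0\n')
TF.seek(0)                      # rewind to the start before reading
FT = np.loadtxt(TF)             # loadtxt can now read the rows just written
print(FT[:, 2].max())           # e.g. the maximum of the velocity column
TF.close()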
Others have reported a similar error, but the solutions given do not solve my problem.
For example, there is a good answer here. The answer in the link mentions how ndb behaviour changes from the first use to later uses and suggests the problem is that a first run produces a None in the Datastore. I cannot reproduce or see that happening in the Datastore for my SDK, but that may be because I am running it from the interactive console.
I am pretty sure I got an initial good run with the GAE interactive console, but every run since then has failed with the error in the title of this question.
I have left the print statements in the following code because they show good results and assure me that the error is occurring in the put() at the very end.
from google.appengine.ext import ndb

class Account(ndb.Model):
    week = ndb.IntegerProperty(repeated=True)
    weeksNS = ndb.IntegerProperty(repeated=True)
    weeksEW = ndb.IntegerProperty(repeated=True)

terry = Account(week=[], weeksNS=[], weeksEW=[])
terry_key = terry.put()
terry = terry_key.get()
print terry

for t in list(range(4)):  # just dummy input, but like real input
    terry.week.append(t)
print terry.week

region = 1  # same error message for region = 0
if region:
    terry.weeksEW.append(terry.week)
else:
    terry.weeksNS.append(terry.week)

print 'EW' + str(terry.weeksEW)
print 'NS' + str(terry.weeksNS)
terry.week = []
print 'week' + str(terry.week)
terry.put()
The idea of my code is to first build up the terry.week list values incrementally and then later store the whole list to the appropriate region, either NS or EW. So I'm looking for a workaround for this scheme.
The error message is likely of no value but I am reproducing it here.
Traceback (most recent call last):
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/python/runtime/request_handler.py", line 237, in handle_interactive_request
exec(compiled_code, self._command_globals)
File "<string>", line 55, in <module>
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 3458, in _put
return self._put_async(**ctx_options).get_result()
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/tasklets.py", line 383, in get_result
self.check_success()
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/tasklets.py", line 427, in _help_tasklet_along
value = gen.throw(exc.__class__, exc, tb)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/context.py", line 824, in put
key = yield self._put_batcher.add(entity, options)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/tasklets.py", line 430, in _help_tasklet_along
value = gen.send(val)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/context.py", line 358, in _put_tasklet
keys = yield self._conn.async_put(options, datastore_entities)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/datastore/datastore_rpc.py", line 1858, in async_put
pbs = [entity_to_pb(entity) for entity in entities]
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 697, in entity_to_pb
pb = ent._to_pb()
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 3167, in _to_pb
prop._serialize(self, pb, projection=self._projection)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1422, in _serialize
values = self._get_base_value_unwrapped_as_list(entity)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1192, in _get_base_value_unwrapped_as_list
wrapped = self._get_base_value(entity)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1180, in _get_base_value
return self._apply_to_values(entity, self._opt_call_to_base_type)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1352, in _apply_to_values
value[:] = map(function, value)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1234, in _opt_call_to_base_type
value = _BaseValue(self._call_to_base_type(value))
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1255, in _call_to_base_type
return call(value)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1331, in call
newvalue = method(self, value)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1781, in _validate
(value,))
BadValueError: Expected integer, got [0, 1, 2, 3]
I believe the error comes from these lines:
terry.weeksEW.append(terry.week)
terry.weeksNS.append(terry.week)
You are not appending another integer; you are appending a whole list where an integer is expected.
>>> aaa = [1,2,3]
>>> bbb = [4,5,6]
>>> aaa.append(bbb)
>>> aaa
[1, 2, 3, [4, 5, 6]]
>>>
This fails the ndb.IntegerProperty test.
Try:
terry.weeksEW += terry.week
terry.weeksNS += terry.week
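By contrast, extending with += (a quick interactive sketch mirroring the example above) keeps the list flat:

>>> aaa = [1, 2, 3]
>>> bbb = [4, 5, 6]
>>> aaa += bbb
>>> aaa
[1, 2, 3, 4, 5, 6]
>>>

Every element is still an integer, so it passes the IntegerProperty validation.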
EDIT: To save a list of lists, do not use IntegerProperty(); use JsonProperty() instead. Better still, the legacy ndb datastore library is deprecated, so I recommend Firestore, which uses JSON objects by default. At the very least, use Cloud Datastore or Cloud NDB.
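A minimal sketch of that suggestion on the legacy ndb library (field names reused from the question; note that weeksNS and weeksEW must be initialised to a list before appending):

from google.appengine.ext import ndb

class Account(ndb.Model):
    week = ndb.IntegerProperty(repeated=True)
    weeksNS = ndb.JsonProperty()   # holds arbitrary JSON, e.g. a list of lists
    weeksEW = ndb.JsonProperty()

terry = Account(week=[], weeksNS=[], weeksEW=[])
terry.weeksEW.append([0, 1, 2, 3])   # storing a whole week list no longer trips the integer check
terry.put()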
I want to convert a variable to a string and then to an array that I can use for comparison, but I don't know how to do that.
My code:
import face_recognition
import numpy as np
a = face_recognition.load_image_file('C:\\Users\zivsi\OneDrive\תמונות\סרט צילום\WIN_20191115_10_32_24_Pro.jpg') # my picture 1
b = face_recognition.load_image_file('C:\\Users\zivsi\OneDrive\תמונות\סרט צילום\WIN_20191115_09_48_56_Pro.jpg') # my picture 2
c = face_recognition.load_image_file(
'C:\\Users\zivsi\OneDrive\תמונות\סרט צילום\WIN_20191115_09_48_52_Pro.jpg') # my picture 3
d = face_recognition.load_image_file('C:\\Users\zivsi\OneDrive\תמונות\סרט צילום\ziv sion.jpg') # my picture 4
e = face_recognition.load_image_file(
'C:\\Users\zivsi\OneDrive\תמונות\סרט צילום\WIN_20191120_17_46_40_Pro.jpg') # my picture 5
f = face_recognition.load_image_file(
'C:\\Users\zivsi\OneDrive\תמונות\סרט צילום\WIN_20191117_16_19_11_Pro.jpg') # my picture 6
a = face_recognition.face_encodings(a)[0]
b = face_recognition.face_encodings(b)[0]
c = face_recognition.face_encodings(c)[0]
d = face_recognition.face_encodings(d)[0]
e = face_recognition.face_encodings(e)[0]
f = face_recognition.face_encodings(f)[0]
Here I tried to convert the variable to a string
str_variable = str(a)
array_variable = np.array(str_variable)
my_face = a, b, c, d, e, f, array_variable
while True:
    new = input('path: ')
    print('Recognizing...')
    unknown = face_recognition.load_image_file(new)
    unknown_encodings = face_recognition.face_encodings(unknown)[0]
The program cannot use the variable:
results = face_recognition.compare_faces(array_variable, unknown_encodings, tolerance=0.4)
print(results)
recognize_times = int(results.count(True))
if (3 <= recognize_times):
    print('hello boss!')
    my_face = *my_face, unknown_encodings
Please help me.
The error shown:
Traceback (most recent call last):
File "C:/Users/zivsi/PycharmProjects/AI/pytt.py", line 37, in <module>
results = face_recognition.compare_faces(my_face, unknown_encodings, tolerance=0.4)
File "C:\Users\zivsi\AppData\Local\Programs\Python\Python36\lib\site-
packages\face_recognition\api.py", line 222, in compare_faces
return list(face_distance(known_face_encodings, face_encoding_to_check) <= tolerance)
File "C:\Users\zivsi\AppData\Local\Programs\Python\Python36\lib\site-packages\face_recognition\api.py", line 72, in face_distance
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
ValueError: operands could not be broadcast together with shapes (7,) (128,)
First of all, array_variable should actually be a list of the known encodings, not a numpy array.
Also, you do not need str() at all.
Now, in your case, if the input images (i.e. a, b, c, d, e, f) do NOT have the same dimensions, the error will persist. You cannot compare images that have different sizes using this function, because the comparison is based on a distance, and a distance is only defined on vectors of the same length.
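Applied to the variables in the question, a minimal sketch (assuming all six images encode successfully) would be:

known_encodings = [a, b, c, d, e, f]   # a plain list of 128-dimensional encodings; no str() or np.array() needed
results = face_recognition.compare_faces(known_encodings, unknown_encodings, tolerance=0.4)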
Here is a working simple example using the photos from https://github.com/ageitgey/face_recognition/tree/master/examples:
import face_recognition
import numpy as np
from PIL import Image, ImageDraw
from IPython.display import display
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
array_variable = [obama_face_encoding,biden_face_encoding] # list of known encodings
# compare the list with the biden_face_encoding
results = face_recognition.compare_faces(array_variable, biden_face_encoding, tolerance=0.4)
print(results)
[False, True] # True means match, False mismatch
# False: coming from obama_face_encoding VS biden_face_encoding
# True: coming from biden_face_encoding VS biden_face_encoding
To run it go here: https://beta.deepnote.com/project/09705740-31c0-4d9a-8890-269ff1c3dfaf#
Documentation: https://face-recognition.readthedocs.io/en/latest/face_recognition.html
EDIT
To save the known encodings, you can use numpy.save:
np.save('encodings',biden_face_encoding) # save
load_again = np.load('encodings.npy') # load again
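To keep several known encodings together, one option (a sketch, not part of the original answer) is to stack them into a single 2-D array before saving:

known = np.array([obama_face_encoding, biden_face_encoding])   # shape (2, 128)
np.save('known_encodings', known)                              # writes known_encodings.npy
known_list = list(np.load('known_encodings.npy'))              # back to a list usable by compare_faces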
Using Python 3 and TensorFlow, I've tried to feed my data as training data into tf.train.slice_input_producer and tf.train.shuffle_batch:
def batch_data():
    ...
    # trX as training_data and trY as training_labels.
    # Both are numpy arrays
    data_queues = tf.train.slice_input_producer([trX, trY])
    X, Y = tf.train.shuffle_batch(data_queues, num_threads=num_threads,
                                  batch_size=batch_size,
                                  capacity=batch_size * 64,
                                  min_after_dequeue=batch_size * 32,
                                  allow_smaller_final_batch=False)
    return X, Y
But I got a "tensor larger than 2GB" error:
data_queues = tf.train.slice_input_producer([trX, trY])
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\training\input.py", line 302, in slice_input_producer
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1153, in convert_n_to_tensor_or_indexed_slices
values=values, dtype=dtype, name=name, as_ref=False)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1124, in internal_convert_n_to_tensor_or_indexed_slices
value, dtype=dtype, name=n, as_ref=as_ref))
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1083, in internal_convert_to_tensor_or_indexed_slices
value, dtype=dtype, name=name, as_ref=as_ref)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 926, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\constant_op.py", line 229, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\constant_op.py", line 208, in constant
value, dtype=dtype, shape=shape, verify_shape=verify_shape))
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\tensor_util.py", line 447, in make_tensor_proto
"Cannot create a tensor proto whose content is larger than 2GB.")
ValueError: Cannot create a tensor proto whose content is larger than 2GB.
I tried to work around it with a variable:
def batch_data():
    ...
    Xplaceholder = tf.placeholder(trX.dtype, shape=trX.shape, name='Xplaceholder')
    Xvar = tf.get_variable('XVariable', shape=trX.shape, dtype=trX.dtype, initializer=tf.zeros_initializer())
    Yplaceholder = tf.placeholder(trY.dtype, shape=trY.shape, name='Yplaceholder')
    Yvar = tf.get_variable('YVariable', shape=trY.shape, dtype=trY.dtype, initializer=tf.zeros_initializer())
    Xassign = Xvar.assign(Xplaceholder)
    Yassign = Yvar.assign(Yplaceholder)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        session.run(Xassign, feed_dict={Xplaceholder: trX})
        session.run(Yassign, feed_dict={Yplaceholder: trY})
        session.close()
    data_queues = tf.train.slice_input_producer([Xvar, Yvar])
    X, Y = tf.train.shuffle_batch(data_queues, num_threads=num_threads,
                                  batch_size=batch_size,
                                  capacity=batch_size * 64,
                                  min_after_dequeue=batch_size * 32,
                                  allow_smaller_final_batch=False)
Actually, it works. But when I monitored the loss value, it was different on each training run, and the loss always increased during training and never decreased.
Can anybody give me some insight into why this happens?
Thanks
I am trying to read a few files from a path, as an extension to my previous question. The answer given by Jianxun definitely makes sense, but I am getting a KeyError. I am very new to pandas and not able to fix the error.
Note: I use Python 2.7 and Pandas 0.16
File_1.csv
Ids,12:00:00
2341,9865
7352,8969
File_2.csv
Ids,12:45:00
1234,9865
8435,8969
Master.csv
Ids,00:00:00,00:30:00,00:45:00
1234,1000,500,100
8435,5243,300,200
2341,563,400,400
7352,345,500,600
Programs:
import pandas as pd
import numpy as np
from StringIO import StringIO
# your csv file contents
csv_file1 = 'Path/Transition_Data/Test_1.csv '
csv_file2 = 'Path/Transition_Data/Test_2.csv '
csv_file_all = [csv_file1, csv_file2]
# read csv into df using list comprehension
# I use buffer here, replace stringIO with your file path
df_all = [pd.read_csv(StringIO(csv_file)) for csv_file in csv_file_all]
# processing
# =====================================================
# concat along axis=0, outer join on axis=1
merged = pd.concat(df_all, axis=0, ignore_index=True, join='outer').set_index('Ids')
# custom function to handle/merge duplicates on Ids (axis=0)
def apply_func(group):
    return group.fillna(method='ffill').iloc[-1]
# remove Ids duplicates
merged_unique = merged.groupby(level='Ids').apply(apply_func)
# do the subtraction
master_csv_file = 'Path/Data_repository/Master1_Test.csv'
df_master = pd.read_csv(io.StringIO(master_csv_file), index_col=['Ids']).sort_index()
# select matching records and horizontal concat
df_matched = pd.concat([df_master,merged_unique.reindex(df_master.index)], axis=1)
# use broadcasting
df_matched.iloc[:, 1:] = df_matched.iloc[:, 1:].sub(df_matched.iloc[:, 0], axis=0)
Error:
Traceback (most recent call last):
File "distribute_count.py", line 18, in <module>
merged = pd.concat(df_all, axis=0, ignore_index=True, join='outer').set_index('Ids')
File "/usr/lib/pymodules/python2.7/pandas/core/frame.py", line 2583, in set_index
level = frame[col].values
File "/usr/lib/pymodules/python2.7/pandas/core/frame.py", line 1787, in __getitem__
return self._getitem_column(key)
File "/usr/lib/pymodules/python2.7/pandas/core/frame.py", line 1794, in _getitem_column
return self._get_item_cache(key)
File "/usr/lib/pymodules/python2.7/pandas/core/generic.py", line 1079, in _get_item_cache
values = self._data.get(item)
File "/usr/lib/pymodules/python2.7/pandas/core/internals.py", line 2843, in get
loc = self.items.get_loc(item)
File "/usr/lib/pymodules/python2.7/pandas/core/index.py", line 1437, in get_loc
return self._engine.get_loc(_values_from_object(key))
File "index.pyx", line 134, in pandas.index.IndexEngine.get_loc (pandas/index.c:3786)
File "index.pyx", line 154, in pandas.index.IndexEngine.get_loc (pandas/index.c:3664)
File "hashtable.pyx", line 697, in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:11943)
File "hashtable.pyx", line 705, in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:11896)
KeyError: 'Ids'
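The KeyError comes from wrapping the file paths in StringIO: pd.read_csv then parses the path string itself as the CSV contents, so no 'Ids' column ever exists. A minimal illustration (paths taken from the question):

pd.read_csv(StringIO('Path/Transition_Data/Test_1.csv '))   # parses the path text, not the file -> no 'Ids' column
pd.read_csv('Path/Transition_Data/Test_1.csv')              # reads the actual file

The corrected version below passes the paths straight to pd.read_csv.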
import pandas as pd
import numpy as np
# your csv file contents
csv_file1 = '/home/Jian/Downloads/stack_flow_bundle/Transition_Data/Test_1.csv'
csv_file2 = '/home/Jian/Downloads/stack_flow_bundle/Transition_Data/Test_2.csv'
master_csv_file = '/home/Jian/Downloads/stack_flow_bundle/Data_repository/master_lac_Test.csv'
csv_file_all = [csv_file1, csv_file2]
# read csv into df using list comprehension
# pass the file paths directly to read_csv (no StringIO buffer needed)
df_all = [pd.read_csv(csv_file) for csv_file in csv_file_all]
# processing
# =====================================================
# concat along axis=0, outer join on axis=1
merged = pd.concat(df_all, axis=0, ignore_index=True, join='outer').set_index('Ids')
# custom function to handle/merge duplicates on Ids (axis=0)
def apply_func(group):
    return group.fillna(method='ffill').iloc[-1]
# remove Ids duplicates
merged_unique = merged.groupby(level='Ids').apply(apply_func)
# do the subtraction
df_master = pd.read_csv(master_csv_file, index_col=['Ids']).sort_index()
# select matching records and horizontal concat
df_matched = pd.concat([df_master,merged_unique.reindex(df_master.index)], axis=1)
# use broadcasting
df_matched.iloc[:, 1:] = df_matched.iloc[:, 1:].sub(df_matched.iloc[:, 0], axis=0)
print(df_matched)
00:00:00 00:30:00 00:45:00 12:00:00 12:45:00
Ids
1234 1000 -500 -900 NaN 8865
2341 563 -163 -163 9302 NaN
7352 345 155 255 8624 NaN
8435 5243 -4943 -5043 NaN 3726
I get the error:
Error: Array length mismatch (expected 3, got 13)
TypeError: a float is required
Traceback (most recent call last):
File "\Test.py", line 393, in from_pydata
File "C:\Program Files (x86)\Blender Foundation\Blender\2.68\2.68\scripts\modules\bpy_types.py", line 393, in from_pydata
self.vertices.foreach_set("co", vertices_flat)
TypeError: couldn't access the py sequence
Error: Python script fail, look in the console for now...
Here is the code:
filePath = "C:\\Users\\siba\\Desktop\\1x1x1.blb"
f = open(filePath)
line = f.readline()
while line:
    if line == "POSITION:\n":
        POS1 = f.readline().replace('\n', '')
    line = f.readline()
f.close()
coord1 = POS1
Verts = [coord1]
import bpy
profile_mesh = bpy.data.meshes.new("Base_Profile_Data")
profile_mesh.from_pydata(Verts, [], [])
profile_mesh.update()
profile_object = bpy.data.objects.new("Base_Profile", profile_mesh)
profile_object.data = profile_mesh
scene = bpy.context.scene
scene.objects.link(profile_object)
profile_object.select = True
Here is 1x1x1.blb:
POSITION:
0.5 0.5 0.5
Just a stab in the dark, as I don't script Blender and cannot be bothered to find the docs, but I would imagine Verts needs to be a list of float coordinates, and you are providing a space-separated string, so something like this might work:
coord1 = POS1.split(' ')
coord1 = [float(x) for x in coord1]  # map() returns a new sequence; it does not modify coord1 in place
Verts = [coord1]                     # from_pydata expects a list of (x, y, z) vertices
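For reference, a minimal sketch of what from_pydata expects (the vertex value is the one from the 1x1x1.blb example; the mesh name is just illustrative):

import bpy
mesh = bpy.data.meshes.new("Example")
mesh.from_pydata([(0.5, 0.5, 0.5)], [], [])   # vertices as (x, y, z) triples, then edges, then faces
mesh.update()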