Blender from_pydata error when reading vertex positions from a file - arrays

I get this error:
Error: Array length mismatch (expected 3, got 13)
TypeError: a float is required
Traceback (most recent call last):
File "\Test.py", line 393, in from_pydata
File "C:\Program Files (x86)\Blender Foundation\Blender\2.68\2.68\scripts\modules\bpy_types.py", line 393, in from_pydata
self.vertices.foreach_set("co", vertices_flat)
TypeError: couldn't access the py sequence
Error: Python script fail, look in the console for now...
Here is the code:
filePath = "C:\\Users\\siba\\Desktop\\1x1x1.blb"
f = open(filePath)
line = f.readline()
while line:
    if line == "POSITION:\n":
        POS1 = f.readline().replace('\n', '')
    line = f.readline()
f.close()
coord1 = POS1
Verts = [coord1]
import bpy
profile_mesh = bpy.data.meshes.new("Base_Profile_Data")
profile_mesh.from_pydata(Verts, [], [])
profile_mesh.update()
profile_object = bpy.data.objects.new("Base_Profile", profile_mesh)
profile_object.data = profile_mesh
scene = bpy.context.scene
scene.objects.link(profile_object)
profile_object.select = True
Here is 1x1x1.blb:
POSITION:
0.5 0.5 0.5

Just a stab in the dark, as I don't script Blender, but from_pydata needs Verts to be a list of (x, y, z) float triples, and you are providing a single space-separated string. Note also that map does not modify a list in place, so its result has to be captured. This should work:
coord1 = tuple(map(float, POS1.split()))
Verts = [coord1]
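For completeness, a minimal sketch of the read-and-build flow with that fix applied (assumptions: the same file layout as 1x1x1.blb above and the Blender 2.6x API used in the question; it also copes with several POSITION blocks):
import bpy

filePath = "C:\\Users\\siba\\Desktop\\1x1x1.blb"
Verts = []
with open(filePath) as f:
    for line in f:
        if line == "POSITION:\n":
            # The next line holds "x y z"; turn it into a float triple.
            Verts.append(tuple(map(float, next(f).split())))

profile_mesh = bpy.data.meshes.new("Base_Profile_Data")
profile_mesh.from_pydata(Verts, [], [])
profile_mesh.update()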

Datastore error: BadValueError: Expected integer, got [0, 1, 2, 3]

Others have reported a similar error, but the solutions given do not solve my problem.
For example, there is a good answer here. The answer in the link mentions how ndb changes from a first use to a later use and suggests there is a problem because a first run produces a None in the Datastore. I cannot reproduce or see that happening in the Datastore for my SDK, but that may be because I am running it from the interactive console.
I am pretty sure I got an initial good run with the GAE interactive console, but every run since then has failed with the error in the title of this question.
I have left the print statements in the following code because they show good results and assure me that the error is occurring in the put() at the very end.
from google.appengine.ext import ndb

class Account(ndb.Model):
    week = ndb.IntegerProperty(repeated=True)
    weeksNS = ndb.IntegerProperty(repeated=True)
    weeksEW = ndb.IntegerProperty(repeated=True)

terry = Account(week=[], weeksNS=[], weeksEW=[])
terry_key = terry.put()
terry = terry_key.get()
print terry
for t in list(range(4)):  # just dummy input, but like real input
    terry.week.append(t)
print terry.week
region = 1  # same error message for region = 0
if region:
    terry.weeksEW.append(terry.week)
else:
    terry.weeksNS.append(terry.week)
print 'EW' + str(terry.weeksEW)
print 'NS' + str(terry.weeksNS)
terry.week = []
print 'week' + str(terry.week)
terry.put()
The idea of my code is to first build up the terry.week list values incrementally and then later store the whole list to the appropriate region, either NS or EW. So I'm looking for a workaround for this scheme.
The error message is likely of no value but I am reproducing it here.
Traceback (most recent call last):
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/python/runtime/request_handler.py", line 237, in handle_interactive_request
exec(compiled_code, self._command_globals)
File "<string>", line 55, in <module>
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 3458, in _put
return self._put_async(**ctx_options).get_result()
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/tasklets.py", line 383, in get_result
self.check_success()
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/tasklets.py", line 427, in _help_tasklet_along
value = gen.throw(exc.__class__, exc, tb)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/context.py", line 824, in put
key = yield self._put_batcher.add(entity, options)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/tasklets.py", line 430, in _help_tasklet_along
value = gen.send(val)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/context.py", line 358, in _put_tasklet
keys = yield self._conn.async_put(options, datastore_entities)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/datastore/datastore_rpc.py", line 1858, in async_put
pbs = [entity_to_pb(entity) for entity in entities]
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 697, in entity_to_pb
pb = ent._to_pb()
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 3167, in _to_pb
prop._serialize(self, pb, projection=self._projection)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1422, in _serialize
values = self._get_base_value_unwrapped_as_list(entity)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1192, in _get_base_value_unwrapped_as_list
wrapped = self._get_base_value(entity)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1180, in _get_base_value
return self._apply_to_values(entity, self._opt_call_to_base_type)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1352, in _apply_to_values
value[:] = map(function, value)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1234, in _opt_call_to_base_type
value = _BaseValue(self._call_to_base_type(value))
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1255, in _call_to_base_type
return call(value)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1331, in call
newvalue = method(self, value)
File "/Users/brian/google-cloud-sdk/platform/google_appengine/google/appengine/ext/ndb/model.py", line 1781, in _validate
(value,))
BadValueError: Expected integer, got [0, 1, 2, 3]
I believe the error comes from these lines:
terry.weeksEW.append(terry.week)
terry.weeksNS.append(terry.week)
You are not appending another integer; you are appending a list where an integer is expected.
>>> aaa = [1,2,3]
>>> bbb = [4,5,6]
>>> aaa.append(bbb)
>>> aaa
[1, 2, 3, [4, 5, 6]]
>>>
This fails the ndb.IntegerProperty test.
Try:
terry.weeksEW += terry.week
terry.weeksNS += terry.week
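Continuing the interpreter example above, += extends the list with the individual integers instead of nesting a list inside it:
>>> aaa = [1, 2, 3]
>>> aaa += [4, 5, 6]
>>> aaa
[1, 2, 3, 4, 5, 6]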
EDIT: To save a list of lists, do not use IntegerProperty(); use JsonProperty() instead. Better still, the ndb datastore is deprecated, so I recommend Firestore, which uses JSON objects by default. At least use Cloud Datastore, or Cloud NDB.
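A minimal sketch of the JsonProperty variant (property names reused from the question; I am assuming weeksNS/weeksEW should each end up holding a list of week-lists):
from google.appengine.ext import ndb

class Account(ndb.Model):
    week = ndb.IntegerProperty(repeated=True)
    # JsonProperty accepts any JSON-serializable value,
    # so a list of lists of integers validates fine.
    weeksNS = ndb.JsonProperty()
    weeksEW = ndb.JsonProperty()

terry = Account(week=[], weeksNS=[], weeksEW=[])
terry.week = [0, 1, 2, 3]
terry.weeksEW.append(terry.week)  # appending a whole list is now allowed
terry.week = []
terry.put()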

Tensorflow > 2GB array as an input for tf.slice_input_producer

Using Python 3 and TensorFlow, I've tried to feed my training data into tf.train.slice_input_producer and tf.train.shuffle_batch:
def batch_data():
    ...
    # trX is the training data and trY the training labels.
    # Both are NumPy arrays.
    data_queues = tf.train.slice_input_producer([trX, trY])
    X, Y = tf.train.shuffle_batch(data_queues, num_threads=num_threads,
                                  batch_size=batch_size,
                                  capacity=batch_size * 64,
                                  min_after_dequeue=batch_size * 32,
                                  allow_smaller_final_batch=False)
    return X, Y
But I got the tensor-larger-than-2GB error:
data_queues = tf.train.slice_input_producer([trX, trY])
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\training\input.py", line 302, in slice_input_producer
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1153, in convert_n_to_tensor_or_indexed_slices
values=values, dtype=dtype, name=name, as_ref=False)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1124, in internal_convert_n_to_tensor_or_indexed_slices
value, dtype=dtype, name=n, as_ref=as_ref))
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1083, in internal_convert_to_tensor_or_indexed_slices
value, dtype=dtype, name=name, as_ref=as_ref)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\ops.py", line 926, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\constant_op.py", line 229, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\constant_op.py", line 208, in constant
value, dtype=dtype, shape=shape, verify_shape=verify_shape))
File "C:\Users\ellamunde\AppData\Local\Continuum\anaconda2\envs\python36\lib\site-packages\tensorflow\python\framework\tensor_util.py", line 447, in make_tensor_proto
"Cannot create a tensor proto whose content is larger than 2GB.")
ValueError: Cannot create a tensor proto whose content is larger than 2GB.
I tried to handle it with a variable:
def batch_data():
    ...
    Xplaceholder = tf.placeholder(trX.dtype, shape=trX.shape, name='Xplaceholder')
    Xvar = tf.get_variable('XVariable', shape=trX.shape, dtype=trX.dtype,
                           initializer=tf.zeros_initializer())
    Yplaceholder = tf.placeholder(trY.dtype, shape=trY.shape, name='Yplaceholder')
    Yvar = tf.get_variable('YVariable', shape=trY.shape, dtype=trY.dtype,
                           initializer=tf.zeros_initializer())
    Xassign = Xvar.assign(Xplaceholder)
    Yassign = Yvar.assign(Yplaceholder)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        session.run(Xassign, feed_dict={Xplaceholder: trX})
        session.run(Yassign, feed_dict={Yplaceholder: trY})
        session.close()

    data_queues = tf.train.slice_input_producer([Xvar, Yvar])
    X, Y = tf.train.shuffle_batch(data_queues, num_threads=num_threads,
                                  batch_size=batch_size,
                                  capacity=batch_size * 64,
                                  min_after_dequeue=batch_size * 32,
                                  allow_smaller_final_batch=False)
This actually works, but when I monitored the loss, each training run behaved differently: the loss always increased and never decreased.
Can anybody give me insight into why this happens?
Thanks
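One possible cause, offered as an assumption since the training loop is not shown: if tf.global_variables_initializer() runs again after the assignment session, it resets XVariable and YVariable to zeros, so the queue serves all-zero data. A sketch of the classic TF 1.x preloaded-data pattern, which initializes the variable directly from a placeholder and keeps it out of the global-variables collection so a later initializer run cannot wipe it:
import tensorflow as tf

# trX is assumed to be the NumPy training array from batch_data().
X_init = tf.placeholder(trX.dtype, shape=trX.shape)
# collections=[] keeps the variable out of GLOBAL_VARIABLES, so
# tf.global_variables_initializer() will not overwrite the loaded data.
Xvar = tf.Variable(X_init, trainable=False, collections=[])

with tf.Session() as session:
    # Initialize once, straight from the placeholder.
    session.run(Xvar.initializer, feed_dict={X_init: trX})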

error: (-209) The operation is neither 'array op array'... (Python 3.4, opencv, picamera)

I want to feed a video taken with the picamera (.h264) into my Python code on my PC [as opencv-python will not install on my Raspbian OS].
I converted the video from .h264 to .mp4 and gave that as the input video file, and I get the error below.
OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array') in arithm_op, file /io/opencv/modules/core/src/arithm.cpp, line 659
Traceback (most recent call last):
File "/home/ramakrishna/PycharmProjects/Lanedect/driving-lane-departure-warning-master/main.py", line 36, in <module>
clip = clip1.fl_image(process_frame) #NOTE: it should be in BGR format
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/video/VideoClip.py", line 533, in fl_image
return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/Clip.py", line 136, in fl
newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
File "<decorator-gen-57>", line 2, in set_make_frame
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/decorators.py", line 14, in outplace
f(newclip, *a, **k)
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/video/VideoClip.py", line 694, in set_make_frame
self.size = self.get_frame(0).shape[:2][::-1]
File "<decorator-gen-14>", line 2, in get_frame
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/decorators.py", line 89, in wrapper
return f(*new_a, **new_kw)
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/Clip.py", line 95, in get_frame
return self.make_frame(t)
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/Clip.py", line 136, in <lambda>
newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
File "/home/ramakrishna/.local/lib/python3.4/site-packages/moviepy/video/VideoClip.py", line 533, in <lambda>
return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
File "/home/ramakrishna/PycharmProjects/Lanedect/driving-lane-departure-warning-master/lane.py", line 619, in process_frame
output = create_output_frame(offcenter, pts, img_undist_, fps, curvature, curve_direction, binary_sub)
File "/home/ramakrishna/PycharmProjects/Lanedect/driving-lane-departure-warning-master/lane.py", line 486, in create_output_frame
output = cv2.addWeighted(undist_ori, 1, newwarp_, 0.3, 0)
cv2.error: /io/opencv/modules/core/src/arithm.cpp:659: error: (-209) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function arithm_op
Process finished with exit code 1
Please help me solve this error.
Below are the frame dimensions already set in the code, but my video file is 20.5 MB with dimensions 1920 x 1080. How can I change the dimensions if I have to?
left_lane = Lane()
right_lane = Lane()
frame_width = 1280
frame_height = 720
LANEWIDTH = 3.7 # highway lane width in US: 3.7 meters
input_scale = 4
output_frame_scale = 4
N = 4 # buffer previous N lines
# fullsize:1280x720
x = [194, 1117, 705, 575]
y = [719, 719, 461, 461]
X = [290, 990, 990, 290]
Y = [719, 719, 0, 0]
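Not a fix for the array-size mismatch itself, but since the question asks how to change the dimensions: one option is to resize the clip to the 1280x720 the pipeline expects before any frame processing. A sketch, assuming clip1 is the moviepy VideoFileClip opened in main.py ("input.mp4" is a stand-in name):
from moviepy.editor import VideoFileClip

# Downscale the 1920x1080 source to the 1280x720 frame size
# that the lane-detection code assumes.
clip1 = VideoFileClip("input.mp4").resize((1280, 720))
clip = clip1.fl_image(process_frame)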

Cannot Query Array of Integers in Postgres

I migrated a column from HSTORE to JSONB and am using this snippet of code...
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
if employment_type:
    base = base.filter(Candidate.bio["employment_type"].cast(ARRAY).contains(employment_type))
and am getting this error...
127.0.0.1 - - [28/Mar/2016 12:25:13] "GET /candidate_filter/?employment_type_3=true HTTP/1.1" 500 -
Traceback (most recent call last):
File "/Library/Python/2.7/site-packages/flask/app.py", line 1836, in __call__
return self.wsgi_app(environ, start_response)
File "/Library/Python/2.7/site-packages/flask/app.py", line 1820, in wsgi_app
response = self.make_response(self.handle_exception(e))
File "/Library/Python/2.7/site-packages/flask/app.py", line 1403, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Library/Python/2.7/site-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/Library/Python/2.7/site-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Library/Python/2.7/site-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Library/Python/2.7/site-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/Library/Python/2.7/site-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/surajkapoor/Desktop/lhv-talenttracker/app/views.py", line 660, in investor_filter
base = base.filter(Candidate.bio["employment_type"].cast(ARRAY).contains(employment_type))
File "/Library/Python/2.7/site-packages/sqlalchemy/dialects/postgresql/json.py", line 93, in cast
return self.astext.cast(type_)
File "/Library/Python/2.7/site-packages/sqlalchemy/dialects/postgresql/json.py", line 95, in cast
return sql.cast(self, type_)
File "<string>", line 2, in cast
File "/Library/Python/2.7/site-packages/sqlalchemy/sql/elements.py", line 2314, in __init__
self.type = type_api.to_instance(type_)
File "/Library/Python/2.7/site-packages/sqlalchemy/sql/type_api.py", line 1142, in to_instance
return typeobj(*arg, **kw)
TypeError: __init__() takes at least 2 arguments (1 given)
Candidate.bio["employment_type"] is an array of integers, and I'm simply trying to query all the rows that contain a specific integer.
Also, .cast() works perfectly on the same column when casting to Integer...
if internship:
    base = base.filter(Candidate.bio["internship"].cast(Integer) == 1)
The immediate TypeError comes from casting to the bare ARRAY class: ARRAY must be instantiated with an item type, e.g. ARRAY(Integer), which is why its __init__ complains about a missing argument. More generally, SqlAlchemy has difficulty constructing the WHERE clause because it can't figure out what type bio->'employment_type' is.
If the contains method is called on a String expression it generates a LIKE clause, but for JSONB or ARRAY it needs to generate the @> (containment) operator.
To give SqlAlchemy the necessary hints, use explicit casting everywhere, i.e. write your query like
from sqlalchemy import cast

if employment_type:
    casted_field = Candidate.bio['employment_type'].cast(JSONB)
    casted_values = cast(employment_type, JSONB)
    stmt = base.filter(casted_field.contains(casted_values))
In my example, I have a JSONB column named bio with the following data:
{"employment_type": [1, 2, 3]}
Edit: Casting to JSONB works:
>>> from sqlalchemy.dialects.postgresql import JSONB
>>> employment_type = 2
>>> query = (
... session.query(Candidate)
... .filter(Candidate.bio['employment_type'].cast(JSONB).contains(employment_type)))
>>> query.one().bio
{"employment_type": [1, 2, 3]}
Original answer:
I couldn't get .contains to work on Candidate.bio['employment_type'], but we can do the equivalent of the following SQL:
SELECT * FROM candidate WHERE candidate.bio @> '{"employment_type": [2]}';
like this:
>>> employment_type = 2
>>> test = {'employment_type': [employment_type]}
>>> query = (
... session.query(Candidate)
... .filter(Candidate.bio.contains(test)))
>>> query.one().bio
{"employment_type": [1, 2, 3]}

Appending a temp file in a for loop

I have three arrays of data. I want to loop over them and save the values in a temporary file if some condition is met. If the condition is not met, I would like to open the temporary file, find the index of the maximum value, and then save it to another file. When I try the code below I get this error. This is my first time using tempfile.NamedTemporaryFile(), so I very well may not be using it correctly. Thanks
Traceback (most recent call last):
File "<ipython-input-19-7c44ca7dcbd6>", line 1, in <module>
runfile('C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes/Find V_max.py', wdir='C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes')
File "C:\Anaconda3\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 685, in runfile
execfile(filename, namespace)
File "C:\Anaconda3\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 85, in execfile
exec(compile(open(filename, 'rb').read(), filename, 'exec'), namespace)
File "C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes/Find V_max.py", line 222, in <module>
formation_def()
File "C:/Users/Khary/Documents/Astrophysics/Bolshoi/Halo Formation History Project/Codes/Find V_max.py", line 129, in formation_def
FT = np.loadtxt(TF,skiprows=0)
File "C:\Anaconda3\lib\site-packages\numpy\lib\npyio.py", line 770, in loadtxt
first_line = next(fh)
UnsupportedOperation: not readable
My code:
import numpy as np
import tempfile

# Large arrays of data.
ID = np.array([some size])
MASS = np.array([some size])
V = np.array([some size])

def filesave(MAS, V):  # function to write and save values to file (w is opened elsewhere)
    Mc = str(MAS)
    Vel = str(V)
    w.write(Mc)
    w.write('\t')
    w.write(Vel)
    w.write('\n')
    return()

def formation_def():
    count = 1
    l = len(ID)
    for i in range(l):
        if ID[i] == count:
            for j in range(i, l):
                TF = tempfile.NamedTemporaryFile(mode='a')
                if ID[j] <= ID[i]:
                    T = str(ID[j])
                    M = str(MASS[j])
                    Vel = str(V[j])
                    TF.write(T)
                    TF.write('\t')
                    TF.write(M)
                    TF.write('\t')
                    TF.write(Vel)
                    TF.write('\n')
                elif ID[j] > ID[i]:  # if ID[j] > ID[i] then we are in the next halo in the list
                    FT = np.loadtxt(TF, skiprows=0)
                    MASS2 = FT[:, 0]
                    V2 = FT[:, 2]
                    vel_max = np.argmax(V2)
                    filesave(MASS2[vel_max], V2[vel_max])
                    TF.close()
                    count += 1  # and must increment the counter and break out of the loop
                    break
        else:
            count = ID[i] + 1
    return()
When you created your temporary file, you opened it to append data to the file. That is a write-only mode.
TF = tempfile.NamedTemporaryFile(mode='a')
Where it seems to be failing, you are trying to read data from the file and put it in FT.
FT = np.loadtxt(TF,skiprows=0)
Open TF with a read/write mode such as mode='a+' instead, rewind with TF.seek(0) before loading, and you should have better luck.
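A minimal self-contained sketch of the write-then-read-back pattern (the flush and seek(0) are what let np.loadtxt read what was just written):
import numpy as np
import tempfile

TF = tempfile.NamedTemporaryFile(mode='a+')  # readable and writable
TF.write('1\t10.0\t2.5\n')
TF.write('1\t12.0\t3.1\n')
TF.flush()  # push buffered writes out to the file
TF.seek(0)  # rewind so loadtxt starts at the beginning
FT = np.loadtxt(TF, skiprows=0)
print(FT.shape)  # (2, 3)
TF.close()  # the temporary file is deleted on close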
