Getting No such file or directory [[{{node ReadFile}}]] [[IteratorGetNext]] [Op:__inference_train_function_9137] error - file

This may have a simple answer, but I'm currently building a neural network with Keras and I ran into this problem with the following code:
EPOCHS = 50
callbacks = [
    tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='min', min_delta=0.0001),
    tf.keras.callbacks.ModelCheckpoint(
        'weights.tf', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True),
    tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', min_delta=0, patience=15, verbose=1, restore_best_weights=True)
]

history = model.fit(
    train_ds,
    validation_data=val_ds,
    verbose=1,
    callbacks=callbacks,
    epochs=EPOCHS,
)

model.load_weights('weights.tf')
model.evaluate(val_ds)
Output:

Epoch 1/50
NotFoundError                             Traceback (most recent call last)
<ipython-input-15-265d39d703c7> in <module>
     10 ]
     11
---> 12 history = model.fit(
     13     train_ds,
     14     validation_data=val_ds,

1 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     52   try:
     53     ctx.ensure_initialized()
---> 54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     55                                         inputs, attrs, num_outputs)
     56   except core._NotOkStatusException as e:

NotFoundError: Graph execution error:
train/60377.jpg; No such file or directory
	 [[{{node ReadFile}}]]
	 [[IteratorGetNext]] [Op:__inference_train_function_9137]
Here's my data:
FairFace Dataset from Kaggle
Here's how I preprocessed the images from the FairFace dataset (using code I borrowed):
IMG_SIZE = 224
AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 224
NUM_CLASSES = len(labels_map)

# Dataset creation
y_train = tf.keras.utils.to_categorical(train.race, num_classes=NUM_CLASSES, dtype='float32')
y_val = tf.keras.utils.to_categorical(val.race, num_classes=NUM_CLASSES, dtype='float32')
train_ds = tf.data.Dataset.from_tensor_slices((train.file, y_train)).shuffle(len(y_train))
val_ds = tf.data.Dataset.from_tensor_slices((val.file, y_val))
assert len(train_ds) == len(train.file) == len(train.race)
assert len(val_ds) == len(val.file) == len(val.race)

# Read files
def map_fn(path, label):
    image = tf.io.decode_jpeg(tf.io.read_file(path))
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    return image, label

# Read files
train_ds = train_ds.map(lambda path, lbl: (tf.io.decode_jpeg(tf.io.read_file(path)), lbl), num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(lambda path, lbl: (tf.io.decode_jpeg(tf.io.read_file(path)), lbl), num_parallel_calls=AUTOTUNE)

# Resize, then batch and prefetch
train_ds = train_ds.map(lambda imgs, lbls: (tf.image.resize(imgs, (IMG_SIZE, IMG_SIZE)), lbls), num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(lambda imgs, lbls: (tf.image.resize(imgs, (IMG_SIZE, IMG_SIZE)), lbls), num_parallel_calls=AUTOTUNE)
train_ds = train_ds.batch(BATCH_SIZE)
val_ds = val_ds.batch(BATCH_SIZE)

# Performance enhancement - cache, batch, prefetch
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
I tried changing the .jpg file name, but to no avail.
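Since ReadFile resolves a relative path like train/60377.jpg against the current working directory, one quick check is whether the paths in train.file actually exist from where the notebook is running. Below is a minimal diagnostic sketch, assuming the train DataFrame from the preprocessing code below is in scope; data_root is a hypothetical placeholder for wherever the dataset was extracted:

import os

# Hypothetical dataset root - adjust to wherever FairFace was extracted.
data_root = '/content/fairface'

# Count how many of the listed paths are missing as given.
missing = [p for p in train.file if not os.path.exists(p)]
print(len(missing), 'of', len(train.file), 'paths are missing as-is')

# If relative paths are the problem, prepending the root fixes them.
train_files = train.file.map(lambda p: os.path.join(data_root, p))
assert all(os.path.exists(p) for p in train_files)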

Related

Numpy Stack :: all input arrays must have the same shape

I was trying to calculate the initial embedding of the whole data frame, which is the first step in implementing my GNN (which is heterogeneous in nature). I used a Twitter message dataset for the task and loaded it in the following way:
load_path = '/Users/hemangjiwnani/Desktop/Projects/Paper1/KPGNN/datasets/Twitter/'
save_path = '/Users/hemangjiwnani/Desktop/Projects/Paper1/KPGNN/datasets/Twitter/'
# load dataset
p_part1 = load_path + '68841_tweets_multiclasses_filtered_0722_part1.npy'
p_part2 = load_path + '68841_tweets_multiclasses_filtered_0722_part2.npy'
#"./datasets/Twitter/68841_tweets_multiclasses_filtered_0722_part1.npy"
df_np_part1 = np.load(p_part1, allow_pickle=True)
df_np_part2 = np.load(p_part2, allow_pickle=True)
Then I have created a data frame of the same with the help of following code
df_np = np.concatenate((df_np_part1, df_np_part2), axis=0)  # axis=0 stacks the two arrays row-wise
print("Loaded data.")
df = pd.DataFrame(data=df_np, columns=["event_id", "tweet_id", "text", "user_id", "created_at", "user_loc",\
"place_type", "place_full_name", "place_country_code", "hashtags", "user_mentions", "image_urls", "entities",
"words", "filtered_words", "sampled_words"])
print("Data converted to dataframe.")
print(df.shape)
print(df.head(5))
which gave the following output:
Loaded data.
Data converted to dataframe.
(68841, 16)
event_id ... sampled_words
0 0 ... []
1 0 ... []
2 0 ... []
3 0 ... []
4 0 ... []
[5 rows x 16 columns]
The function below raises an error at the return statement:
def documents_to_features(df):
    nlp = spacy.load("en_core_web_sm")
    #nlp = en_core_web_lg.load()
    features = df.filtered_words.apply(lambda x: nlp(' '.join(x)).vector).values
    return np.stack(features, axis=0)  # --> raises here
ERROR
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-12-2e8bc5e83009> in <module>()
----> 1 d_features = documents_to_features(df)
2 print("Document features generated.")
3 t_features = df_to_t_features(df)
4 print("Time features generated.")
5 combined_features = np.concatenate((d_features, t_features), axis=1)
1 frames
<ipython-input-9-b772a7744232> in documents_to_features(df)
3 #nlp = en_core_web_lg.load()
4 features = df.filtered_words.apply(lambda x: nlp(' '.join(x)).vector).values
----> 5 return np.stack(features, axis=0)
<__array_function__ internals> in stack(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/numpy/core/shape_base.py in stack(arrays, axis, out)
425 shapes = {arr.shape for arr in arrays}
426 if len(shapes) != 1:
--> 427 raise ValueError('all input arrays must have the same shape')
428
429 result_ndim = arrays[0].ndim + 1
ValueError: all input arrays must have the same shape
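np.stack only succeeds when every element of features has exactly the same shape, so at least one row must be producing a vector of a different shape (an empty filtered_words list is a common culprit). Below is a minimal, self-contained sketch of locating and patching such rows, with toy arrays standing in for the spaCy document vectors (the 96-element width is illustrative, not spaCy's actual output here):

import numpy as np

# Toy stand-ins for the per-document vectors; the last one has a
# different shape, which is exactly what makes np.stack raise.
features = [np.zeros(96), np.zeros(96), np.zeros(0)]

shapes = {f.shape for f in features}
if len(shapes) != 1:
    # Report the offending rows instead of failing inside np.stack.
    bad = [i for i, f in enumerate(features) if f.shape != (96,)]
    print("rows with unexpected vector shape:", bad)
    # One possible fix: replace them with zero vectors of the common width.
    features = [f if f.shape == (96,) else np.zeros(96) for f in features]

d_features = np.stack(features, axis=0)
print(d_features.shape)  # (3, 96)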

Tensorflow: convert PrefetchDataset to BatchDataset

With the latest TensorFlow version 2.3.1, I am trying to follow the basic text classification example at: https://www.tensorflow.org/tutorials/keras/text_classification. Instead of creating a dataset from a directory as the example does, I am using a CSV file:
SELECT_COLUMNS = ['SentimentText','Sentiment']
LABEL_COLUMN = 'Sentiment'
LABELS = [0, 1]
def get_dataset(file_path, **kwargs):
dataset = tf.data.experimental.make_csv_dataset(
file_path,
batch_size=3, # Artificially small to make examples easier to show.
label_name=LABEL_COLUMN,
na_value="?",
num_epochs=1,
ignore_errors=True,
**kwargs)
return dataset
all_data = get_dataset(data_path, select_columns=SELECT_COLUMNS)
As a result I get:
type(all_data)
tensorflow.python.data.ops.dataset_ops.PrefetchDataset
The example loads data from a directory with:
batch_size = 32
seed = 42
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='training',
seed=seed)
and gets a dataset of another type:
type(raw_train_ds)
tensorflow.python.data.ops.dataset_ops.BatchDataset
Now when I try to standardise and vectorise the data with the functions from the example:
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
return tf.strings.regex_replace(stripped_html,
'[%s]' % re.escape(string.punctuation),
'')
max_features = 10000
sequence_length = 250
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=max_features,
output_mode='int',
output_sequence_length=sequence_length)
and apply them to my dataset, I get an error:
# Make a text-only dataset (without labels), then call adapt
train_text = all_data.map(lambda x, y: x)
vectorize_layer.adapt(train_text)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-1f1fc445912d> in <module>
1 # Make a text-only dataset (without labels), then call adapt
2 train_text = all_data.map(lambda x, y: x)
----> 3 vectorize_layer.adapt(train_text)
/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/layers/preprocessing/text_vectorization.py in adapt(self, data, reset_state)
378 shape = dataset_ops.get_legacy_output_shapes(data)
379 if not isinstance(shape, tensor_shape.TensorShape):
--> 380 raise ValueError("The dataset passed to 'adapt' must contain a single "
381 "tensor value.")
382 if shape.rank == 0:
ValueError: The dataset passed to 'adapt' must contain a single tensor value.
How can I convert a PrefetchDataset to a BatchDataset?
You could use the tf.stack method to pack the features into a single tensor. The function below is from the Custom training: walkthrough tutorial in TensorFlow.
def pack_features_vector(features, labels):
    features = tf.stack(list(features.values()), axis=1)
    return features, labels
all_data = get_dataset(data_path, select_columns=SELECT_COLUMNS)
train_dataset = all_data.map(pack_features_vector)
train_text = train_dataset.map(lambda x, y: x)
vectorize_layer.adapt(train_text)
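For intuition, here is a minimal, self-contained sketch of what pack_features_vector does, with a small in-memory dataset standing in for make_csv_dataset (the column name and values below are made up):

import tensorflow as tf

# make_csv_dataset also yields (dict of feature columns, label) batches.
features = {"SentimentText": tf.constant(["good movie", "bad movie"])}
labels = tf.constant([1, 0])
ds = tf.data.Dataset.from_tensor_slices((features, labels)).batch(2)

def pack_features_vector(features, labels):
    # Collapse the dict of per-column tensors into one stacked tensor.
    features = tf.stack(list(features.values()), axis=1)
    return features, labels

packed = ds.map(pack_features_vector)
for x, y in packed.take(1):
    print(x.shape, y.shape)  # (2, 1) (2,)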

Estimator doesn't work even if input is right

I'm adapting a script.py to do transfer learning. I found many scripts for retraining a model from TFRecord files, but none of them worked for me because of something involving TF 2.0 and contrib, so I'm trying to convert a script to work with TF2 and with my model.
This is my script at the moment:
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

keras = tf.keras

EPOCHS = 1

# Data preprocessing
import pathlib
#data_dir = tf.keras.utils.get_file(origin="/home/pi/venv/raccoon_dataset/", fname="raccoons_dataset")
#data_dir = pathlib.Path(data_dir)
data_dir = "/home/pi/.keras/datasets/ssd_mobilenet_v1_coco_2018_01_28/saved_model/saved_model.pb"

######################
# Read the TFRecords #
######################

def imgs_input_fn(filenames, perform_shuffle=False, repeat_count=1, batch_size=1):
    def _parse_function(serialized):
        features = \
            {
                'image': tf.io.FixedLenFeature([], tf.string),
                'label': tf.io.FixedLenFeature([], tf.int64)
            }
        # Parse the serialized data so we get a dict with our data.
        parsed_example = tf.io.parse_single_example(serialized=serialized,
                                                    features=features)
        print("\nParsed example:\n", parsed_example, "\nEnd of parsed example:\n")
        # Get the image as raw bytes.
        image_shape = tf.stack([300, 300, 3])
        image_raw = parsed_example['image']
        label = tf.cast(parsed_example['label'], tf.float32)
        # Decode the raw bytes so it becomes a tensor with type.
        image = tf.io.decode_raw(image_raw, tf.uint8)
        image = tf.cast(image, tf.float32)
        image = tf.reshape(image, image_shape)
        #image = tf.subtract(image, 116.779) # Zero-center by mean pixel
        #image = tf.reverse(image, axis=[2]) # 'RGB'->'BGR'
        d = dict(zip(["image"], [image])), [label]
        return d

    dataset = tf.data.TFRecordDataset(filenames=filenames)
    # Parse the serialized data in the TFRecords files.
    # This returns TensorFlow tensors for the image and labels.
    #print("\nDataset before parsing:\n",dataset,"\n")
    dataset = dataset.map(_parse_function)
    #print("\nDataset after parsing:\n",dataset,"\n")
    if perform_shuffle:
        # Randomizes input using a window of 256 elements (read into memory)
        dataset = dataset.shuffle(buffer_size=256)
    dataset = dataset.repeat(repeat_count)  # Repeats dataset this # times
    dataset = dataset.batch(batch_size)  # Batch size to use
    print("\nDataset batched:\n", dataset, "\nEnd dataset\n")
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    print("\nIterator shape:\n", tf.compat.v1.data.get_output_shapes(iterator), "\nEnd\n")
    #print("\nIterator:\n",iterator.get_next(),"\nEnd Iterator\n")
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels

raw_train = tf.compat.v1.estimator.TrainSpec(input_fn=imgs_input_fn(
        "/home/pi/venv/raccoon_dataset/data/train.record",
        perform_shuffle=True,
        repeat_count=5,
        batch_size=20),
    max_steps=1)
and this is the resulting output:
Parsed example:
{'image': <tf.Tensor 'ParseSingleExample/ParseSingleExample:0' shape=() dtype=string>, 'label': <tf.Tensor 'ParseSingleExample/ParseSingleExample:1' shape=() dtype=int64>}
End of parsed example:
Dataset batched:
<BatchDataset shapes: ({image: (None, 300, 300, 3)}, (None, 1)), types: ({image: tf.float32}, tf.float32)>
End dataset
Iterator shape:
({'image': TensorShape([None, 300, 300, 3])}, TensorShape([None, 1]))
End
2019-11-20 14:01:14.493817: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at example_parsing_ops.cc:240 : Invalid argument: Feature: image (data type: string) is required but could not be found.
2019-11-20 14:01:14.495019: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at iterator_ops.cc:929 : Invalid argument: {{function_node __inference_Dataset_map__parse_function_27}} Feature: image (data type: string) is required but could not be found.
[[{{node ParseSingleExample/ParseSingleExample}}]]
Traceback (most recent call last):
File "transfer_learning.py", line 127, in <module>
batch_size=20),
File "transfer_learning.py", line 107, in imgs_input_fn
batch_features, batch_labels = iterator.get_next()
File "/home/pi/venv/lib/python3.7/site-packages/tensorflow_core/python/data/ops/iterator_ops.py", line 737, in get_next
return self._next_internal()
File "/home/pi/venv/lib/python3.7/site-packages/tensorflow_core/python/data/ops/iterator_ops.py", line 651, in _next_internal
output_shapes=self._flat_output_shapes)
File "/home/pi/venv/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py", line 2673, in iterator_get_next_sync
_six.raise_from(_core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: {{function_node __inference_Dataset_map__parse_function_27}} Feature: image (data type: string) is required but could not be found.
[[{{node ParseSingleExample/ParseSingleExample}}]] [Op:IteratorGetNextSync]
I don't know what I'm doing wrong.
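The parse failure means the serialized examples in train.record do not actually contain a feature named image, so the keys used when the record was written are probably different (object-detection records, for instance, typically use keys like image/encoded). Below is a minimal sketch, reusing the record path from the script above, to print the feature keys actually stored in the first example:

import tensorflow as tf

# Read one raw record and decode it as a tf.train.Example protobuf
# to list the feature keys it actually contains.
raw_dataset = tf.data.TFRecordDataset("/home/pi/venv/raccoon_dataset/data/train.record")
for raw_record in raw_dataset.take(1):
    example = tf.train.Example()
    example.ParseFromString(raw_record.numpy())
    print(sorted(example.features.feature.keys()))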

Use numpy arrays as arguments in odeint

I am trying to solve a system of differential equations using odeint. I have 4 txt files (that look like the picture below). I read them and save them in numpy arrays (length: 8000) (maybe not in the most efficient way, but anyway...). I want to pass these 4 arrays as arguments to my odeint call and solve the system. For example, at every time step odeint takes (one of the 8000) to solve the system, I want it to use a different value from these arrays. Is there any way to do this automatically, without getting lost in for loops? I tried to do it like this (see the code below) but I get the error:
if g2>0: ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
g2 is supposed to be of size 1x1 at every step of odeint, so it must be something about the way I use the 4 arrays (xdot, ydot, xdotdot, ydotdot).
I am new to python and I use python 2.7.12 on Ubuntu 16.04 LTS.
Thank you in advance.
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

added_mass_x = 0.03 # kg
added_mass_y = 0.04
mb = 0.3 # kg
m1 = mb-added_mass_x
m2 = mb-added_mass_y
l1 = 0.07 # m
l2 = 0.05 # m
J = 0.00050797 # kgm^2
Sa = 0.0110 # m^2
Cd = 2.44
Cl = 3.41
Kd = 0.000655 # kgm^2
r = 1000 # kg/m^3
c1 = 0.5*r*Sa*Cd
c2 = 0.5*r*Sa*Cl
c3 = 0.5*mb*(l1**2)
c4 = Kd/J
c5 = (1/(2*J))*(l1**2)*mb*l2
c6 = (1/(3*J))*(l1**3)*mb
theta_0 = 10*(np.pi/180) # rad
theta_A = 20*(np.pi/180) # rad
f = 2 # Hz

###################################################################
t = np.linspace(0,100,8000) # s
###################################################################

# Save data from txt files into numpy arrays
xdot_list = []
ydot_list = []
xdotdot_list = []
ydotdot_list = []

with open('xdot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        xdot_list.append(current_place)
xdot = np.array(xdot_list, dtype=np.float32)

with open('ydot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        ydot_list.append(current_place)
ydot = np.array(ydot_list, dtype=np.float32)

with open('xdotdot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        xdotdot_list.append(current_place)
xdotdot = np.array(xdotdot_list, dtype=np.float32)

with open('ydotdot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        ydotdot_list.append(current_place)
ydotdot = np.array(ydotdot_list, dtype=np.float32)

def inverse(k, t, xdot, ydot, xdotdot, ydotdot):
    vcx_i = k[0]
    vcy_i = k[1]
    psi_i = k[2]
    wz_i = k[3]
    theta_i = k[4]
    theta_deg_i = k[5]
    # Subsystem 4
    vcx_i = xdot*np.cos(psi_i)-ydot*np.sin(psi_i)
    vcy_i = ydot*np.cos(psi_i)+xdot*np.sin(psi_i)
    psidot_i = wz_i
    vcxdot_i = xdotdot*np.cos(psi_i)-xdot*np.sin(psi_i)*psidot_i-ydotdot*np.sin(psi_i)-ydot*np.cos(psi_i)*psidot_i
    vcydot_i = ydotdot*np.cos(psi_i)-ydot*np.sin(psi_i)*psidot_i+xdotdot*np.sin(psi_i)+xdot*np.cos(psi_i)*psidot_i
    g1 = -(m1/c3)*vcxdot_i+(m2/c3)*vcy_i*wz_i-(c1/c3)*vcx_i*np.sqrt((vcx_i**2)+(vcy_i**2))+(c2/c3)*vcy_i*np.sqrt((vcx_i**2)+(vcy_i**2))*np.arctan2(vcy_i,vcx_i)
    g2 = (m2/c3)*vcydot_i+(m1/c3)*vcx_i*wz_i+(c1/c3)*vcy_i*np.sqrt((vcx_i**2)+(vcy_i**2))+(c2/c3)*vcx_i*np.sqrt((vcx_i**2)+(vcy_i**2))*np.arctan2(vcy_i,vcx_i)
    A = 12*np.sin(2*np.pi*f*t+np.pi) # tail-frequency equation from Simulink
    if A>=0.1:
        wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2-c6*np.sqrt((g1**2)+(g2**2))
    elif A<-0.1:
        wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2+c6*np.sqrt((g1**2)+(g2**2))
    else:
        wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2
    # Subsystem 5
    if g2>0:
        theta_i = np.arctan2(g1,g2)
    elif g2<0 and g1>=0:
        theta_i = np.arctan2(g1,g2)-np.pi
    elif g2<0 and g1<0:
        theta_i = np.arctan2(g1,g2)+np.pi
    elif g2==0 and g1>0:
        theta_i = -np.pi/2
    elif g2==0 and g1<0:
        theta_i = np.pi/2
    elif g1==0 and g2==0:
        theta_i = 0
    theta_deg_i = (theta_i*180)/np.pi
    return [vcxdot_i, vcydot_i, psidot_i, wzdot_i, theta_i, theta_deg_i]

# initial conditions
vcx_i_0 = 0.1257
vcy_i_0 = 0
psi_i_0 = 0
wz_i_0 = 0
theta_i_0 = 0
theta_deg_i_0 = 0
#theta_i_0 = 0.1745
#theta_deg_i_0 = 9.866
k0 = [vcx_i_0, vcy_i_0, psi_i_0, wz_i_0, theta_i_0, theta_deg_i_0]

# solve the system of ODEs
k = odeint(inverse, k0, t, args=(xdot,ydot,xdotdot,ydotdot), tfirst=False)

# store the results
vcx_i = k[:,0]
vcy_i = k[:,1]
psi_i = k[:,2]
wz_i = k[:,3]
theta_i = k[:,4]
theta_deg_i = k[:,5]

# re-run Subsystem 5 so that theta_i and theta_deg_i can be plotted
theta_i = [inverse(k_i, t_i)[4] for t_i, k_i in zip(t, k)]
theta_deg_i = [inverse(k_i, t_i)[5] for t_i, k_i in zip(t, k)]

# compute the mean angle theta and the oscillation amplitude
mesi_gwnia = sum(theta_i)/len(theta_i) # rad
platos = (max(theta_i)-min(theta_i))/2
UPDATE:
The most relevant solution I found so far is this:
Solving a system of odes (with changing constant!) using scipy.integrate.odeint?
But since I only have the values of my variables in arrays, and not equations for the variables as functions of time (e.g. xdot = f(t)), I tried to apply an interpolation between the values in my arrays, as shown here: ODEINT with multiple parameters (time-dependent)
I managed to get the code running without errors, but the total run time increased dramatically and the results of the solved system are completely wrong. I tried every type of interpolation that I found here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html but still got the wrong outcome. That means either my interpolation isn't the best possible, or my arrays have too many points (8000 values) to interpolate between and still solve the system correctly.
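For reference, here is a minimal, self-contained sketch of that interpolation approach, with a made-up right-hand side and a sine wave standing in for the data read from the txt files (none of this is the question's actual system):

import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d

# A forcing signal sampled on a fixed grid, standing in for xdot.txt.
t_grid = np.linspace(0, 10, 8000)
xdot_samples = np.sin(t_grid)

# Turn the samples into a callable that odeint can evaluate at its own
# (adaptive, off-grid) time points.
xdot_f = interp1d(t_grid, xdot_samples, kind='linear', fill_value='extrapolate')

def rhs(y, t, xdot_f):
    # Evaluate the sampled signal at the solver's current time.
    return -y + xdot_f(t)

y = odeint(rhs, 1.0, t_grid, args=(xdot_f,))
print(y.shape)  # (8000, 1)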

TypeError: ufunc 'add' did not contain a loop

I use Anaconda and gdsCAD and get an error even though all packages are installed correctly.
As explained here: http://pythonhosted.org/gdsCAD/
TypeError: ufunc 'add' did not contain a loop with signature matching types dtype('S32') dtype('S32') dtype('S32')
My imports look like this (in the end I imported everything):
import numpy as np
from gdsCAD import *
import matplotlib.pyplot as plt
My example code looks like this:
something = core.Elements()
box=shapes.Box( (5,5),(1,5),0.5)
core.default_layer = 1
core.default_colors = 2
something.add(box)
something.show()
My error message looks like this:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2f90b960c1c1> in <module>()
31 puffer_wafer = shapes.Circle((0.,0.), puffer_wafer_radius, puffer_line_thickness)
32 bp.add(puffer_wafer)
---> 33 bp.show()
34 wafer = shapes.Circle((0.,0.), wafer_radius, wafer_line_thickness)
35 bp.add(wafer)
C:\Users\rpilz\AppData\Local\Continuum\Anaconda2\lib\site-packages\gdscad-0.4.5-py2.7.egg\gdsCAD\core.pyc in _show(self)
80 ax.margins(0.1)
81
---> 82 artists=self.artist()
83 for a in artists:
84 a.set_transform(a.get_transform() + ax.transData)
C:\Users\rpilz\AppData\Local\Continuum\Anaconda2\lib\site-packages\gdscad-0.4.5-py2.7.egg\gdsCAD\core.pyc in artist(self, color)
952 art=[]
953 for p in self:
--> 954 art+=p.artist()
955 return art
956
C:\Users\rpilz\AppData\Local\Continuum\Anaconda2\lib\site-packages\gdscad-0.4.5-py2.7.egg\gdsCAD\core.pyc in artist(self, color)
475 poly = lines.buffer(self.width/2.)
476
--> 477 return [descartes.PolygonPatch(poly, lw=0, **self._layer_properties(self.layer))]
478
479
C:\Users\rpilz\AppData\Local\Continuum\Anaconda2\lib\site-packages\gdscad-0.4.5-py2.7.egg\gdsCAD\core.pyc in _layer_properties(layer)
103 # Default colors from previous versions
104 colors = ['k', 'r', 'g', 'b', 'c', 'm', 'y']
--> 105 colors += matplotlib.cm.gist_ncar(np.linspace(0.98, 0, 15))
106 color = colors[layer % len(colors)]
107 return {'color': color}
TypeError: ufunc 'add' did not contain a loop with signature matching types dtype('S32') dtype('S32') dtype('S32')
gdsCAD has been a pain, from the shapely install to this plotting issue.
This issue is caused by the wrong datatype being passed to the colors function. It can be solved by editing the following line in core.py:
colors += matplotlib.cm.gist_ncar(np.linspace(0.98, 0, 15))
to
colors += list(matplotlib.cm.gist_ncar(np.linspace(0.98, 0, 15)))
If you don't know where core.py is located, just type in:
from gdsCAD import *
core
This will give you the path of the core.py file. Good luck!
Well first, I'd ask that you please include the actual code, as the 'example code' in the question is obviously different judging by the traceback. When debugging, the details matter, and I need to be able to actually run the code.
You obviously have a data type problem. Chances are pretty good it's in the variables here:
puffer_wafer = shapes.Circle((0.,0.), puffer_wafer_radius, puffer_line_thickness)
I had the same error thrown when I was running a call to Pandas. I changed the data to str(data) and the code worked.
I don't know if this helps, as I am fairly new to this myself, but I had a similar error and found that it is due to a type-casting issue, as suggested by the previous answer. I can't see from the example in the question exactly what you are trying to do. Below is a small example of my issue and solution. My code is making a simple random forest model using scikit-learn.
Here is an example that will give the error; it is caused by the third-to-last line, concatenating the results to write to file.
import scipy
import math
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing, metrics, cross_validation

Data = pd.read_csv("Free_Energy_exp.csv", sep=",")
Data = Data.fillna(Data.mean()) # replace the NA values with the mean of the descriptor
header = Data.columns.values # Use the column headers as the descriptor labels
Data.head()

test_name = "Test.csv"

npArray = np.array(Data)
print header.shape
npheader = np.array(header[1:-1])
print("Array shape X = %d, Y = %d " % (npArray.shape))
datax, datay = npArray.shape
names = npArray[:,0]
X = npArray[:,1:-1].astype(float)
y = npArray[:,-1].astype(float)
X = preprocessing.scale(X)

XTrain, XTest, yTrain, yTest = cross_validation.train_test_split(X, y, random_state=0)

# Predictions results initialised
RFpredictions = []

RF = RandomForestRegressor(n_estimators=10, max_features=5, max_depth=5, random_state=0)
RF.fit(XTrain, yTrain) # Train the model
print("Training R2 = %5.2f" % RF.score(XTrain, yTrain))
RFpreds = RF.predict(XTest)

with open(test_name, 'a') as fpred:
    lenpredictions = len(RFpreds)
    lentrue = yTest.shape[0]
    if lenpredictions == lentrue:
        fpred.write("Names/Label,, Prediction Random Forest,, True Value,\n")
        for i in range(0, lenpredictions):
            fpred.write(RFpreds[i]+",,"+yTest[i]+",\n")
    else:
        print "ERROR - names, prediction and true value array size mismatch."
This leads to the error:
Traceback (most recent call last):
File "min_example.py", line 40, in <module>
fpred.write(RFpreds[i]+",,"+yTest[i]+",\n")
TypeError: ufunc 'add' did not contain a loop with signature matching types dtype('S32') dtype('S32') dtype('S32')
The solution is to convert each variable to str() on the third-to-last line and then write to file. No other changes to the code have been made from the above.
import scipy
import math
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing, metrics, cross_validation

Data = pd.read_csv("Free_Energy_exp.csv", sep=",")
Data = Data.fillna(Data.mean()) # replace the NA values with the mean of the descriptor
header = Data.columns.values # Use the column headers as the descriptor labels
Data.head()

test_name = "Test.csv"

npArray = np.array(Data)
print header.shape
npheader = np.array(header[1:-1])
print("Array shape X = %d, Y = %d " % (npArray.shape))
datax, datay = npArray.shape
names = npArray[:,0]
X = npArray[:,1:-1].astype(float)
y = npArray[:,-1].astype(float)
X = preprocessing.scale(X)

XTrain, XTest, yTrain, yTest = cross_validation.train_test_split(X, y, random_state=0)

# Predictions results initialised
RFpredictions = []

RF = RandomForestRegressor(n_estimators=10, max_features=5, max_depth=5, random_state=0)
RF.fit(XTrain, yTrain) # Train the model
print("Training R2 = %5.2f" % RF.score(XTrain, yTrain))
RFpreds = RF.predict(XTest)

with open(test_name, 'a') as fpred:
    lenpredictions = len(RFpreds)
    lentrue = yTest.shape[0]
    if lenpredictions == lentrue:
        fpred.write("Names/Label,, Prediction Random Forest,, True Value,\n")
        for i in range(0, lenpredictions):
            fpred.write(str(RFpreds[i])+",,"+str(yTest[i])+",\n")
    else:
        print "ERROR - names, prediction and true value array size mismatch."
These examples are from a larger codebase, so I hope they are clear enough.
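To see the failure mode in isolation, here is a minimal sketch with made-up values: adding a NumPy float scalar to a Python string makes NumPy coerce both operands to fixed-width strings (S32 or U32, depending on the version) and then fail to find an 'add' ufunc loop for them, which is why str() fixes it:

import numpy as np

RFpreds = np.array([1.5, 2.0])  # float predictions, as RF.predict returns
try:
    line = RFpreds[0] + ",,"  # np.float64 + str -> the ufunc 'add' error
except TypeError as e:
    print(e)
# Converting the element with str() first makes it plain string concatenation.
print(str(RFpreds[0]) + ",,")  # 1.5,,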
