This is what I have so far:
import maya.api.OpenMaya
m_dest_set_sel = maya.api.OpenMaya.MSelectionList()
m_dest_set_sel.add('lambert4SG')
m_dest_set_obj = m_dest_set_sel.getDependNode(0)
m_dest_set = maya.api.OpenMaya.MFnSet(m_dest_set_obj)
ss = maya.api.OpenMaya.MSelectionList()
ss.add('pCube3.f[2]')
m_dest_set.addMembers(ss)
And it gives me this error:
# Error: Cannot add the following items to the set since they would break the exclusivity constraint: pCube3.f[2] #
# Error: RuntimeError: file <maya console> line 1: (kFailure): Unexpected Internal Failure #
I tried removing it from the initialShadingGroup first, to no avail:
m_dest_set_sel = maya.api.OpenMaya.MSelectionList()
m_dest_set_sel.add('initialShadingGroup')
m_dest_set_obj = m_dest_set_sel.getDependNode(0)
m_dest_set = maya.api.OpenMaya.MFnSet(m_dest_set_obj)
ss = maya.api.OpenMaya.MSelectionList()
ss.add('pCube3.f[2]')
m_dest_set.removeMembers(ss)
# Error: RuntimeError: file <maya console> line 8: (kInvalidParameter): Cannot find item of required type #
This works for me. initialShadingGroup holds the whole shape as a member, not the individual face, which is why removing pCube3.f[2] fails with kInvalidParameter; remove the shape itself first, then add the face to the destination set:
import maya.api.OpenMaya as om2
m_dest_set_sel = om2.MSelectionList()
m_dest_set_sel.add('initialShadingGroup')
m_dest_set_obj = m_dest_set_sel.getDependNode(0)
m_dest_set = om2.MFnSet(m_dest_set_obj)
ss = om2.MSelectionList()
ss.add('pCubeShape1')
m_dest_set.removeMembers(ss)
m_dest_set_sel = om2.MSelectionList()
m_dest_set_sel.add('lambert2SG')
m_dest_set_obj = m_dest_set_sel.getDependNode(0)
m_dest_set = om2.MFnSet(m_dest_set_obj)
ss = om2.MSelectionList()
ss.add('pCube1.f[2]')
m_dest_set.addMembers(ss)
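For comparison, the same reassignment is a one-liner through maya.cmds: the sets command's forceElement flag pulls the face out of whatever shading group currently owns it before adding it, so the exclusivity constraint never fires. A minimal sketch, assuming the same pCube1/lambert2SG scene as above:
import maya.cmds as cmds

# forceElement (-fe) reassigns the face even if another shading
# group already claims it, avoiding the exclusivity error
cmds.sets('pCube1.f[2]', edit=True, forceElement='lambert2SG')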
Related
This may have a simple answer, but I'm currently building a neural network with Keras and I ran into this problem with the following code:
EPOCHS = 50
callbacks = [
    tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='min', min_delta=0.0001),
    tf.keras.callbacks.ModelCheckpoint(
        'weights.tf', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True),
    tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', min_delta=0, patience=15, verbose=1, restore_best_weights=True)
]
history = model.fit(
    train_ds,
    validation_data=val_ds,
    verbose=1,
    callbacks=callbacks,
    epochs=EPOCHS,
)
model.load_weights('weights.tf')
model.evaluate(val_ds)
Output:
Epoch 1/50
NotFoundError                             Traceback (most recent call last)
<ipython-input-15-265d39d703c7> in <module>
     10 ]
     11
---> 12 history = model.fit(
     13     train_ds,
     14     validation_data=val_ds,

1 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     52   try:
     53     ctx.ensure_initialized()
---> 54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     55                                         inputs, attrs, num_outputs)
     56   except core._NotOkStatusException as e:

NotFoundError: Graph execution error:
train/60377.jpg; No such file or directory
         [[{{node ReadFile}}]]
         [[IteratorGetNext]] [Op:__inference_train_function_9137]
Here's my data: the FairFace dataset from Kaggle.
Here's how I preprocessed the images (through code I borrowed):
IMG_SIZE = 224
AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 224
NUM_CLASSES = len(labels_map)

# Dataset creation
y_train = tf.keras.utils.to_categorical(train.race, num_classes=NUM_CLASSES, dtype='float32')
y_val = tf.keras.utils.to_categorical(val.race, num_classes=NUM_CLASSES, dtype='float32')
train_ds = tf.data.Dataset.from_tensor_slices((train.file, y_train)).shuffle(len(y_train))
val_ds = tf.data.Dataset.from_tensor_slices((val.file, y_val))
assert len(train_ds) == len(train.file) == len(train.race)
assert len(val_ds) == len(val.file) == len(val.race)

# Read files (map_fn is unused; the same logic is inlined below)
def map_fn(path, label):
    image = tf.io.decode_jpeg(tf.io.read_file(path))
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    return image, label

# Read files
train_ds = train_ds.map(lambda path, lbl: (tf.io.decode_jpeg(tf.io.read_file(path)), lbl), num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(lambda path, lbl: (tf.io.decode_jpeg(tf.io.read_file(path)), lbl), num_parallel_calls=AUTOTUNE)

# Resize, then batch and prefetch
train_ds = train_ds.map(lambda imgs, lbls: (tf.image.resize(imgs, (IMG_SIZE, IMG_SIZE)), lbls), num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(lambda imgs, lbls: (tf.image.resize(imgs, (IMG_SIZE, IMG_SIZE)), lbls), num_parallel_calls=AUTOTUNE)
train_ds = train_ds.batch(BATCH_SIZE)
val_ds = val_ds.batch(BATCH_SIZE)

# Performance enhancement - cache, batch, prefetch
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
I tried changing the jpg file name, but to no avail.
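The traceback complains about a relative path (train/60377.jpg), so one likely culprit is that the paths in train.file are relative to a folder other than the process's working directory. A minimal sketch of one way to check and repair the paths before building the datasets; dataset_root is a hypothetical location, and train.file / val.file are the pandas columns from the preprocessing code above:
import os

# Hypothetical folder that actually contains the train/ and val/ images
dataset_root = '/content/fairface'

# Make the paths absolute so tf.io.read_file no longer depends
# on the working directory of the process
train = train.assign(file=train.file.map(lambda p: os.path.join(dataset_root, p)))
val = val.assign(file=val.file.map(lambda p: os.path.join(dataset_root, p)))

# Fail early if anything is still missing
missing = [p for p in train.file if not os.path.exists(p)]
assert not missing, f'{len(missing)} image files not found, e.g. {missing[:3]}'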
I'm using the HDBSCAN clustering algorithm with RandomizedSearchCV. When I fit the features with labels, I get the error "IndexError: too many indices for array: array is 1-dimensional, but 2 were indexed". The shape of embedding is (5000, 4) and of hdb_labels is (5000,). Below is my code:
# UMAP
umap_hdb = umap.UMAP(n_components=4, random_state=42)
embedding = umap_hdb.fit_transform(customer_data_hdb)

# creating HDBSCAN wrapper
class HDBSCANWrapper(hdbscan.HDBSCAN):
    def predict(self, X):
        return self.labels_.astype(int)

# HDBSCAN
clusterer_hdb = HDBSCANWrapper(min_samples=40, min_cluster_size=1000, metric='manhattan', gen_min_span_tree=True).fit(embedding)
hdb_labels = clusterer_hdb.labels_

# specify parameters and distributions to sample from
param_dist = {'min_samples': [10, 30, 50, 60, 100, 150],
              'min_cluster_size': [100, 200, 300, 400, 500],
              'cluster_selection_method': ['eom', 'leaf'],
              'metric': ['euclidean', 'manhattan']
              }

# validity_scorer
validity_scorer = make_scorer(hdbscan.validity.validity_index, greater_is_better=True)

n_iter_search = 20
random_search = RandomizedSearchCV(clusterer_hdb,
                                   param_distributions=param_dist,
                                   n_iter=n_iter_search,
                                   scoring=validity_scorer,
                                   random_state=42)
random_search.fit(embedding, hdb_labels)
I'm getting the error in random_search.fit and could not get rid of it. Any suggestions/help would be appreciated.
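One hedged explanation: make_scorer wraps a metric that gets called as metric(y_true, y_pred), so hdbscan.validity.validity_index ends up receiving the 1-D label array where it expects the 2-D data matrix, which matches the "too many indices for array" message. A sketch of a callable scorer that hands the feature matrix to validity_index instead; scikit-learn accepts any callable with the (estimator, X, y) signature for scoring:
import numpy as np
import hdbscan
from sklearn.model_selection import RandomizedSearchCV

def dbcv_scorer(estimator, X, y=None):
    # Refit on this split and score the labels against the data itself,
    # since validity_index needs the feature matrix, not predicted labels
    estimator.fit(X)
    return hdbscan.validity.validity_index(X.astype(np.float64), estimator.labels_)

random_search = RandomizedSearchCV(clusterer_hdb,
                                   param_distributions=param_dist,
                                   n_iter=n_iter_search,
                                   scoring=dbcv_scorer,
                                   random_state=42)
random_search.fit(embedding)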
I am trying to run a fairly simple MCMC sample for some time series data. I believe I am including all the required args, but I'm still getting an error.
The library versions:
tensorflow==1.14.0
tensorflow-probability==0.7.0
I tried rolling back to tfp 0.6.0 and got a matmul error. I tried pushing forward to tf nightly and got the same error as below.
The code:
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tf.enable_eager_execution()

rate_prior = tfd.Exponential(1./mean_users)
users_before = [...]  # bogus positive ints
users_after = [...]  # bogus positive ints

def unnormalized_log_prob(rate_before, rate_after):
    users_before_prior = tfd.Poisson(rate_before)
    users_after_prior = tfd.Poisson(rate_after)
    return (rate_prior.log_prob(rate_before)
            + rate_prior.log_prob(rate_after)
            + tf.reduce_sum(users_before_prior.log_prob(users_before))
            + tf.reduce_sum(users_after_prior.log_prob(users_after))
            )

bijectors = [tfp.bijectors.Exp, tfp.bijectors.Exp]

hmc = tfp.mcmc.TransformedTransitionKernel(
    tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_log_prob,
        step_size=10.,
        num_leapfrog_steps=3,
    ),
    bijectors
)

states = tfp.mcmc.sample_chain(
    num_results=10000,
    current_state=[tf.ones(2) * mean_users],
    kernel=hmc,
    trace_fn=None
)
This returns an error.
.../tensorflow_probability/python/mcmc/transformed_kernel.py in <listcomp>(.0)
     71   def fn(state_parts):
     72     return [b.inverse(sp)
---> 73             for b, sp in zip(bijector, state_parts)]
     74   return fn
     75

TypeError: inverse() missing 1 required positional argument: 'y'
Try
bijectors = [tfp.bijectors.Exp(), tfp.bijectors.Exp()]
The list needs bijector instances rather than the classes themselves: in the traceback, b.inverse(sp) is called with b bound to the Exp class, so inverse runs as an unbound method and its required argument y is never filled.
I get "IndexError: list is out of range" when I input this code. Also, the retmax is set at 614 because that's the total number of results when I make the request. Is there a way to make the retmode equal to the number of results using a variable that changes depending on the search results?
#!/usr/bin/env python
from Bio import Entrez
Entrez.email = "something@gmail.com"
handle1 = Entrez.esearch(db = "nucleotide", term = "dengue full genome", retmax = 614)
record = Entrez.read(handle1)
IdNums = [int(i) for i in record['IdList']]
while i >= 0 and i <= len(IdNums):
    handle2 = Entrez.esearch(db = "nucleotide", id = IdNums[i], type = "gb", retmode = "text")
    record = Entrez.read(handle2)
    print(record)
    i += 1
Rather than using a while loop, you can use a for loop...
from Bio import Entrez
Entrez.email = 'youremailaddress'
handle1 = Entrez.esearch(db = 'nucleotide', term = 'dengue full genome', retmax = 614)
record = Entrez.read(handle1)
IdNums = [int(i) for i in record['IdList']]
for i in IdNums:
    print(i)
    handle2 = Entrez.esearch(db = 'nucleotide', term = 'dengue full genome', id = i, rettype = 'gb', retmode = 'text')
    record = Entrez.read(handle2)
    print(record)
I ran it on my computer and it seems to work. The for loop solved the out-of-bounds error, and adding the term argument to handle2 solved the calling error.
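As for tying retmax to the number of results: every esearch response includes a Count field, so you can run a first query with retmax = 0 just to read the total, then a second query that passes it back (a sketch along the lines of the code above):
from Bio import Entrez
Entrez.email = 'youremailaddress'

# First query returns no IDs, only the total hit count
count_handle = Entrez.esearch(db = 'nucleotide', term = 'dengue full genome', retmax = 0)
total = int(Entrez.read(count_handle)['Count'])

# Second query uses that count so every matching ID comes back
handle1 = Entrez.esearch(db = 'nucleotide', term = 'dengue full genome', retmax = total)
record = Entrez.read(handle1)
IdNums = [int(i) for i in record['IdList']]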
In Waf, how can I create multiple custom tasks that can run in parallel (with --jobs=JOBS)?
Sources = ["C:\\src1.c", "C:\\Mod1\src2.c", ... 30pcs] # one per call
Incl_Paths = ["Mod1". "Mod1"] # list all of them in all call
INCL_ST = "-I%s" # how to format an include path in an argument
Ext_out = "_loc" # output file extension
The goal:
C:\\LOC.exe -IMod1 -IMod2 C:\\src1.c > build\\src1.c_loc        // or better src1_loc
C:\\LOC.exe -IMod1 -IMod2 C:\\Mod1\\src2.c > build\\src2.c_loc  // or better src2_loc
...
I couldn't get it to work:
def build(bld):
    for i in Sources:
        bld.new_task_gen(
            source = i,
            rule = 'C:\\LOC.exe ${INCL_ST:Incl_Paths} ${SRC} > ' + i + Ext_out,
        )
I also couldn't extract the exe into a variable:
# find_program(self, filename, path_list=[], var=None, environ=None, exts=''):
cfg.find_program("C:\\LOC.exe", var='LOC')
To change from:
rule='C:\\LOC.exe ...'
To:
rule='${LOC} ...'
Something like this should work with waf 1.7:
from waflib.Task import Task
from waflib.TaskGen import extension

Ext_out = "_loc"  # output file extension

def configure(conf):
    # loc.exe must be in the system path for this to work
    conf.find_program(
        'loc',
        var = "LOC",
    )
    conf.env.Incl_Paths = ["Mod1", "Mod2"]
    conf.env.INCL_ST = "-I%s"

@extension('.c')
def process_loc(self, node):
    out_node = node.change_ext(Ext_out)
    tsk = self.create_task('loc')
    tsk.set_inputs(node)
    tsk.set_outputs(out_node)

class loc_task(Task):
    ext_in = ['.c']
    ext_out = ['_loc']
    run_str = "${LOC} ${INCL_ST:Incl_Paths} ${SRC} > ${TGT}"

def build(bld):
    bld(source = ["src1.c", "src2.c"])
Well, it works for me on Linux, faking loc ...
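Since each source file becomes its own task with its own output node, waf's scheduler can run them concurrently; nothing extra is needed beyond the usual jobs flag (a hypothetical invocation):
waf configure
waf build --jobs=8   # or -j8; one loc task per .c file runs in parallel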