I'm trying to create a class and set up some variables in __init__ that reference certain indexes of the array when the class is instantiated.
However, my index method does not seem to work in this context, and I get this error:
IndexError Traceback (most recent call last)
Input In [82], in <cell line: 17>()
14 def index(self, n = 0):
15 return (len(self.array) - n).astype(int)
---> 17 something = Do_something()
Input In [82], in Do_something.__init__(self, a_number)
6 self.number = a_number
7 self.array = np.zeros((1,3), dtype = float)
----> 9 self.a = self.array[self.index,0]
10 self.b = self.array[self.index,1]
11 self.c = self.array[self.index,2]
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
Here is the script:
import numpy as np
class Do_something:
def __init__(self, a_number = 0):
self.number = a_number
self.array = np.zeros((1,3), dtype = float)
self.a = self.array[self.index,0]
self.b = self.array[self.index,1]
self.c = self.array[self.index,2]
    # return a negative index of n
def index(self, n = 0):
return (len(self.array) - n).astype(int)
something = Do_something()
What am I not doing correctly?
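For context on the failure: without parentheses, self.index is the bound method object itself, not an integer, and NumPy rejects it as an index. A minimal standalone sketch of the difference (hypothetical names, not the script above):
import numpy as np

arr = np.zeros((1, 3))

def last_index():
    return 0

# arr[last_index, 0]    # IndexError: a function object is not a valid index
print(arr[last_index(), 0])  # calling it yields an integer, which works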
EDIT
Fixed it by adding the parentheses to self.index (so the method is actually called) and changing the return of the index function to subtract (n + 1).
import numpy as np
class Do_something:
def __init__(self, a_number = 0):
self.number = a_number
self.array = np.array([[1,2,3],[4,5,6], [7,8,9]])
self.a = self.array[self.index(),0]
self.b = self.array[self.index(),1]
self.c = self.array[self.index(),2]
    # return a negative index of n
def index(self, n = 2):
return (len(self.array) - (n + 1))
something = Do_something()
print("length is " + str(len(something.array)))
# print(len(something.array) - (0-1))
print(something.a)
print(something.b)
print(something.index())
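As an aside, NumPy supports negative indexing directly, so a minimal alternative sketch of the same last-row access (assuming the last row is what is wanted) needs no index() helper at all:
import numpy as np

arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a, b, c = arr[-1, 0], arr[-1, 1], arr[-1, 2]  # last row of the array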
Thanks :)
I modified Distiller (https://github.com/IntelLabs/distiller) to emulate an in-memory computing circuit; in particular, I added quantization of convolution-layer outputs during QAT. However, accuracy drops by over 60 percentage points (90% → 30%) even with 32-bit quantization as a sanity check. I should also say that it was perfectly fine when I just added a calculated quantization noise instead. Below is the code.
import torch
import torch.nn as nn
import math
__all__ = ['preact_resnet20_cifar', 'preact_resnet32_cifar', 'preact_resnet44_cifar', 'preact_resnet56_cifar',
'preact_resnet110_cifar', 'preact_resnet20_cifar_conv_ds', 'preact_resnet32_cifar_conv_ds',
'preact_resnet44_cifar_conv_ds', 'preact_resnet56_cifar_conv_ds', 'preact_resnet110_cifar_conv_ds']
NUM_CLASSES = 10
device = torch.device("cuda")
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def quantize(tensor, n_bits=32, dim=144, clip_ratio=1):
max_value = dim*clip_ratio
min_value = -dim*clip_ratio
delta = max_value - min_value
y = tensor.clone()
y = torch.clamp(y, min = min_value, max = max_value)
lsb = delta / (2**n_bits)
y = (y // lsb)*lsb
return y
class PreactBasicBlock(nn.Module):
expansion = 1
def __init__(self, block_gates, inplanes, planes, stride=1, downsample=None, preact_downsample=True, resolution = 32):
super(PreactBasicBlock, self).__init__()
self.block_gates = block_gates
self.pre_bn = nn.BatchNorm2d(inplanes)
self.pre_relu = nn.ReLU(inplace=False) # To enable layer removal inplace must be False
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=False)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
self.preact_downsample = preact_downsample
self.resolution = resolution
def forward(self, x):
        need_preact = self.block_gates[0] or self.block_gates[1] or self.downsample and self.preact_downsample  # add pre-activation for block gates 0 and 1, and when the input is downsampled
if need_preact: # x > bn > relu > out
preact = self.pre_bn(x)
preact = self.pre_relu(preact)
out = preact
else: # x > out
preact = out = x
if self.block_gates[0]: # (preact) > conv > bn > relu
out = self.conv1(out)
dim = self.conv1.in_channels * self.conv1.kernel_size[0] * self.conv1.kernel_size[1]
out = quantize(out, n_bits=self.resolution, dim=dim, clip_ratio=1)
out = self.bn(out)
out = self.relu(out)
if self.block_gates[1]: # (preact)> conv
out = self.conv2(out)
dim = self.conv2.in_channels * self.conv2.kernel_size[0] * self.conv2.kernel_size[1]
out = quantize(out, n_bits=self.resolution, dim=dim, clip_ratio=1)
if self.downsample is not None:
if self.preact_downsample:
residual = self.downsample(preact)
else:
residual = self.downsample(x)
else:
residual = x
out += residual
return out
class PreactResNetCifar(nn.Module):
def __init__(self, block, layers, num_classes=NUM_CLASSES, conv_downsample=False):
self.nlayers = 0
# Each layer manages its own gates
self.layer_gates = []
for layer in range(3):
# For each of the 3 layers, create block gates: each block has two layers
self.layer_gates.append([]) # [True, True] * layers[layer])
for blk in range(layers[layer]):
self.layer_gates[layer].append([True, True])
self.inplanes = 16 # 64
super(PreactResNetCifar, self).__init__()
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(self.layer_gates[0], block, 16, layers[0],
conv_downsample=conv_downsample)
self.layer2 = self._make_layer(self.layer_gates[1], block, 32, layers[1], stride=2,
conv_downsample=conv_downsample)
self.layer3 = self._make_layer(self.layer_gates[2], block, 64, layers[2], stride=2,
conv_downsample=conv_downsample)
self.final_bn = nn.BatchNorm2d(64 * block.expansion)
self.final_relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, layer_gates, block, planes, blocks, stride=1, conv_downsample=False):
downsample = None
outplanes = planes * block.expansion
if stride != 1 or self.inplanes != outplanes:
if conv_downsample:
downsample = nn.Conv2d(self.inplanes, outplanes,
kernel_size=1, stride=stride, bias=False)
else:
# Identity downsample uses strided average pooling + padding instead of convolution
pad_amount = int(self.inplanes / 2)
downsample = nn.Sequential(
nn.AvgPool2d(2),
nn.ConstantPad3d((0, 0, 0, 0, pad_amount, pad_amount), 0)
)
layers = []
layers.append(block(layer_gates[0], self.inplanes, planes, stride, downsample, conv_downsample))
self.inplanes = outplanes
for i in range(1, blocks):
layers.append(block(layer_gates[i], self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.final_bn(x)
x = self.final_relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def preact_resnet20_cifar(**kwargs):
model = PreactResNetCifar(PreactBasicBlock, [3, 3, 3], **kwargs)
return model
def preact_resnet32_cifar(**kwargs):
model = PreactResNetCifar(PreactBasicBlock, [5, 5, 5], **kwargs)
return model
def preact_resnet44_cifar(**kwargs):
model = PreactResNetCifar(PreactBasicBlock, [7, 7, 7], **kwargs)
return model
def preact_resnet56_cifar(**kwargs):
model = PreactResNetCifar(PreactBasicBlock, [9, 9, 9], **kwargs)
return model
def preact_resnet110_cifar(**kwargs):
model = PreactResNetCifar(PreactBasicBlock, [18, 18, 18], **kwargs)
return model
def preact_resnet182_cifar(**kwargs):
model = PreactResNetCifar(PreactBasicBlock, [30, 30, 30], **kwargs)
return model
def preact_resnet20_cifar_conv_ds(**kwargs):
return preact_resnet20_cifar(conv_downsample=True)
def preact_resnet32_cifar_conv_ds(**kwargs):
return preact_resnet32_cifar(conv_downsample=True)
def preact_resnet44_cifar_conv_ds(**kwargs):
return preact_resnet44_cifar(conv_downsample=True)
def preact_resnet56_cifar_conv_ds(**kwargs):
return preact_resnet56_cifar(conv_downsample=True)
def preact_resnet110_cifar_conv_ds(**kwargs):
return preact_resnet110_cifar(conv_downsample=True)
def preact_resnet182_cifar_conv_ds(**kwargs):
return preact_resnet182_cifar(conv_downsample=True)
I use distiller.example.classifier_compression.compress_classifier.py from Distiller and run it from the terminal with a schedule file. For reference, the command is "python compress_classifier.py -a preact_resnet20_cifar --lr 0.1 -p 50 -b 128 -j 1 --epochs 200 --compress=../quantization/quant_aware_train/preact_resnet_cifar_dorefa.yaml --wd=0.0002 --vs=0 --gpus 0".
I tried adding quantization noise instead, and accuracy was fine. But in my opinion that is not a faithful emulation of quantization, because the data distribution is not uniform.
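One thing worth noting about the quantize function above: floor-style rounding ((y // lsb)*lsb) has zero gradient almost everywhere, so placing it in the forward pass during QAT stops useful gradients from flowing through the quantized convolutions. A common remedy is a straight-through estimator; a minimal sketch of that idea (my own code under that assumption, not Distiller's API):
import torch

class QuantizeSTE(torch.autograd.Function):
    """Uniform quantizer whose backward pass is the identity (straight-through)."""
    @staticmethod
    def forward(ctx, tensor, n_bits=32, dim=144, clip_ratio=1):
        max_value = dim * clip_ratio
        y = torch.clamp(tensor, min=-max_value, max=max_value)
        lsb = (2 * max_value) / (2 ** n_bits)
        return torch.floor(y / lsb) * lsb

    @staticmethod
    def backward(ctx, grad_output):
        # Pass the gradient through unchanged; the hyperparameters need none.
        return grad_output, None, None, None

# usage inside a block's forward(), replacing the plain quantize() call:
# out = QuantizeSTE.apply(out, self.resolution, dim, 1)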
Here I am simply calling a third-party API through multiprocessing to get stock prices. I call this function several times because I want the stock data in multiple timeframes (5 min, 10 min, 30 min). But when I run it, it does not wait for the previous call to finish and instead moves on to the last one. How can I run each call in order?
import pickle
import pandas as pd
import datetime
import multiprocessing
import time
import subprocess,os
def historical_data(timeframe):
global prices
def split_dict_equally(input_dict, chunks=2):
"Splits dict by keys. Returns a list of dictionaries."
# prep with empty dicts
return_list = [dict() for idx in range(chunks)]
idx = 0
for k,v in input_dict.items():
return_list[idx][k] = v
if idx < chunks-1: # indexes start at 0
idx += 1
else:
idx = 0
return return_list
with open('zerodha_login.pkl', 'rb') as file:
        # Call load method to deserialize
login_credentials = pickle.load(file)
with open('zerodha_instruments.pkl', 'rb') as file:
        # Call load method to deserialize
inst_dict = pickle.load(file)
csv = pd.read_csv('D:\\Business\\Website\\Trendlines\\FO Stocks.csv')
csv['Stocks'] = csv['Stocks'].str.replace(' ','')
fo_stocks = csv['Stocks'].to_list()
inst = pd.DataFrame(inst_dict)
filtered_inst = inst.copy()
filtered_inst = inst[(inst['segment'] == 'NSE') & (inst['name'] != '') & (inst['tick_size'] == 0.05) ]
filtered_inst = filtered_inst[filtered_inst['tradingsymbol'].isin(fo_stocks)]
tickers_dict = dict(zip(filtered_inst['instrument_token'],filtered_inst['tradingsymbol']))
number_process = 16
tickers_dict_list = split_dict_equally(tickers_dict,number_process)
def prices(stock):
print('inside_function',os.getpid())
for x,y in stock.items():
print('inside_stock_loop')
while True:
try:
print('Timeframe::',timeframe,y)
data = login_credentials['kite'].historical_data(instrument_token=x, from_date=today_date - datetime.timedelta(days=1000),interval=str(timeframe),to_date=today_date )
df = pd.DataFrame(data)
g = [e for e in df.columns if 'Un' not in e]
df = df[g]
df['date'] = df['date'].astype(str)
df['date'] = df['date'].str.split('+')
df['Date'] = df['date'].str[0]
df = df[['Date','open','high','low','close','volume']]
df['Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d %H:%M:%S')
df['Time'] = df['Date'].dt.time
df['Date'] = df['Date'].dt.date
df.rename(columns={'open':'Open','high':'High','low':'Low','close':'Close','volume':'Volume'},inplace=True)
df.to_csv('D:\\Business\\Website\\Trendlines\\4th Cut\\Historical data\\'+str(timeframe)+'\\'+str(y)+'.csv')
break
except:
print('Issue ::',y)
pass
new_list = []
if __name__ == '__main__':
for process in tickers_dict_list:
p = multiprocessing.Process(target=prices, args=(process,))
p.start()
new_list.append(p)
for p in new_list:
print('joining_',p)
p.join()
historical_data('5minute')
historical_data('10minute')
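A minimal sketch of one way to enforce the ordering (with a placeholder fetch function standing in for the real API call): start and join all workers for one timeframe inside the __main__ guard before launching the next timeframe, so each batch must finish before the next begins.
import multiprocessing

def fetch_chunk(chunk, timeframe):
    # placeholder for the real per-chunk download loop
    print(timeframe, list(chunk))

def run_timeframe(chunks, timeframe):
    procs = [multiprocessing.Process(target=fetch_chunk, args=(c, timeframe))
             for c in chunks]
    for p in procs:
        p.start()
    for p in procs:
        p.join()  # block until every worker for this timeframe finishes

if __name__ == '__main__':
    chunks = [{'A': 1}, {'B': 2}]
    for tf in ('5minute', '10minute', '30minute'):
        run_timeframe(chunks, tf)  # strictly sequential across timeframes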
I am pursuing a deep learning course on GUVI, which is managed by IIT Chennai and known as PadhaiOneFourthLabs. I am currently on the feed-forward neural networks section. This error is persistent and will not go away no matter how much I try. The code that I implemented is:
class SigmoidNeuron():
def __init__(self):
self.w = None
self.b = None
def perceptron(self,e):
return (np.dot(e,self.w) + self.b)
def sigmoid(self,e):
return 1/(1+np.exp(-e))
def grad_w_mse(self,e,f):
f_cal = self.sigmoid(self.perceptron(e))
return (f_cal - f) * f_cal * (1 - f_cal) * e
def grad_b_mse(self,e,f):
f_cal = self.sigmoid(self.perceptron(e))
return (f_cal - f) * f_cal * (1 - f_cal)
def grad_w_ce(self,e,f):
f_cal = self.sigmoid(self.perceptron(e))
if f == 0:
return f_cal * e
elif f == 1:
return -1 * (1-f_cal) * e
else:
raise ValueError("f can only be 0 or 1")
    def grad_b_ce(self,e,f):
        f_cal = self.sigmoid(self.perceptron(e))
if f == 0:
return f_cal
elif f == 1:
return -1 * (1-f_cal)
else:
raise ValueError('f can only be 1 or 0')
def fit(self,E,F,epochs= 1,learning_rate= 1, initialise= True, loss_func= "mse", display_loss = False):
if initialise:
self.w = None
self.b = None
if display_loss:
loss = {}
for i in tqdm_notebook(range(epochs), total= epochs, unit = "epoch"):
dw = 0
db = 0
for e,f in zip (E,F):
if loss_func == "mse":
dw += self.grad_w_mse(e,f)
db += self.grad_b_mse(e,f)
elif loss_func == "ce":
dw += self.grad_w_ce(e,f)
db += self.grad_b_ce(e,f)
m = E.shape[1]
self.w -= learning_rate * dw/m
self.b -= learning_rate * db/m
if display_loss:
F_cal = self.sigmoid(self.perceptron(E))
if loss_func == "mse":
loss[i] = mean_squared_error(F,F_cal)
elif loss_func =="ce":
loss[i] = log_loss(F,F_cal)
if display_loss:
plt.plot(list(loss.values()))
plt.xlabel('Epochs')
if loss_func == "mse":
plt.ylabel('Mean Squared Error')
elif loss_func == "ce":
plt.ylabel('Log Loss')
plt.show()
def predict(self,E):
F_cal = []
for e in E:
f_cal = self.sigmoid(self.perceptron(e))
F_cal.append(f_cal)
return np.array(F_cal)
The error I get is displayed as:
TypeError Traceback (most recent call last)
<ipython-input-30-bd02664ff1b8> in <module>()
1 S = SigmoidNeuron()
----> 2 S.fit(E_train,F_train,epochs = 1000, learning_rate = 0.5, display_loss = True)
2 frames
<ipython-input-23-891427be7d2d> in perceptron(self, e)
5
6 def perceptron(self,e):
----> 7 return (np.dot(e,self.w) + self.b)
8
9 def sigmoid(self,e):
<__array_function__ internals> in dot(*args, **kwargs)
TypeError: unsupported operand type(s) for *: 'float' and 'NoneType'
I have tried changing the code as much as I can, but no matter what, the error remains as it is.
Kindly help me out with this so that I may be able to proceed further.
Thanks!
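For what it's worth, the traceback points at self.w still being None when perceptron() first runs, because fit() resets the weights to None rather than to numbers when initialise is True. A minimal sketch of a numeric initialization (my own guess at the shapes, assuming E is a 2-D samples-by-features array):
import numpy as np

class SigmoidNeuron:
    def __init__(self):
        self.w = None
        self.b = None

    def perceptron(self, e):
        return np.dot(e, self.w) + self.b

    def fit(self, E, F, epochs=1, learning_rate=1, initialise=True):
        if initialise:
            # numeric starting values instead of None, so the first
            # perceptron() call has valid operands
            self.w = np.random.randn(E.shape[1])
            self.b = 0.0
        # ... gradient-descent updates as in the original fit() ...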
I am trying to solve a system of differential equations using odeint. I have 4 txt files (that look like the picture below). I read them and save them in numpy arrays (length: 8000) (maybe not in the most efficient way, but anyway...). I want to pass these 4 arrays as arguments to my odeint call and solve the system. For example, at every time step odeint takes (one of the 8000), I want it to use a different value from these arrays. Is there any way to do it automatically without getting lost in for loops? I tried to do it like this (see code below) but I get the error:
if g2>0: ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
g2 is supposed to be 1x1 at every step of odeint, so it must be something about the way I use the 4 arrays (xdot, ydot, xdotdot, ydotdot).
I am new to python and I use python 2.7.12 on Ubuntu 16.04 LTS.
Thank you in advance.
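As an aside on the file reading below, numpy can do the same job in one line per file; a minimal sketch (assuming one float per line in each txt file):
import numpy as np

xdot = np.loadtxt('xdot.txt', dtype=np.float32)  # one value per line -> 1-D array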
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
added_mass_x = 0.03 # kg
added_mass_y = 0.04
mb = 0.3 # kg
m1 = mb-added_mass_x
m2 = mb-added_mass_y
l1 = 0.07 # m
l2 = 0.05 # m
J = 0.00050797 # kgm^2
Sa = 0.0110 # m^2
Cd = 2.44
Cl = 3.41
Kd = 0.000655 # kgm^2
r = 1000 # kg/m^3
c1 = 0.5*r*Sa*Cd
c2 = 0.5*r*Sa*Cl
c3 = 0.5*mb*(l1**2)
c4 = Kd/J
c5 = (1/(2*J))*(l1**2)*mb*l2
c6 = (1/(3*J))*(l1**3)*mb
theta_0 = 10*(np.pi/180) # rad
theta_A = 20*(np.pi/180) # rad
f = 2 # Hz
###################################################################
t = np.linspace(0,100,8000) # s
###################################################################
# Save data from txt files into numpy arrays
xdot_list = []
ydot_list = []
xdotdot_list = []
ydotdot_list = []
with open('xdot.txt', 'r') as filehandle:
filecontents = filehandle.readlines()
for line in filecontents:
current_place = line[:-1]
xdot_list.append(current_place)
xdot = np.array(xdot_list, dtype=np.float32)
with open('ydot.txt', 'r') as filehandle:
filecontents = filehandle.readlines()
for line in filecontents:
current_place = line[:-1]
ydot_list.append(current_place)
ydot = np.array(ydot_list, dtype=np.float32)
with open('xdotdot.txt', 'r') as filehandle:
filecontents = filehandle.readlines()
for line in filecontents:
current_place = line[:-1]
xdotdot_list.append(current_place)
xdotdot = np.array(xdotdot_list, dtype=np.float32)
with open('ydotdot.txt', 'r') as filehandle:
filecontents = filehandle.readlines()
for line in filecontents:
current_place = line[:-1]
ydotdot_list.append(current_place)
ydotdot = np.array(ydotdot_list, dtype=np.float32)
def inverse(k,t,xdot,ydot,xdotdot,ydotdot):
vcx_i = k[0]
vcy_i = k[1]
psi_i = k[2]
wz_i = k[3]
theta_i = k[4]
theta_deg_i = k[5]
# Subsystem 4
vcx_i = xdot*np.cos(psi_i)-ydot*np.sin(psi_i)
vcy_i = ydot*np.cos(psi_i)+xdot*np.sin(psi_i)
psidot_i = wz_i
vcxdot_i = xdotdot*np.cos(psi_i)-xdot*np.sin(psi_i)*psidot_i-ydotdot*np.sin(psi_i)-ydot*np.cos(psi_i)*psidot_i
vcydot_i = ydotdot*np.cos(psi_i)-ydot*np.sin(psi_i)*psidot_i+xdotdot*np.sin(psi_i)+xdot*np.cos(psi_i)*psidot_i
g1 = -(m1/c3)*vcxdot_i+(m2/c3)*vcy_i*wz_i-(c1/c3)*vcx_i*np.sqrt((vcx_i**2)+(vcy_i**2))+(c2/c3)*vcy_i*np.sqrt((vcx_i**2)+(vcy_i**2))*np.arctan2(vcy_i,vcx_i)
g2 = (m2/c3)*vcydot_i+(m1/c3)*vcx_i*wz_i+(c1/c3)*vcy_i*np.sqrt((vcx_i**2)+(vcy_i**2))+(c2/c3)*vcx_i*np.sqrt((vcx_i**2)+(vcy_i**2))*np.arctan2(vcy_i,vcx_i)
    A = 12*np.sin(2*np.pi*f*t+np.pi)  # tail_frequency equation from Simulink
if A>=0.1:
wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2-c6*np.sqrt((g1**2)+(g2**2))
elif A<-0.1:
wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2+c6*np.sqrt((g1**2)+(g2**2))
else:
wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2
# Subsystem 5
if g2>0:
theta_i = np.arctan2(g1,g2)
elif g2<0 and g1>=0:
theta_i = np.arctan2(g1,g2)-np.pi
elif g2<0 and g1<0:
theta_i = np.arctan2(g1,g2)+np.pi
elif g2==0 and g1>0:
theta_i = -np.pi/2
elif g2==0 and g1<0:
theta_i = np.pi/2
elif g1==0 and g2==0:
theta_i = 0
theta_deg_i = (theta_i*180)/np.pi
return [vcxdot_i, vcydot_i, psidot_i, wzdot_i, theta_i, theta_deg_i]
# initial conditions
vcx_i_0 = 0.1257
vcy_i_0 = 0
psi_i_0 = 0
wz_i_0 = 0
theta_i_0 = 0
theta_deg_i_0 = 0
#theta_i_0 = 0.1745
#theta_deg_i_0 = 9.866
k0 = [vcx_i_0, vcy_i_0, psi_i_0, wz_i_0, theta_i_0, theta_deg_i_0]
# solve the system of differential equations
k = odeint(inverse, k0, t, args=(xdot,ydot,xdotdot,ydotdot), tfirst=False)
# store the results
vcx_i = k[:,0]
vcy_i = k[:,1]
psi_i = k[:,2]
wz_i = k[:,3]
theta_i = k[:,4]
theta_deg_i = k[:,5]
# Re-run Subsystem 5 so that theta_i and theta_deg_i can be plotted
theta_i = [inverse(k_i, t_i)[4] for t_i, k_i in zip(t, k)]
theta_deg_i = [inverse(k_i, t_i)[5] for t_i, k_i in zip(t, k)]
# Compute the mean theta angle and the oscillation amplitude
mesi_gwnia = sum(theta_i)/len(theta_i) # rad
platos = (max(theta_i)-min(theta_i))/2
UPDATE:
The most relevant solution I found so far is this:
Solving a system of odes (with changing constant!) using scipy.integrate.odeint?
But since I only have the values of my variables in arrays, and not the equations of the variables as functions of time (e.g. xdot = f(t)), I tried to apply an interpolation between the values in my arrays, as shown here: ODEINT with multiple parameters (time-dependent)
I managed to get the code running without errors, but the total time increased dramatically and the results of the solved system are completely wrong. I tried every type of interpolation that I found here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html but still got the wrong outcome. That means that either my interpolation isn't the best possible, or my arrays (8000 values) have too many points to interpolate between and still solve the system correctly.
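For reference, the interpolation approach usually looks like the following minimal sketch (toy 1-D dynamics and synthetic data, not the system above): build one interp1d per signal outside the right-hand side, then evaluate it at the solver's time argument so each call receives a scalar.
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d

t = np.linspace(0, 100, 8000)
xdot_samples = np.sin(t)  # stand-in for the values read from xdot.txt
xdot_f = interp1d(t, xdot_samples, kind='linear', fill_value='extrapolate')

def rhs(k, ti):
    xd = float(xdot_f(ti))  # scalar value of the sampled signal at this time
    return [-k[0] + xd]     # toy dynamics, just to show the wiring

k = odeint(rhs, [0.0], t)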
I am setting up an optimization in OpenMDAO v0.13 using several components that are used many times. My assembly seems to work just fine with the default driver, but when I run with an optimizer it does not solve: the optimizer simply runs with the given inputs and returns the answer computed from those inputs. I am not sure what the issue is, but I would appreciate any insights. I have included a simple code that mimics my structure and reproduces the error. I think the problem is in the connections; summer.fs does not update after initialization.
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float, Array, List
from openmdao.lib.drivers.api import DOEdriver, SLSQPdriver, COBYLAdriver, CaseIteratorDriver
from pyopt_driver.pyopt_driver import pyOptDriver
import numpy as np
class component1(Component):
x = Float(iotype='in')
y = Float(iotype='in')
term1 = Float(iotype='out')
a = Float(iotype='in', default_value=1)
def execute(self):
x = self.x
a = self.a
term1 = a*x**2
self.term1 = term1
print "In comp1", self.name, self.a, self.x, self.term1
def list_deriv_vars(self):
return ('x',), ('term1',)
def provideJ(self):
x = self.x
a = self.a
dterm1_dx = 2.*a*x
J = np.array([[dterm1_dx]])
print 'In comp1, J = %s' % J
return J
class component2(Component):
x = Float(iotype='in')
y = Float(iotype='in')
term1 = Float(iotype='in')
f = Float(iotype='out')
def execute(self):
y = self.y
x = self.x
term1 = self.term1
f = term1 + x + y**2
self.f = f
print "In comp2", self.name, self.x, self.y, self.term1, self.f
class summer(Component):
total = Float(iotype='out', desc='sum of all f values')
def __init__(self, size):
super(summer, self).__init__()
self.size = size
self.add('fs', Array(np.ones(size), iotype='in', desc='f values from all cases'))
def execute(self):
self.total = sum(self.fs)
print 'In summer, fs = %s and total = %s' % (self.fs, self.total)
class assembly(Assembly):
x = Float(iotype='in')
y = Float(iotype='in')
total = Float(iotype='out')
def __init__(self, size):
super(assembly, self).__init__()
self.size = size
self.add('a_vals', Array(np.zeros(size), iotype='in', dtype='float'))
self.add('fs', Array(np.zeros(size), iotype='out', dtype='float'))
print 'in init a_vals = %s' % self.a_vals
def configure(self):
# self.add('driver', SLSQPdriver())
self.add('driver', pyOptDriver())
self.driver.optimizer = 'SNOPT'
# self.driver.pyopt_diff = True
#create this first, so we can connect to it
self.add('summer', summer(size=len(self.a_vals)))
self.connect('summer.total', 'total')
print 'in configure a_vals = %s' % self.a_vals
# create instances of components
for i in range(0, self.size):
c1 = self.add('comp1_%d'%i, component1())
c1.missing_deriv_policy = 'assume_zero'
c2 = self.add('comp2_%d'%i, component2())
self.connect('a_vals[%d]' % i, 'comp1_%d.a' % i)
self.connect('x', ['comp1_%d.x'%i, 'comp2_%d.x'%i])
self.connect('y', ['comp1_%d.y'%i, 'comp2_%d.y'%i])
self.connect('comp1_%d.term1'%i, 'comp2_%d.term1'%i)
self.connect('comp2_%d.f'%i, 'summer.fs[%d]'%i)
self.driver.workflow.add(['comp1_%d'%i, 'comp2_%d'%i])
self.connect('summer.fs[:]', 'fs[:]')
self.driver.workflow.add(['summer'])
# set up main driver (optimizer)
self.driver.iprint = 1
self.driver.maxiter = 100
self.driver.accuracy = 1.0e-6
self.driver.add_parameter('x', low=-5., high=5.)
self.driver.add_parameter('y', low=-5., high=5.)
self.driver.add_objective('summer.total')
if __name__ == "__main__":
""" the result should be -1 at (x, y) = (-0.5, 0) """
import time
from openmdao.main.api import set_as_top
a_vals = np.array([1., 1., 1., 1.])
test = set_as_top(assembly(size=len(a_vals)))
test.a_vals = a_vals
print test.a_vals
test.x = 2.
test.y = 2.
tt = time.time()
test.run()
print "Elapsed time: ", time.time()-tt, "seconds"
print 'result = ', test.summer.total
print '(x, y) = (%s, %s)' % (test.x, test.y)
print test.fs
I played around with your model, and found that the following line caused problems:
#self.connect('summer.fs[:]', 'fs[:]')
When I commented it out, I got the optimization to move.
I am not sure what is happening there, but the graph transformations sometimes have some issues with component input nodes that are promoted as outputs on the assembly boundary. If you still want those values to be available on the assembly, you could try promoting the outputs from the comp2_n components instead.
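A minimal sketch of that suggestion (hypothetical, using the same classic-OpenMDAO connect syntax as above): drop the summer.fs promotion and feed the assembly-level fs output from the comp2_n outputs directly, so no input node is re-exported on the assembly boundary.
# inside assembly.configure(), replacing self.connect('summer.fs[:]', 'fs[:]'):
for i in range(0, self.size):
    self.connect('comp2_%d.f' % i, 'summer.fs[%d]' % i)  # as before
    self.connect('comp2_%d.f' % i, 'fs[%d]' % i)         # expose each f directly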