How to create your own dataset for FCN with Caffe? - dataset

How do you convert images to LMDB for FCN with Caffe? It's easy to create your own dataset for image classification with Caffe, but how do you create your own dataset for semantic segmentation with FCN?

Use this code. Make the necessary path changes. Please read the code carefully before using it.
import caffe
import lmdb
from PIL import Image
import numpy as np
import glob
from random import shuffle
# Initialize the Image set:
NumberTrain = 1111 # Number of Training Images
NumberTest = 1112 # Number of Testing Images
Rheight = 380 # Required Height
Rwidth = 500 # Required Width
LabelHeight = 380 # Downscaled height of the label
LabelWidth = 500 # Downscaled width of the label
# Read the files in the Data Folder
inputs_data_train = sorted(glob.glob("/home/<user>/caffe-with_crop/examples/fcn-32s/TrainData/*.jpg"))
inputs_data_valid = sorted(glob.glob("/home/<user>/caffe-with_crop/examples/fcn-32s/ValData/*.jpg"))
inputs_label = sorted(glob.glob("/home/<user>/caffe-with_crop/examples/fcn-32s/VOC2011/SegmentationClass/*.png"))
shuffle(inputs_data_train) # Shuffle the DataSet
shuffle(inputs_data_valid) # Shuffle the DataSet
inputs_Train = inputs_data_train[:NumberTrain] # Extract the training data from the complete set
inputs_Test = inputs_data_valid[:NumberTest] # Extract the testing data from the complete set
# Creating LMDB for Training Data
print("Creating Training Data LMDB File ..... ")
in_db = lmdb.open('/home/<user>/caffe-with_crop/examples/fcn-32s/TrainVOC_Data_lmdb',map_size=int(1e14))
with in_db.begin(write=True) as in_txn:
    for in_idx, in_ in enumerate(inputs_Train):
        print(in_idx)
        im = np.array(Image.open(in_))  # or load whatever ndarray you need
        Dtype = im.dtype
        im = im[:, :, ::-1]  # RGB -> BGR, as Caffe expects
        im = Image.fromarray(im)
        im = im.resize([Rwidth, Rheight], Image.ANTIALIAS)  # PIL resize takes (width, height)
        im = np.array(im, Dtype)
        im = im.transpose((2, 0, 1))  # HWC -> CHW
        im_dat = caffe.io.array_to_datum(im)
        in_txn.put('{:0>10d}'.format(in_idx), im_dat.SerializeToString())
in_db.close()
# Creating LMDB for Training Labels
print("Creating Training Label LMDB File ..... ")
in_db = lmdb.open('/home/<user>/caffe-with_crop/examples/fcn-32s/TrainVOC_Label_lmdb',map_size=int(1e14))
with in_db.begin(write=True) as in_txn:
    for in_idx, in_ in enumerate(inputs_Train):
        print(in_idx)
        in_label = in_[:-25]+'VOC2011/SegmentationClass/'+in_[-15:-3]+'png' # Change the numbers as per requirement
        L = np.array(Image.open(in_label))  # load the label image, not the data image
        Dtype = L.dtype
        Limg = Image.fromarray(L)  # labels are single-channel class maps, so no channel flip here
        Limg = Limg.resize([LabelWidth, LabelHeight], Image.NEAREST)  # nearest-neighbour keeps class indices intact; PIL takes (width, height)
        L = np.array(Limg, Dtype)
        L = L.reshape(L.shape[0], L.shape[1], 1)
        L = L.transpose((2, 0, 1))  # HWC -> CHW
        L_dat = caffe.io.array_to_datum(L)
        in_txn.put('{:0>10d}'.format(in_idx), L_dat.SerializeToString())
in_db.close()
# Creating LMDB for Testing Data
print("Creating Testing Data LMDB File ..... ")
in_db = lmdb.open('/home/<user>/caffe-with_crop/examples/fcn-32s/TestVOC_Data_lmdb',map_size=int(1e14))
with in_db.begin(write=True) as in_txn:
    for in_idx, in_ in enumerate(inputs_Test):
        print(in_idx)
        im = np.array(Image.open(in_))  # or load whatever ndarray you need
        Dtype = im.dtype
        im = im[:, :, ::-1]  # RGB -> BGR, as Caffe expects
        im = Image.fromarray(im)
        im = im.resize([Rwidth, Rheight], Image.ANTIALIAS)  # PIL resize takes (width, height)
        im = np.array(im, Dtype)
        im = im.transpose((2, 0, 1))  # HWC -> CHW
        im_dat = caffe.io.array_to_datum(im)
        in_txn.put('{:0>10d}'.format(in_idx), im_dat.SerializeToString())
in_db.close()
# Creating LMDB for Testing Labels
print("Creating Testing Label LMDB File ..... ")
in_db = lmdb.open('/home/<user>/caffe-with_crop/examples/fcn-32s/TestVOC_Label_lmdb',map_size=int(1e14))
with in_db.begin(write=True) as in_txn:
    for in_idx, in_ in enumerate(inputs_Test):
        print(in_idx)
        in_label = in_[:-25]+'VOC2011/SegmentationClass/'+in_[-15:-3]+'png' # Change the numbers as per requirement
        L = np.array(Image.open(in_label))  # load the label image, not the data image
        Dtype = L.dtype
        Limg = Image.fromarray(L)  # labels are single-channel class maps, so no channel flip here
        Limg = Limg.resize([LabelWidth, LabelHeight], Image.NEAREST)  # nearest-neighbour keeps class indices intact; PIL takes (width, height)
        L = np.array(Limg, Dtype)
        L = L.reshape(L.shape[0], L.shape[1], 1)
        L = L.transpose((2, 0, 1))  # HWC -> CHW
        L_dat = caffe.io.array_to_datum(L)
        in_txn.put('{:0>10d}'.format(in_idx), L_dat.SerializeToString())
in_db.close()
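To sanity-check what was written, here is a minimal read-back sketch (assuming the same TrainVOC_Data_lmdb path as above and a working pycaffe install); it decodes the first record with the Caffe Datum protobuf:

import lmdb
import caffe
from caffe.proto import caffe_pb2

# Path is assumed to match the training-data LMDB created above
env = lmdb.open('/home/<user>/caffe-with_crop/examples/fcn-32s/TrainVOC_Data_lmdb', readonly=True)
with env.begin() as txn:
    cursor = txn.cursor()
    for key, value in cursor:
        datum = caffe_pb2.Datum()
        datum.ParseFromString(value)
        arr = caffe.io.datum_to_array(datum)  # shape: (channels, height, width)
        print(key, arr.shape, arr.dtype)
        break  # only inspect the first record
env.close()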

Related

Problem with saving pickle object into arrays from images in python

I have the following class for loading and converting my images into train and test arrays for a deep learning model in Tensorflow 2.
The images are in three folders, named 'Car', 'Cat' and 'Man', inside the main Train and Test folders. Each image is 300 x 400 pixels.
import os
import pickle
import random  # needed for random.shuffle below
import cv2
import numpy as np
os.getcwd()
out: 'C:\\Users\\me\\Jupiter_Notebooks\\Dataset_Thermal\\SeekThermal'
path_train = "../SeekThermal/Train"
path_test = "../SeekThermal/Test"
class MasterImage(object):

    def __init__(self, PATH='', IMAGE_SIZE=50):
        self.PATH = PATH
        self.IMAGE_SIZE = IMAGE_SIZE
        self.image_data = []
        self.x_data = []
        self.y_data = []
        self.CATEGORIES = []
        # This will get the list of categories
        self.list_categories = []

    def get_categories(self):
        for path in os.listdir(self.PATH):
            if '.DS_Store' in path:
                pass
            else:
                self.list_categories.append(path)
        print("Found Categories ", self.list_categories, '\n')
        return self.list_categories
    def process_image(self):
        try:
            """
            Return Numpy array of image
            :return: X_Data, Y_Data
            """
            self.CATEGORIES = self.get_categories()
            for categories in self.CATEGORIES:  # Iterate over categories
                train_folder_path = os.path.join(self.PATH, categories)  # Folder Path
                class_index = self.CATEGORIES.index(categories)  # this will get index for classification
                for img in os.listdir(train_folder_path):  # This will iterate in the Folder
                    new_path = os.path.join(train_folder_path, img)  # image Path
                    try:  # if any image is corrupted
                        image_data_temp = cv2.imread(new_path)  # Read Image as numbers
                        image_temp_resize = cv2.resize(image_data_temp, (self.IMAGE_SIZE, self.IMAGE_SIZE))
                        self.image_data.append([image_temp_resize, class_index])
                        random.shuffle(self.image_data)
                    except:
                        pass
            data = np.asanyarray(self.image_data)  # or, data = np.asanyarray(self.image_data, dtype=object)
            # Iterate over the Data
            for x in data:
                self.x_data.append(x[0])  # Get the X_Data
                self.y_data.append(x[1])  # get the label
            X_Data = np.asarray(self.x_data) / (255.0)  # Normalize Data
            Y_Data = np.asarray(self.y_data)
            # reshape x_Data
            X_Data = X_Data.reshape(-1, self.IMAGE_SIZE, self.IMAGE_SIZE, 3)
            return X_Data, Y_Data
        except:
            print("Failed to run Function Process Image ")
    def pickle_image(self):
        """
        :return: None Creates a Pickle Object of DataSet
        """
        # Call the Function and Get the Data
        X_Data, Y_Data = self.process_image()
        # Write the Entire Data into a Pickle File
        pickle_out = open('X_Data', 'wb')
        pickle.dump(X_Data, pickle_out)
        pickle_out.close()
        # Write the Y Label Data
        pickle_out = open('Y_Data', 'wb')
        pickle.dump(Y_Data, pickle_out)
        pickle_out.close()
        print("Pickled Image Successfully ")
        return X_Data, Y_Data
    def load_dataset(self):
        try:
            # Read the Data from Pickle Object
            X_Temp = open('..\SeekThermal\X_Data', 'rb')
            X_Data = pickle.load(X_Temp)
            Y_Temp = open('..\SeekThermal\Y_Data', 'rb')
            Y_Data = pickle.load(Y_Temp)
            print('Reading Dataset from Pickle Object')
            return X_Data, Y_Data
        except:
            print('Could not Found Pickle File ')
            print('Loading File and Dataset ..........')
            X_Data, Y_Data = self.pickle_image()
            return X_Data, Y_Data
I don't understand what the problem is with the pickle file, because just last week I was able to create these arrays successfully with the same code.
Is there an easier way to load images in TensorFlow rather than through this custom-defined class?
a = MasterImage(PATH = path_train,IMAGE_SIZE = 224)
a.process_image()
out:
It produces an array with a warning:
VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
data = np.asanyarray(self.image_data)
a.pickle_image()
out:
TypeError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_1692\507657192.py in <cell line: 1>()
----> 1 a.pickle_image()
~\AppData\Local\Temp\ipykernel_1692\1410849712.py in pickle_image(self)
71 """
72 # Call the Function and Get the Data
---> 73 X_Data,Y_Data = self.process_image()
74
75 # Write the Entire Data into a Pickle File
TypeError: cannot unpack non-iterable NoneType object
a.load_dataset()
out:
Could not Found Pickle File
Loading File and Dataset ..........
Found Categories ['Car', 'Cat', 'Man', 'Car', 'Cat', 'Man']
Pickled Image Successfully
I'm running Python 3.8.8 via anaconda on Windows 10. Thank you for any advice.
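On the second question (whether there is an easier way to load images): TensorFlow ships a directory loader that replaces most of this class. A minimal sketch, assuming a recent TF 2.x (in older 2.x releases the same function lives under tf.keras.preprocessing) and the Train/Test folder layout described above; the path and sizes here are illustrative:

import tensorflow as tf

# Illustrative path and size; adjust to the actual dataset
train_ds = tf.keras.utils.image_dataset_from_directory(
    "../SeekThermal/Train",
    labels="inferred",        # class taken from the sub-folder name (Car, Cat, Man)
    label_mode="int",         # integer class indices, like class_index above
    image_size=(224, 224),    # resized on load
    batch_size=32,
)
# Rescale to [0, 1], as the class above does with /255.0
normalize = tf.keras.layers.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalize(x), y))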

Fusion of vgg19 model and dense layer model

AIM: To perform multiclass classification of materials using images and roughness values.
DATASET - I have a dataset with 11 material classes, and each class has 25 images. So the total number of training images is 20*11 = 220, and 5*11 = 55 images belong to the validation set. Each image has 6 corresponding roughness parameters. These parameters are stored in a csv file with input = RaX, RaY, RqX, RqY, RzX, RzY and output = 11 material classes, so for every class 25 values of RaX, RaY, RqX, RqY, RzX, RzY are recorded. I have trained the images on vgg19 and the roughness values as dense layers.
PROBLEM STATEMENT - The training accuracy is 7% and the loss is 0.7. The vgg model and the dense model, when trained separately, gave good scores, but fusion (concatenation) of both models reduced the scores significantly.
Could you please help me out in this regard? Any suggestions and information are welcome. Thanks in advance.
Below is the description of the code:
data_path = "Excel_data/Roughness_11_classes.csv"
image_path = "WOOD_PLASTIC/IMAGES_11_classes"
classes=11
rv=[]#### return values object list from rough_data values
label=[]
image_list = []
a=[]
batch_size = 16
vgg19 = applications.VGG19(include_top=False, weights='imagenet')
datagen = ImageDataGenerator(rescale=1. / 255,validation_split=0.2)
img_width, img_height = 128, 512
def rough_values(path):  ## process roughness values
    global trainRX
    data = pd.read_csv(data_path)
    data_array = data.to_numpy()
    x = data_array[:, 0:6].astype(np.float32)
    y1 = data_array[:, 6].reshape(-1, 1)
    encoder = OneHotEncoder(sparse=False)
    y = encoder.fit_transform(y1)
    trainRX, testRX, trainRY, testRY = train_test_split(x, y, test_size=0.2, shuffle=False, random_state=0)
    rv = [data_array, trainRX, testRX, trainRY, testRY]
    return rv
def images(path):  ### processes images
    ##### TRAIN #########################################################
    generator_train = datagen.flow_from_directory(path, target_size=(img_width, img_height),
                                                  batch_size=batch_size, class_mode='categorical',
                                                  shuffle=False, subset='training')
    nb_train_samples = len(generator_train.filenames)
    num_classes = len(generator_train.class_indices)
    predict_size_train = int(math.ceil(nb_train_samples / batch_size))
    bottleneck_features_train = vgg19.predict_generator(generator_train, predict_size_train)
    print(bottleneck_features_train.shape)
    np.save("bottleneck_features_vgg19_multi_input.npy", bottleneck_features_train)
    ##### saves train data as .npy file
    ##### VALIDATION ####################################################
    generator_val = datagen.flow_from_directory(path, target_size=(img_width, img_height),
                                                batch_size=batch_size, class_mode='categorical',
                                                shuffle=False, subset='validation')
    print('generator_val is', generator_val)
    nb_validation_samples = len(generator_val.filenames)
    num_classes = len(generator_val.class_indices)
    predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))
    bottleneck_features_validation = vgg19.predict_generator(generator_val, predict_size_validation)
    np.save("bottleneck_features_validation_vgg19_multi_input.npy", bottleneck_features_validation)
    ##### training data load ############################################
    generator_top_train = datagen.flow_from_directory(path, target_size=(img_width, img_height),
                                                      batch_size=batch_size, class_mode='categorical',
                                                      shuffle=False, subset='training')
    nb_train_samples = len(generator_top_train.filenames)
    print('nb_train_samples are', nb_train_samples)
    num_classes = len(generator_top_train.class_indices)
    train_data = np.load("bottleneck_features_vgg19_multi_input.npy")
    print(train_data.shape)
    train_labels = generator_top_train.classes
    train_labels = to_categorical(train_labels, num_classes=num_classes)
    ##### validation data load ##########################################
    generator_top_val = datagen.flow_from_directory(path, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode='categorical',
                                                    shuffle=False, subset='validation')
    nb_validation_samples = len(generator_top_val.filenames)
    print('nb_validation_samples are', nb_validation_samples)
    num_classes = len(generator_top_val.class_indices)
    validation_data = np.load("bottleneck_features_validation_vgg19_multi_input.npy")
    validation_labels = generator_top_val.classes
    validation_labels = to_categorical(validation_labels, num_classes=num_classes)
    z = [train_data, validation_data, validation_labels, train_labels]
    return z
def create_dense(feed):  ## define MLP network for the roughness parameters
    model = Sequential()  ### input shape to dense is 1D
    model.add(Dense(64, input_shape=feed, activation='relu', name='fc1'))
    model.add(BatchNormalization())
    model.add(Dense(32, activation='relu', name='fc2'))
    model.add(BatchNormalization())
    return model
def create_vgg(input_shape, n_classes, optimizer='rmsprop', fine_tune=0):
    ### cnn model for images
    ######### Adding own model on top of vgg ##################
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    model.add(Dropout(0.5))
    model.add(Dense(100))
    model.add(layers.LeakyReLU(alpha=0.3))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(50))
    model.add(layers.LeakyReLU(alpha=0.3))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    ### Group the convolutional base and new fully-connected layers into a Model object
    return model
rv = rough_values(data_path)
input_dense = []
DA = rv[0]
TrRX = rv[1]
TeRX = rv[2]
TrRY = rv[3]
TeRY = rv[4]
z = images(image_path)  ## stores train_data, validation_data, validation_labels, train_labels
train_data = z[0]
vali = z[1]
val_label = z[2]
tr_label = z[3]
input_dense = TrRX[0].shape
class_names = ['ABS', 'PA', 'PC', 'PP', 'WOOD1', 'WOOD2', 'WOOD3', 'WOOD4', 'WOOD5', 'WOOD6', 'WOOD7']  ### 11 material classes
mlp = create_dense(input_dense)
cnn = create_vgg(train_data.shape[1:], classes)
combinedInput = concatenate([mlp.output, cnn.output])  ## concatenated models
top = Dense(64, activation="relu")(combinedInput)
top_model = Dense(11, activation="softmax")(top)  ### output as 11 classes
model = Model(inputs=[mlp.input, cnn.input], outputs=top_model)
## training #################################################################
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=optimizers.Adam(lr=1e-4), metrics=['acc'])
history = model.fit(x=[TrRX, train_data], y=TrRY, validation_data=([TeRX, vali], TeRY),
                    epochs=100, batch_size=32, validation_steps=(55 // batch_size))
(eval_loss, eval_accuracy) = model.evaluate([TeRX, vali], TeRY, batch_size=batch_size, verbose=1)
print("test accuracy: {:.2f}%".format(eval_accuracy * 100))
print("test Loss: {}".format(eval_loss))
model.save('fusion_model.h5')
# result = model.predict([TeRX, vali])
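One detail worth double-checking in the compile step above (an observation, not something stated in the original post): the final layer is an 11-way softmax, so the usual pairing is categorical cross-entropy with from_logits=False; binary cross-entropy with from_logits=True on one-hot labels can by itself produce the kind of collapsed accuracy described. A sketch of that change:

model.compile(
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),  # labels are one-hot encoded
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
    metrics=['acc'],
)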

To convert Tif files into RGB(png/jpg) using python

I am using the code snippet given below, and it runs without error, but the converted file does not get a .png extension even though I am passing 'png' as the output format.
I am running it in Colab, and I am attaching the output as well.
from osgeo import gdal
import numpy as np
import os
import subprocess
def _16bit_to_8Bit(inputRaster, outputRaster, outputPixType='Byte', outputFormat='png',
                   percentiles=[2, 98]):
    # Convert 16bit image to 8bit
    # Source: Medium.com, 'Creating Training Datasets for the SpaceNet Road Detection and Routing
    # Challenge' by Adam Van Etten and Jake Shermeyer
    srcRaster = gdal.Open(inputRaster)
    cmd = ['gdal_translate', '-ot', outputPixType, '-of', outputFormat]
    # iterate through bands
    for bandId in range(srcRaster.RasterCount):
        bandId = bandId + 1
        band = srcRaster.GetRasterBand(bandId)
        bmin = band.GetMinimum()
        bmax = band.GetMaximum()
        # if not exist minimum and maximum values
        if bmin is None or bmax is None:
            (bmin, bmax) = band.ComputeRasterMinMax(1)
        # else, rescale
        band_arr_tmp = band.ReadAsArray()
        bmin = np.percentile(band_arr_tmp.flatten(), percentiles[0])
        bmax = np.percentile(band_arr_tmp.flatten(), percentiles[1])
        cmd.append('-scale_{}'.format(bandId))
        cmd.append('{}'.format(bmin))
        cmd.append('{}'.format(bmax))
        cmd.append('{}'.format(0))
        cmd.append('{}'.format(255))
    cmd.append(inputRaster)
    cmd.append(outputRaster)
    print("Conversin command:", cmd)
    subprocess.call(cmd)

path = "/content/drive/MyDrive/Spacenet_data/RGB_Pan/"
files = os.listdir(path)
for file in files:
    resimPath = path + file
    dstPath = "/content/drive/MyDrive/Spacenet_data/"
    dstPath = dstPath + file
    _16bit_to_8Bit(resimPath, dstPath)
My output is showing like this:
Conversin command: ['gdal_translate', '-ot', 'Byte', '-of', 'png', '-scale_1', '149.0', '863.0', '0', '255', '-scale_2', '244.0', '823.0200000000186', '0', '255', '-scale_3', '243.0', '568.0', '0', '255', '/content/drive/MyDrive/Spacenet_data/RGB_Pan/img0.tif', '/content/drive/MyDrive/Spacenet_data/img0.tif']
Make the below changes and you are done.
from osgeo import gdal
import numpy as np
import os
import subprocess
def _16bit_to_8Bit(inputRaster, outputRaster, outputPixType='Byte',
                   outputFormat='png', percentiles=[2, 98]):
    srcRaster = gdal.Open(inputRaster)
    cmd = ['gdal_translate', '-ot', outputPixType, '-of', outputFormat]
    for bandId in range(srcRaster.RasterCount):
        bandId = bandId + 1
        band = srcRaster.GetRasterBand(bandId)
        bmin = band.GetMinimum()
        bmax = band.GetMaximum()
        # if not exist minimum and maximum values
        if bmin is None or bmax is None:
            (bmin, bmax) = band.ComputeRasterMinMax(1)
        # else, rescale
        band_arr_tmp = band.ReadAsArray()
        bmin = np.percentile(band_arr_tmp.flatten(), percentiles[0])
        bmax = np.percentile(band_arr_tmp.flatten(), percentiles[1])
        cmd.append('-scale_{}'.format(bandId))
        cmd.append('{}'.format(bmin))
        cmd.append('{}'.format(bmax))
        cmd.append('{}'.format(0))
        cmd.append('{}'.format(255))
    cmd.append(inputRaster)
    cmd.append(outputRaster)
    print("Conversin command:", cmd)
    subprocess.call(cmd)

path = "/content/drive/MyDrive/Spacenet_data/RGB_Pan/"
files = os.listdir(path)
for file in files:
    resimPath = path + file
    dstPath = "/content/drive/MyDrive/Spacenet_data/"
    dstPath = dstPath + file[:-3] + "png"
    _16bit_to_8Bit(resimPath, dstPath)
import os
import cv2

directory = os.fsencode(r"path")
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename.endswith(".tif"):
        print(filename)
        print(type(filename))
        print("\n")
        image = cv2.imread(filename)
        cv2.imwrite("{}.jpg".format(filename), image)
        continue
    else:
        continue
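To confirm the output really became an 8-bit PNG, a quick check with GDAL (a sketch; the path below just mirrors the Drive layout used above):

from osgeo import gdal

ds = gdal.Open("/content/drive/MyDrive/Spacenet_data/img0.png")  # illustrative output path
print(ds.GetDriver().ShortName)                                  # expected: PNG
print(ds.RasterCount, ds.RasterXSize, ds.RasterYSize)
print(gdal.GetDataTypeName(ds.GetRasterBand(1).DataType))        # expected: Byte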

Interpolating GFS winds from isobaric to height coordinates using Metpy

I have been tasked with making plots of winds at various levels of the atmosphere to support aviation. While I have been able to make some nice plots using GFS model data (see code below), I'm really having to make a rough approximation of height using the pressure coordinates available from the GFS. I'm using winds at 300 hPa, 700 hPa, and 925 hPa as approximations of the winds at 30,000 ft, 9,000 ft, and 3,000 ft. My question is really for the MetPy gurus out there: is there a way I can interpolate these winds to a height surface? It sure would be nice to get the actual winds at these height levels! Thanks for any light anyone can shed on this subject!
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
from netCDF4 import num2date
from datetime import datetime, timedelta
from siphon.catalog import TDSCatalog
from siphon.ncss import NCSS
from PIL import Image
from matplotlib import cm
# For the vertical levels we want to grab with our queries
# Levels need to be in Pa not hPa
Levels = [30000,70000,92500]
# Time deltas for days
Deltas = [1,2,3]
#Deltas = [1]
# Levels in hPa for the file names
LevelDict = {30000:'300', 70000:'700', 92500:'925'}
# The path to where our banners are stored
impath = 'C:\\Users\\shell\\Documents\\Python Scripts\\Banners\\'
# Final images saved here
imoutpath = 'C:\\Users\\shell\\Documents\\Python Scripts\\TVImages\\'
# Quick function for finding out which variable is the time variable in the
# netCDF files
def find_time_var(var, time_basename='time'):
    for coord_name in var.coordinates.split():
        if coord_name.startswith(time_basename):
            return coord_name
    raise ValueError('No time variable found for ' + var.name)
# Function to grab data at different levels from Siphon
def grabData(level):
    query.var = set()
    query.variables('u-component_of_wind_isobaric', 'v-component_of_wind_isobaric')
    query.vertical_level(level)
    data = ncss.get_data(query)
    u_wind_var = data.variables['u-component_of_wind_isobaric']
    v_wind_var = data.variables['v-component_of_wind_isobaric']
    time_var = data.variables[find_time_var(u_wind_var)]
    lat_var = data.variables['lat']
    lon_var = data.variables['lon']
    return u_wind_var, v_wind_var, time_var, lat_var, lon_var
# Construct a TDSCatalog instance pointing to the gfs dataset
best_gfs = TDSCatalog('http://thredds-jetstream.unidata.ucar.edu/thredds/catalog/grib/'
'NCEP/GFS/Global_0p5deg/catalog.xml')
# Pull out the dataset you want to use and look at the access URLs
best_ds = list(best_gfs.datasets.values())[1]
#print(best_ds.access_urls)
# Create NCSS object to access the NetcdfSubset
ncss = NCSS(best_ds.access_urls['NetcdfSubset'])
print(best_ds.access_urls['NetcdfSubset'])
# Looping through the forecast times
for delta in Deltas:
    # Create lat/lon box and the time(s) for the location you want to get data for
    now = datetime.utcnow()
    fcst = now + timedelta(days=delta)
    timestamp = datetime.strftime(fcst, '%A')
    query = ncss.query()
    query.lonlat_box(north=78, south=45, east=-90, west=-220).time(fcst)
    query.accept('netcdf4')
    # Now looping through the levels to create our plots
    for level in Levels:
        u_wind_var, v_wind_var, time_var, lat_var, lon_var = grabData(level)
        # Get actual data values and remove any size 1 dimensions
        lat = lat_var[:].squeeze()
        lon = lon_var[:].squeeze()
        u_wind = u_wind_var[:].squeeze()
        v_wind = v_wind_var[:].squeeze()
        # converting to knots
        u_windkt = u_wind * 1.94384
        v_windkt = v_wind * 1.94384
        wspd = np.sqrt(np.power(u_windkt, 2) + np.power(v_windkt, 2))
        # Convert number of hours since the reference time into an actual date
        time = num2date(time_var[:].squeeze(), time_var.units)
        print(time)
        # Combine 1D latitude and longitudes into a 2D grid of locations
        lon_2d, lat_2d = np.meshgrid(lon, lat)
        # Create new figure
        # fig = plt.figure(figsize=(18, 12))
        fig = plt.figure()
        fig.set_size_inches(26.67, 15)
        datacrs = ccrs.PlateCarree()
        plotcrs = ccrs.LambertConformal(central_longitude=-150,
                                        central_latitude=55,
                                        standard_parallels=(30, 60))
        # Add the map and set the extent
        ax = plt.axes(projection=plotcrs)
        ext = ax.set_extent([-195., -115., 50., 72.], datacrs)
        ext2 = ax.set_aspect('auto')
        ax.background_patch.set_fill(False)
        # Add state boundaries to plot
        ax.add_feature(cfeature.STATES, edgecolor='black', linewidth=2)
        # Add geopolitical boundaries for map reference
        ax.add_feature(cfeature.COASTLINE.with_scale('50m'))
        ax.add_feature(cfeature.OCEAN.with_scale('50m'))
        ax.add_feature(cfeature.LAND.with_scale('50m'), facecolor='#cc9666', linewidth=4)
        if level == 30000:
            spdrng_sped = np.arange(30, 190, 2)
            windlvl = 'Jet_Stream'
        elif level == 70000:
            spdrng_sped = np.arange(20, 100, 1)
            windlvl = '9000_Winds_Aloft'
        elif level == 92500:
            spdrng_sped = np.arange(20, 80, 1)
            windlvl = '3000_Winds_Aloft'
        else:
            pass
        top = cm.get_cmap('Greens')
        middle = cm.get_cmap('YlOrRd')
        bottom = cm.get_cmap('BuPu_r')
        newcolors = np.vstack((top(np.linspace(0, 1, 128)),
                               middle(np.linspace(0, 1, 128))))
        newcolors2 = np.vstack((newcolors, bottom(np.linspace(0, 1, 128))))
        cmap = ListedColormap(newcolors2)
        cf = ax.contourf(lon_2d, lat_2d, wspd, spdrng_sped, cmap=cmap,
                         transform=datacrs, extend='max', alpha=0.75)
        cbar = plt.colorbar(cf, orientation='horizontal', pad=0, aspect=50,
                            drawedges='true')
        cbar.ax.tick_params(labelsize=16)
        wslice = slice(1, None, 4)
        ax.quiver(lon_2d[wslice, wslice], lat_2d[wslice, wslice],
                  u_windkt[wslice, wslice], v_windkt[wslice, wslice], width=0.0015,
                  headlength=4, headwidth=3, angles='xy', color='black', transform=datacrs)
        plt.savefig(imoutpath + 'TV_UpperAir' + LevelDict[level] + '_' + timestamp + '.png', bbox_inches='tight')
        # Now we use Pillow to overlay the banner with the appropriate day
        background = Image.open(imoutpath + 'TV_UpperAir' + LevelDict[level] + '_' + timestamp + '.png')
        im = Image.open(impath + 'Banner_' + windlvl + '_' + timestamp + '.png')
        # resize the image
        size = background.size
        im = im.resize(size, Image.ANTIALIAS)
        background.paste(im, (17, 8), im)
        background.save(imoutpath + 'TV_UpperAir' + LevelDict[level] + '_' + timestamp + '.png', 'PNG')
Thanks for the question! My approach here is, for each separate column, to interpolate the pressure coordinate of the GFS-output geopotential height onto your provided altitudes, to estimate the pressure of each height level for each column. Then I can use that pressure to interpolate the GFS-output u and v onto. The GFS-output GPH and winds have very slightly different pressure coordinates, which is why I interpolated twice. I performed the interpolation using MetPy's interpolate.log_interpolate_1d, which performs a linear interpolation on the log of the inputs. Here is the code I used!
from datetime import datetime

import numpy as np
import metpy.calc as mpcalc
from metpy.units import units
from metpy.interpolate import log_interpolate_1d
from siphon.catalog import TDSCatalog

gfs_url = 'https://tds.scigw.unidata.ucar.edu/thredds/catalog/grib/NCEP/GFS/Global_0p5deg/catalog.xml'
cat = TDSCatalog(gfs_url)
now = datetime.utcnow()

# A shortcut to NCSS
ncss = cat.datasets['Best GFS Half Degree Forecast Time Series'].subset()
query = ncss.query()
query.var = set()
query.variables('u-component_of_wind_isobaric', 'v-component_of_wind_isobaric', 'Geopotential_height_isobaric')
query.lonlat_box(north=78, south=45, east=-90, west=-220)
query.time(now)
query.accept('netcdf4')
data = ncss.get_data(query)

# Reading in the u(isobaric), v(isobaric), isobaric vars and the GPH(isobaric6) and isobaric6 vars
# These are two slightly different vertical pressure coordinates.
# We will also assign units here, and this can allow us to go ahead and convert to knots
lat = units.Quantity(data.variables['lat'][:].squeeze(), units('degrees'))
lon = units.Quantity(data.variables['lon'][:].squeeze(), units('degrees'))
iso_wind = units.Quantity(data.variables['isobaric'][:].squeeze(), units('Pa'))
iso_gph = units.Quantity(data.variables['isobaric6'][:].squeeze(), units('Pa'))
u = units.Quantity(data.variables['u-component_of_wind_isobaric'][:].squeeze(), units('m/s')).to(units('knots'))
v = units.Quantity(data.variables['v-component_of_wind_isobaric'][:].squeeze(), units('m/s')).to(units('knots'))
gph = units.Quantity(data.variables['Geopotential_height_isobaric'][:].squeeze(), units('gpm'))

# Here we will select our altitudes to interpolate onto and convert them to geopotential meters
altitudes = ([30000., 9000., 3000.] * units('ft')).to(units('gpm'))

# Now we will interpolate the pressure coordinate for model output geopotential height to
# estimate the pressure level for our given altitudes at each grid point
pressures_of_alts = np.zeros((len(altitudes), len(lat), len(lon)))
for ilat in range(len(lat)):
    for ilon in range(len(lon)):
        pressures_of_alts[:, ilat, ilon] = log_interpolate_1d(altitudes,
                                                              gph[:, ilat, ilon],
                                                              iso_gph)
pressures_of_alts = pressures_of_alts * units('Pa')

# Similarly, we will use our interpolated pressures to interpolate
# our u and v winds across their given pressure coordinates.
# This will provide u, v at each of our interpolated pressure
# levels corresponding to our provided initial altitudes
u_at_levs = np.zeros((len(altitudes), len(lat), len(lon)))
v_at_levs = np.zeros((len(altitudes), len(lat), len(lon)))
for ilat in range(len(lat)):
    for ilon in range(len(lon)):
        u_at_levs[:, ilat, ilon], v_at_levs[:, ilat, ilon] = log_interpolate_1d(pressures_of_alts[:, ilat, ilon],
                                                                                iso_wind,
                                                                                u[:, ilat, ilon],
                                                                                v[:, ilat, ilon])
u_at_levs = u_at_levs * units('knots')
v_at_levs = v_at_levs * units('knots')

# We can use mpcalc to calculate a wind speed array from these
wspd = mpcalc.wind_speed(u_at_levs, v_at_levs)
I was able to take my output from this and coerce it into your plotting code (with some unit stripping.)
Your 300-hPa GFS winds
My "30000-ft" GFS winds
Here is what my interpolated pressure fields at each estimated height level look like.
Hope this helps!
I am not sure if this is what you are looking for (I am very new to MetPy), but I have been using the MetPy height_to_pressure_std(altitude) function. It returns the value in hPa, which I then convert to pascals and then to a unitless value to use in Siphon's vertical_level(float) function.
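For reference, a minimal sketch of that conversion (assuming MetPy with pint units and an NCSS query object like the one in the code above; the 9000 ft value is just an example):

import metpy.calc as mpcalc
from metpy.units import units

p = mpcalc.height_to_pressure_std(9000 * units.feet)  # standard-atmosphere pressure, returned in hPa
p_pa = p.to(units('Pa')).magnitude                    # unitless value in Pa
query.vertical_level(float(p_pa))                     # pass to Siphon's vertical_level()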
I don't think you can use MetPy functions to convert height to pressure (or vice versa) in the upper atmosphere. The errors are too large when using the Standard Atmosphere to convert, say, pressure to feet.

Use numpy arrays as arguments in odeint

I am trying to solve a system of differential equations using odeint. I have 4 txt files (that look like the picture below). I read them and save them in numpy arrays (length 8000) (maybe not in the most effective way, but anyway...). I want to pass these 4 arrays as arguments to odeint and solve the system. For example, at every time step odeint takes (one of the 8000), I want it to use a different value from these arrays. Is there any way to do it automatically without getting lost in for loops? I tried to do it like this (see code below) but I get the error:
if g2>0: ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
g2 is supposed to be a scalar (1x1) at every step of odeint, so it has to be something with the way I use the 4 arrays (xdot, ydot, xdotdot, ydotdot).
I am new to Python, and I am using Python 2.7.12 on Ubuntu 16.04 LTS.
Thank you in advance.
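For context on the error above: when the whole 8000-element arrays are passed into the right-hand-side function, g2 is computed from arrays and becomes an array itself, so Python cannot decide the truth value of if g2>0. A tiny illustration with made-up values:

import numpy as np

g2 = np.array([0.2, -0.1, 0.4])   # what g2 effectively becomes when full arrays are passed in
try:
    if g2 > 0:                    # ambiguous: which element should decide the branch?
        pass
except ValueError as err:
    print(err)                    # "The truth value of an array with more than one element is ambiguous..."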
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
added_mass_x = 0.03 # kg
added_mass_y = 0.04
mb = 0.3 # kg
m1 = mb-added_mass_x
m2 = mb-added_mass_y
l1 = 0.07 # m
l2 = 0.05 # m
J = 0.00050797 # kgm^2
Sa = 0.0110 # m^2
Cd = 2.44
Cl = 3.41
Kd = 0.000655 # kgm^2
r = 1000 # kg/m^3
c1 = 0.5*r*Sa*Cd
c2 = 0.5*r*Sa*Cl
c3 = 0.5*mb*(l1**2)
c4 = Kd/J
c5 = (1/(2*J))*(l1**2)*mb*l2
c6 = (1/(3*J))*(l1**3)*mb
theta_0 = 10*(np.pi/180) # rad
theta_A = 20*(np.pi/180) # rad
f = 2 # Hz
###################################################################
t = np.linspace(0,100,8000) # s
###################################################################
# Save data from txt files into numpy arrays
xdot_list = []
ydot_list = []
xdotdot_list = []
ydotdot_list = []

with open('xdot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        xdot_list.append(current_place)
xdot = np.array(xdot_list, dtype=np.float32)

with open('ydot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        ydot_list.append(current_place)
ydot = np.array(ydot_list, dtype=np.float32)

with open('xdotdot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        xdotdot_list.append(current_place)
xdotdot = np.array(xdotdot_list, dtype=np.float32)

with open('ydotdot.txt', 'r') as filehandle:
    filecontents = filehandle.readlines()
    for line in filecontents:
        current_place = line[:-1]
        ydotdot_list.append(current_place)
ydotdot = np.array(ydotdot_list, dtype=np.float32)
def inverse(k,t,xdot,ydot,xdotdot,ydotdot):
    vcx_i = k[0]
    vcy_i = k[1]
    psi_i = k[2]
    wz_i = k[3]
    theta_i = k[4]
    theta_deg_i = k[5]
    # Subsystem 4
    vcx_i = xdot*np.cos(psi_i)-ydot*np.sin(psi_i)
    vcy_i = ydot*np.cos(psi_i)+xdot*np.sin(psi_i)
    psidot_i = wz_i
    vcxdot_i = xdotdot*np.cos(psi_i)-xdot*np.sin(psi_i)*psidot_i-ydotdot*np.sin(psi_i)-ydot*np.cos(psi_i)*psidot_i
    vcydot_i = ydotdot*np.cos(psi_i)-ydot*np.sin(psi_i)*psidot_i+xdotdot*np.sin(psi_i)+xdot*np.cos(psi_i)*psidot_i
    g1 = -(m1/c3)*vcxdot_i+(m2/c3)*vcy_i*wz_i-(c1/c3)*vcx_i*np.sqrt((vcx_i**2)+(vcy_i**2))+(c2/c3)*vcy_i*np.sqrt((vcx_i**2)+(vcy_i**2))*np.arctan2(vcy_i,vcx_i)
    g2 = (m2/c3)*vcydot_i+(m1/c3)*vcx_i*wz_i+(c1/c3)*vcy_i*np.sqrt((vcx_i**2)+(vcy_i**2))+(c2/c3)*vcx_i*np.sqrt((vcx_i**2)+(vcy_i**2))*np.arctan2(vcy_i,vcx_i)
    A = 12*np.sin(2*np.pi*f*t+np.pi)  # tail-frequency equation from Simulink
    if A >= 0.1:
        wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2-c6*np.sqrt((g1**2)+(g2**2))
    elif A < -0.1:
        wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2+c6*np.sqrt((g1**2)+(g2**2))
    else:
        wzdot_i = ((m1-m2)/J)*vcx_i*vcy_i-c4*wz_i**2*np.sign(wz_i)-c5*g2
    # Subsystem 5
    if g2 > 0:
        theta_i = np.arctan2(g1,g2)
    elif g2 < 0 and g1 >= 0:
        theta_i = np.arctan2(g1,g2)-np.pi
    elif g2 < 0 and g1 < 0:
        theta_i = np.arctan2(g1,g2)+np.pi
    elif g2 == 0 and g1 > 0:
        theta_i = -np.pi/2
    elif g2 == 0 and g1 < 0:
        theta_i = np.pi/2
    elif g1 == 0 and g2 == 0:
        theta_i = 0
    theta_deg_i = (theta_i*180)/np.pi
    return [vcxdot_i, vcydot_i, psidot_i, wzdot_i, theta_i, theta_deg_i]
# initial conditions
vcx_i_0 = 0.1257
vcy_i_0 = 0
psi_i_0 = 0
wz_i_0 = 0
theta_i_0 = 0
theta_deg_i_0 = 0
#theta_i_0 = 0.1745
#theta_deg_i_0 = 9.866
k0 = [vcx_i_0, vcy_i_0, psi_i_0, wz_i_0, theta_i_0, theta_deg_i_0]
# solve the system of differential equations
k = odeint(inverse, k0, t, args=(xdot,ydot,xdotdot,ydotdot), tfirst=False)
# store the results
vcx_i = k[:,0]
vcy_i = k[:,1]
psi_i = k[:,2]
wz_i = k[:,3]
theta_i = k[:,4]
theta_deg_i = k[:,5]
# Re-run Subsystem 5 so that theta_i and theta_deg_i can be plotted
theta_i = [inverse(k_i, t_i)[4] for t_i, k_i in zip(t, k)]
theta_deg_i = [inverse(k_i, t_i)[5] for t_i, k_i in zip(t, k)]
# Compute the mean theta angle and the oscillation amplitude
mesi_gwnia = sum(theta_i)/len(theta_i) # rad
platos = (max(theta_i)-min(theta_i))/2
UPDATE:
The most relevant solution I found so far is this:
Solving a system of odes (with changing constant!) using scipy.integrate.odeint?
But since I only have the values of my variables in arrays, and not equations for the variables as functions of time (e.g. xdot = f(t)), I tried to apply an interpolation between the values in my arrays, as shown here: ODEINT with multiple parameters (time-dependent)
I managed to make the code run without errors, but the total time increased dramatically and the results of the solved system are completely wrong. I tried every type of interpolation that I found here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html but still got a wrong outcome. That means that my interpolation isn't the best possible, or that my points in the arrays (8000 values) are too many to interpolate between and still solve the system correctly.
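For what it's worth, the interpolation approach mentioned above usually looks something like the sketch below (re-using t and the four arrays from the question's code; linear interpolation is just one choice): build the interp1d functions once, outside the ODE function, and evaluate them at the solver's current time inside it, so each call receives a scalar instead of a whole array.

from scipy.interpolate import interp1d

# Build the interpolants once, from the arrays read above (t and the arrays are length 8000)
xdot_f = interp1d(t, xdot, kind='linear', fill_value='extrapolate')
ydot_f = interp1d(t, ydot, kind='linear', fill_value='extrapolate')
xdotdot_f = interp1d(t, xdotdot, kind='linear', fill_value='extrapolate')
ydotdot_f = interp1d(t, ydotdot, kind='linear', fill_value='extrapolate')

# Inside inverse(k, t, ...) the forcing terms would then be evaluated as scalars:
#     xdot_i = float(xdot_f(t))
#     ydot_i = float(ydot_f(t))
#     xdotdot_i = float(xdotdot_f(t))
#     ydotdot_i = float(ydotdot_f(t))
# and the interpolant functions are passed through args=(xdot_f, ydot_f, xdotdot_f, ydotdot_f).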
