I have a Praat script that extracts formant information from a folder of wave files:
clearinfo
min_f0 = 75
max_f0 = 350
directory$ = "./soundfiles/"
outputDir$ = "./test/"
strings = Create Strings as file list: "list", directory$ + "/*.WAV"
numberOfFiles = Get number of strings
for ifile to numberOfFiles
    select Strings list
    filename$ = Get string... ifile
    Read from file... 'directory$''filename$'
    soundname$ = selected$ ("Sound", 1)
    outputFileName$ = outputDir$ + soundname$ + ".f0123"
    appendInfoLine: outputFileName$
    select Sound 'soundname$'
    formant = To Formant (burg): 0, 4, 5000, 0.025, 50
    formantStep = Get time step
    selectObject: formant
    table = Down to Table: "no", "yes", 6, "yes", 3, "yes", 3, "yes"
    numberOfRows = Get number of rows
    select Sound 'soundname$'
    pitch = To Pitch: 0, min_f0, max_f0
    selectObject: table
    Append column: "Pitch"
    for step to numberOfRows
        selectObject: table
        t = Get value: step, "time(s)"
        selectObject: pitch
        pitchValue = Get value at time: t, "Hertz", "Nearest"
        selectObject: table
        Set numeric value: step, "Pitch", pitchValue
    endfor
    # export to csv
    selectObject: table
    Save as comma-separated file: outputFileName$
    removeObject(table)
    select all
    minus Strings list
    Remove
endfor
select all
Remove
exit
And it generates the following output:
time(s),intensity,nformants,F1(Hz),B1(Hz),F2(Hz),B2(Hz),F3(Hz),B3(Hz),F4(Hz),B4(Hz),Pitch
0.025370,0.000007,3,213.115,14.053,2385.911,791.475,3622.099,677.605,--undefined--,--undefined--,--undefined--
0.031620,0.000007,3,208.843,15.034,2487.710,687.736,3818.027,645.184,--undefined--,--undefined--,197.5315925472943
...
This works great for what I need, but is there a way to get the intensity of each formant as well? Right now I only have the one intensity estimate.
It's an old question, but I'll still respond.
I ran into this too, back in 2002, when I was creating an editor for a hardware formant synthesizer (FS1R). I used Praat to do the wav -> formant tracks calculation, and the synthesizer expects formant frequencies and intensities as input.
I implemented several algorithms for it, but the one with the most realistic results evaluated the intensity of each formant at each frame of the spectrogram.
Here's the code that I used for that. Keep in mind that my goal was to get a list of 512 frames, each with up to 8 frequency/intensity pairs, plus a fundamental pitch.
# Add to dynamic menu... Sound 1 "" 0 "" 0 "Sine-wave speech" Resample... 1 yourdisk:Praat:scripts:SWS
form Add Sounds
    word wavePath e:\samples\wav\root\
    word waveFile DOUG.wav
    word OutPath e:\samples\wav\root\
    integer minFP 75
    integer maxFP 500
    integer maxFF 5000
    integer Amp_low_pass_freq 50
    integer Formant_low_pass_freq 20
endform
echo Wave to FSeq - FORMANT EXTRACTION
echo -------------------------------------------------------
# LOAD WAVEFILE
echo loading 'wavePath$''waveFile$'
Read from file... 'wavePath$''waveFile$'
if numberOfSelected ("Sound") <> 1
    pause Select one Sound then Continue
endif
snd$ = selected$("Sound", 1)
snd = selected("Sound", 1)
sampleRate = Get sample rate
numSamples = Get number of samples
dur = Get duration
zzz = 512/509*512
timeStep = dur/zzz
echo samplerate : 'sampleRate' hertz
echo number of samples : 'numSamples'
echo duration : 'dur' seconds
echo timestep : 'timeStep' seconds
echo
# GET FUNDAMENTAL PITCH
echo getting fundamental pitch
# this was the old method, used until FSeqEdit 1.21:
# To Pitch... 'timeStep' 'minFP' 'maxFP'
# Interpolate
# this algorithm seems to work better
To Pitch (ac)... 'timeStep' 'minFP' 15 no 1e-06 0.1 0.01 1 1 'maxFP'
Kill octave jumps
Interpolate
select Pitch 'snd$'
Write to short text file... 'outPath$'pitch.txt
select Pitch 'snd$'
Remove
# GET VOICED/UNVOICED INFORMATION
echo getting voiced/unvoiced information
select Pitch 'snd$'
To PointProcess
select PointProcess 'snd$'
To TextGrid (vuv)... 0.02 'timeStep'
select TextGrid 'snd$'
Write to short text file... 'outPath$'vuv.txt
#create wide-band spectrogram for finding formant amplitudes
# To Spectrogram... analysis_width max_freq time_step freq_step window_shape
echo to spectrogram
select 'snd'
To Spectrogram... 0.003 'maxFF' 0.001 40 Gaussian
select 'snd'
echo finding formants
To Formant (burg)... 'timeStep' 8 'maxFF' 0.025 50
Rename... untrack
Track... 6 'maxFP' 'maxFP'*3 'maxFP'*5 'maxFP'*7 'maxFP'*9 1 0.1 1
Rename... 'snd$'
select Formant untrack
Remove
select 'snd'
#start of main formant loop
#===========================
#for each chosen formant turn formant tracks into
#a Matrix then a Sound object for optional low-pass filtering
#NB this Sound object is the formant TRACK
#then back into a Matrix object for sound synthesis
for i from 1 to 6
    # make a matrix from Fi
    select Formant 'snd$'
    echo extracting formant 'i'
    To Matrix... 'i'
    Rename... f'i'
    # low-pass filter the formant track and tidy up the names
    # filtering needs a Sound object, so cast as Sound, filter, and then back to Matrix
    if formant_low_pass_freq <> 0
        To Sound (slice)... 1
        Filter (pass Hann band)... 0 'formant_low_pass_freq' 'formant_low_pass_freq'
        Down to Matrix
        select Matrix f'i'
        Remove
        select Matrix f'i'_band
        Rename... f'i'
        select Sound f'i'
        plus Sound f'i'_band
        Remove
    endif
    # set up amplitude contour array (sample only at 1kHz) for the i'th formant;
    # make it a Sound object so that it can be smoothed by filtering
    Create Sound... amp'i' 0 'dur' 1000 sqrt(Spectrogram_'snd$'(x,Matrix_f'i'(x)))
    # smooth out pitch amplitude modulation by low-pass filtering
    if amp_low_pass_freq <> 0
        Filter (pass Hann band)... 0 'amp_low_pass_freq' 'amp_low_pass_freq'
        select Sound amp'i'
        Remove
        select Sound amp'i'_band
        Rename... amp'i'
    endif
    Extract part... 0 'dur' Rectangular 1 yes
    To Intensity... 'minFP' 0
    Write to short text file... 'outPath$'amp'i'.txt
    select Matrix f'i'
    Remove
endfor
#===========================
#end of the main formant loop
select Formant 'snd$'
Write to short text file... 'outPath$'formant.txt
#tidy-up
select Spectrogram 'snd$'
plus Formant 'snd$'
plus Pitch 'snd$'
plus PointProcess 'snd$'
plus TextGrid 'snd$'
Remove
echo
echo -------------------------------------------------------
echo done.
I'm not sure if this is what you need, but based on the comment from @nikolay-shmyrev, this is how you'd insert the measurement of formant intensity from Spectrogram objects into your script.
I seem to be inoculated against the pain of scripting using Praat...
I simplified the script below so that it works only on the currently selected Sound object (for testing), and it simply keeps the generated Table (so you can check it out), but it should point you in the right direction.
form Script...
    positive Minimum_F0 75
    positive Maximum_F0 350
    positive Formants 4
endform
sound = selected("Sound")
pitch = To Pitch: 0, minimum_F0, maximum_F0
# You need this for the intensity
selectObject: sound
spectrogram = To Spectrogram: 0.005, 5000, 0.002, 20, "Gaussian"
selectObject: sound
formant = To Formant (burg): 0, formants, 5000, 0.025, 50
table = Down to Table: "no", "yes", 6, "yes", 3, "yes", 3, "yes"
Append column: "Pitch"
# Insert columns for each formant intensity
# (labeled here as "I#", where # is the formant index)
for f to formants
    index = Get column index: "F" + string$(f) + "(Hz)"
    Insert column: index + 1, "I" + string$(f)
endfor
for row to Object_'table'.nrow
    selectObject: table
    time = Object_'table'[row, "time(s)"]
    # Get the intensity of each formant
    for f to formants
        frequency = Object_'table'[row, "F" + string$(f) + "(Hz)"]
        selectObject: spectrogram
        if frequency != undefined
            intensity = Get power at: time, frequency
        else
            intensity = undefined
        endif
        selectObject: table
        Set string value: row, "I" + string$(f), fixed$(intensity, 3)
    endfor
    selectObject: pitch
    pitchValue = Get value at time: time, "Hertz", "Nearest"
    selectObject: table
    Set string value: row, "Pitch", fixed$(pitchValue, 3)
endfor
removeObject: spectrogram, formant, pitch
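If you would rather drive these same steps from Python, the parselmouth library exposes the Praat commands used above almost verbatim. A minimal sketch, assuming parselmouth is installed ("sound.wav" is a placeholder; the analysis parameters are copied from the script above):

import math
import parselmouth
from parselmouth.praat import call

snd = parselmouth.Sound("sound.wav")
pitch = call(snd, "To Pitch", 0.0, 75, 350)
spectrogram = call(snd, "To Spectrogram", 0.005, 5000.0, 0.002, 20.0, "Gaussian")
formant = call(snd, "To Formant (burg)", 0.0, 4, 5000.0, 0.025, 50.0)

n_frames = int(call(formant, "Get number of frames"))
for frame in range(1, n_frames + 1):
    t = call(formant, "Get time from frame number", frame)
    for f in range(1, 5):
        freq = call(formant, "Get value at time", f, t, "Hertz", "Linear")
        if not math.isnan(freq):
            # same "Get power at" query used on the Spectrogram above
            power = call(spectrogram, "Get power at", t, freq)
            print(t, f, freq, power)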
I have 12 files all with the same format:
Statistics Information
Line: 4
Fiducial range: 156 to 364
Number of items: 209
Number of dummies: 0
Minimum value: -0.08983668870989447
Maximum value: 0.059795797205623558
Mean value: -0.00884060126461031
Standard deviation: 0.03707261357656038
Arithmetic sum: -1.8476856643035546
Each file is for a manoeuvre in a specific direction: North (Pitch, Roll, Yaw), South (Pitch, Roll, Yaw), East (Pitch, Roll, Yaw) and West (Pitch, Roll, Yaw).
I want to cycle through each of these text files and store the minimum, maximum, and mean value from each file, then export them in a table:
NORTH
       Pitch                  Roll  Yaw
Min    -0.08983668870989447
Max    0.059795797205623558
Mean   -0.00884060126461031

SOUTH
       Pitch                  Roll  Yaw
Min
Max
Mean

et cetera
So far I have managed to list the different files and then extract the first line:
import glob

txt_files = glob.glob("*.txt")

def read_first_line(txt_file):
    with open(txt_file, 'rt') as fd:
        first_line = fd.readline()
    return first_line

output_strings = map(read_first_line, txt_files)  # apply the read-first-line function to all text files
print(txt_files)
output_content = "".join(sorted(output_strings))
print(output_content)  # print as formatted
with open('outfile.txt', 'wt') as fd:
    fd.write(output_content)
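To pull the three statistics out of each file rather than just the first line, here is a minimal sketch. It assumes every file follows the exact "Statistics Information" layout shown above, and it writes a flat one-row-per-file table rather than the grouped layout:

import glob

def read_stats(path):
    # map the labels in the file to the row names we want
    wanted = {"Minimum value": "Min", "Maximum value": "Max", "Mean value": "Mean"}
    stats = {}
    with open(path, 'rt') as fd:
        for line in fd:
            key, _, value = line.partition(":")
            if key.strip() in wanted:
                stats[wanted[key.strip()]] = float(value)
    return stats

with open('outfile.txt', 'wt') as fd:
    fd.write("file\tMin\tMax\tMean\n")
    for path in sorted(glob.glob("*.txt")):
        s = read_stats(path)
        fd.write(f"{path}\t{s['Min']}\t{s['Max']}\t{s['Mean']}\n")

Grouping the rows by direction (North/South/East/West) then depends on how the file names encode the direction and axis, which isn't shown in the question.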
The goal is to subsample a data frame.
code:
# 1. the date column is of datetime type
dg.Yr_Mo_Dy = pd.to_datetime(dg.Yr_Mo_Dy, format='%Y%m%d')
# 2. the date is set as the index
dg = dg.set_index(dg.Yr_Mo_Dy, drop=True)
# 3. group the values and take the mean
dg.resample('1AS').mean().mean()
That gives:
RPT 14.847325
VAL 12.914560
ROS 13.299624
KIL 7.199498
SHA 11.667734
BIR 8.054839
DUB 11.819355
CLA 9.512047
MUL 9.543208
CLO 10.053566
BEL 14.550520
MAL 18.028763
dtype: float64
The code takes the values 10 at a time and returns the average of those 10 intermediate values.
Similarly, it is also possible to sum these 10 values by replacing mean() with sum().
However, what I want to do is not an average but a sampling; that is, to keep only one value out of each group, without averaging or summing the intermediate values.
For example, the data 1,2,3,4,5,6... sampled by 0.5 gives 2,4,6..., and not 1.5,2.5,3.5,5.5...
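If the goal is to keep one existing value per group instead of aggregating, positional slicing or a pick-style reducer does that. A minimal sketch on a toy series (dg from the question works the same way through its datetime index):

import pandas as pd

s = pd.Series([1, 2, 3, 4, 5, 6])
# positional decimation: keep every 2nd value -> [2, 4, 6]
print(s.iloc[1::2].tolist())

# on a datetime-indexed frame such as dg, take one existing value per
# period instead of the average, e.g. the last observation of each year:
# dg.resample('1AS').last()
# or reindex to a coarser frequency with no aggregation at all:
# dg.asfreq('AS')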
Hey, so I am trying my hand at image classification/transfer learning using the monkey species dataset and ResNet50 with a modified final FC layer to predict just the 10 classes. Everything is working until I use model.train() and model.eval(); after the first epoch the model starts to return NaNs and the accuracy drops off, as you'll see below. I'm curious why this happens only when switching to train/eval.
First I import the model, attach the classifier, and freeze the parameters:
%%capture
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
    param.required_grad = False

in_features = resnet.fc.in_features

# Build custom classifier
classifier = nn.Sequential(OrderedDict([('fc1', nn.Linear(in_features, 512)),
                                        ('relu', nn.ReLU()),
                                        ('drop', nn.Dropout(0.05)),
                                        ('fc2', nn.Linear(512, 10)),
                                        ]))
# ('output', nn.LogSoftmax(dim=1))
resnet.classifier = classifier
resnet.to(device)
Then setting my loss function, optimizer, and scheduler:
# Step : Define criterion and optimizer
criterion = nn.CrossEntropyLoss()
# pass the optimizer to the appended classifier layer
optimizer = torch.optim.SGD(resnet.parameters(), lr=0.01)
# Scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10], gamma=0.05)
Then setting up the training and validation loops:
epochs = 20
tr_losses = []
avg_epoch_tr_loss = []
tr_accuracy = []
val_losses = []
avg_epoch_val_loss = []
val_accuracy = []
val_loss_min = np.Inf

resnet.train()
for epoch in range(epochs):
    for i, batch in enumerate(train_loader):
        # Pull the data and labels from the batch
        data, label = batch
        # If available, push data and label to GPU
        if train_on_gpu:
            data, label = data.to(device), label.to(device)
        # Compute the logit
        logit = resnet(data)
        # Compute loss
        loss = criterion(logit, label)
        # Clearing the gradient
        resnet.zero_grad()
        # Backpropagate the gradients (accumulate the partial derivatives of loss)
        loss.backward()
        # Apply the updates, stepping in the opposite direction to the gradient
        optimizer.step()
        # Store the losses of each batch
        # loss.item() separates the loss from the comp graph
        tr_losses.append(loss.item())
        # Detach and store the average accuracy of each batch
        tr_accuracy.append(label.eq(logit.argmax(dim=1)).float().mean())
        # Print the rolling batch training loss every 40 batches
        if i % 40 == 0 and not i == 1:
            print(f'Batch No: {i} \tAverage Training Batch Loss: {torch.tensor(tr_losses).mean():.2f}')
    # Print the average loss for each epoch
    print(f'\nEpoch No: {epoch + 1},Training Loss: {torch.tensor(tr_losses).mean():.2f}')
    # Print the average accuracy for each epoch
    print(f'Epoch No: {epoch + 1}, Training Accuracy: {torch.tensor(tr_accuracy).mean():.2f}\n')
    # Store the avg epoch loss for plotting
    avg_epoch_tr_loss.append(torch.tensor(tr_losses).mean())

    resnet.eval()
    for i, batch in enumerate(val_loader):
        # Pull the data and labels from the batch
        data, label = batch
        # If available, push data and label to GPU
        if train_on_gpu:
            data, label = data.to(device), label.to(device)
        # Compute the logits without computing the gradients
        with torch.no_grad():
            logit = resnet(data)
        # Compute loss
        loss = criterion(logit, label)
        # Store test loss
        val_losses.append(loss.item())
        # Store the accuracy for each batch
        val_accuracy.append(label.eq(logit.argmax(dim=1)).float().mean())
        if i % 20 == 0 and not i == 1:
            print(f'Batch No: {i+1} \tAverage Val Batch Loss: {torch.tensor(val_losses).mean():.2f}')
    # Print the average loss for each epoch
    print(f'\nEpoch No: {epoch + 1}, Epoch Val Loss: {torch.tensor(val_losses).mean():.2f}')
    # Print the average accuracy for each epoch
    print(f'Epoch No: {epoch + 1}, Epoch Val Accuracy: {torch.tensor(val_accuracy).mean():.2f}\n')
    # Store the avg epoch loss for plotting
    avg_epoch_val_loss.append(torch.tensor(val_losses).mean())
    # Checkpointing the model using a val loss threshold
    if torch.tensor(val_losses).float().mean() <= val_loss_min:
        print("Epoch Val Loss Decreased... Saving model")
        # save current model
        torch.save(resnet.state_dict(), '/content/drive/MyDrive/1. Full Projects/Intel Image Classification/model_state.pt')
        val_loss_min = torch.tensor(val_losses).mean()
    # Step the scheduler for the next epoch
    scheduler.step()
    # Print the updated learning rate
    print('Learning Rate Set To: {:.5f}'.format(optimizer.state_dict()['param_groups'][0]['lr']), '\n')
The model starts to train, but then the losses slowly turn into NaN values:
Batch No: 0 Average Training Batch Loss: 9.51
Batch No: 40 Average Training Batch Loss: 1.71
Batch No: 80 Average Training Batch Loss: 1.15
Batch No: 120 Average Training Batch Loss: 0.94
Epoch No: 1,Training Loss: 0.83
Epoch No: 1, Training Accuracy: 0.78
Batch No: 1 Average Val Batch Loss: 0.39
Batch No: 21 Average Val Batch Loss: 0.56
Batch No: 41 Average Val Batch Loss: 0.54
Batch No: 61 Average Val Batch Loss: 0.54
Epoch No: 1, Epoch Val Loss: 0.55
Epoch No: 1, Epoch Val Accuracy: 0.81
Epoch Val Loss Decreased... Saving model
Learning Rate Set To: 0.01000
Batch No: 0 Average Training Batch Loss: 0.83
Batch No: 40 Average Training Batch Loss: nan
Batch No: 80 Average Training Batch Loss: nan
I see that resnet.zero_grad() is called after logit = resnet(data), which causes the gradient to explode in your case.
Please do it as below:
# Clearing the gradient
optimizer.zero_grad()
logit = resnet(data)
# Compute loss
loss = criterion(logit, label)
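For completeness, a sketch of one full training step in the usual order, reusing the question's variable names:

optimizer.zero_grad()           # clear the old gradients first
logit = resnet(data)            # forward pass
loss = criterion(logit, label)  # compute the loss
loss.backward()                 # backpropagate
optimizer.step()                # update the parameters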
This is the script that I have. It works up to the ------ separation. Below that, I do not get any error from Matlab, but neither do I get a return of bestDx nor bestDy. Please help. (The first part is given just to put you in context.)
%%
% Variables after running script Read_eA3_file.m
%date_time_UTC
%reflectivity
%clutter_mask
%Convert units
dBZ = reflectivity * 0.375 - 30;
dBZ_Mask = clutter_mask * 0.375 - 30;
%Replace clutter values with NaN
weather = NaN(size(dBZ)); %initialise to constant
weather(dBZ>=dBZ_Mask) = dBZ(dBZ>=dBZ_Mask); %copy values when A >= B
%Reduce to range -- those are 384x384 arrays
dBZ_range = dBZ(:,:,1:16); %16:18 to 16:23 included
weather_range = weather(:,:,1:16); %16:18 to 16:23 included
weather1618 = weather(:,:,1); %16:18 map only
weather1623 = weather(:,:,16); %16:23 map only
% Plot maps
image(imrotate(-weather1618,90)); %of 16:18
image(imrotate(-weather1623,90)); %of 16:23
%Find x,y of strongest dBZ
%Since the values are all negative, I look for their minimum
[M,I] = min(weather1618(:)); %for 16:18
[I_row, I_col] = ind2sub(size(weather1618),I); %values are 255 and 143
[M2,I2] = min(weather1623(:)); %for 16:23
[I2_row, I2_col] = ind2sub(size(weather1623),I2); %values are 223 and 7
%Calc displacement
%I get a value of 139.7140
max_displ=sqrt((I2_row-I_row)^2+(I2_col-I_col)^2); %between 1618 and 1623
%%
% -----Section below does not work; ONLY RUN the section ABOVE---------
%% Find Dx Dy for max_corr between two maps
maxCoeff=0;
weather1618Modified = zeros(384,384); %create weather array for time range
%weather1618Modified(:) = {NaN}; % Matlab cannot mix cell & double
%%
for x = 1:384
    for y = 1:384
        %30 pixel appx.
        for Dx = -max_displ:30:max_displ
            for Dy = -max_displ:30:max_displ
                %Limit range of x+Dx and y+Dy to 1:384
                if x+Dx<1 | y+Dy<1 | x+Dx>384 | y+Dy>384
                    continue
                    %weather1618Modified is the forecasted weather1623
                    weather1618Modified(x+Dx,y+Dy) = weather1618(x,y)
                    %Find the best correlation; Is corrcoef the right formula?
                    newCoeff=corrcoef(weather1623,weather1618Modified);
                    if newCoeff>maxCoeff
                        maxCoeff=newCoeff;
                        bestDx=Dx;
                        bestDy=Dy;
                    end
                end
            end
        end
    end
end
%% Calc displacement
bestDispl = sqrt(bestDx^2+bestDy^2); %bestDispl for a 5 min frame
%Calc speed
speed = bestDispl/time;
You have to delete the continue statement after the first if (or place it somewhere else, closing that if with an end of its own).
The continue statement makes the program skip the remaining part of the for-loop body and go directly to the next iteration; since all the assignments sit below it inside that if block, bestDx and bestDy will never be set.
Documentation: https://se.mathworks.com/help/matlab/ref/continue.html
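The same pattern in a minimal Python sketch (Python just for illustration; MATLAB's continue behaves the same way):

best_dx = None
for dx in range(-3, 4):
    if abs(dx) > 2:
        continue
        # everything below the continue inside this block is dead code,
        # just like the assignments to bestDx/bestDy in the loop above
        best_dx = dx
print(best_dx)  # still None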
I am trying to design a function that will calculate 30 day rolling volatility.
I have a file with 3 columns: date, and daily returns for 2 stocks.
How can I do this? I have a problem in summing the first 30 entries to get my vol.
Edit:
So it will read an excel file, with 3 columns: a date, and daily returns.
daily.ret = read.csv("abc.csv")
e.g.
date        stock1  stock2
01/01/2000  0.01    0.02
etc etc, with years of data. I want to calculate rolling 30-day annualised vol.
This is my function:
calc_30day_vol = function()
{
    stock1 = abc$stock1^2
    stock2 = abc$stock2^2
    j = 30
    approx_days_in_year = length(abc$stock1)/10
    vol_1 = 1:length(stock1)
    vol_2 = 1:length(stock2)
    for (i in 1:length(stock1))
    {
        vol_1[j] = sqrt((approx_days_in_year / 30) * rowSums(stock1[i:j]))
        vol_2[j] = sqrt((approx_days_in_year / 30) * rowSums(stock2[i:j]))
        j = j + 1
    }
}
So stock1 and stock2 are the squared daily returns from the Excel file, needed to calculate vol. Entries 1-30 for vol_1 and vol_2 are empty since we are calculating 30-day vol. I am trying to use the rowSums function to sum the squared daily returns for the first 30 entries, and then move the index down by one on each iteration.
So days 1-30, days 2-31, days 3-32, etc., hence why I have defined "j".
I'm new to R, so apologies if this sounds rather silly.
This should get you started.
First I have to create some data that look like you describe
library(quantmod)
getSymbols(c("SPY", "DIA"), src='yahoo')
m <- merge(ROC(Ad(SPY)), ROC(Ad(DIA)), all=FALSE)[-1, ]
dat <- data.frame(date=format(index(m), "%m/%d/%Y"), coredata(m))
tmpfile <- tempfile()
write.csv(dat, file=tmpfile, row.names=FALSE)
Now I have a csv with data in your very specific format.
Use read.zoo to read the csv and then convert to an xts object (there are lots of ways to read data into R; see R Data Import/Export).
r <- as.xts(read.zoo(tmpfile, sep=",", header=TRUE, format="%m/%d/%Y"))
# each column of r has daily log returns for a stock price series
# use `apply` to apply a function to each column.
vols.mat <- apply(r, 2, function(x) {
    # use rolling 30 day window to calculate standard deviation.
    # annualize by multiplying by square root of time
    runSD(x, n=30) * sqrt(252)
})
#`apply` returns a `matrix`; `reclass` to `xts`
vols.xts <- reclass(vols.mat, r) #class as `xts` using attributes of `r`
tail(vols.xts)
# SPY.Adjusted DIA.Adjusted
#2012-06-22 0.1775730 0.1608266
#2012-06-25 0.1832145 0.1640912
#2012-06-26 0.1813581 0.1621459
#2012-06-27 0.1825636 0.1629997
#2012-06-28 0.1824120 0.1630481
#2012-06-29 0.1898351 0.1689990
#Clean-up
unlink(tmpfile)
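For reference, the same rolling annualised volatility is a one-liner in pandas, if Python is ever more convenient. A sketch assuming the CSV layout from the question (whether dayfirst is right depends on how the dates are written):

import pandas as pd

# columns: date, stock1, stock2 -- as in the question's abc.csv
df = pd.read_csv("abc.csv", parse_dates=["date"], dayfirst=True, index_col="date")
# 30-day rolling standard deviation of the returns, annualised with sqrt(252)
vols = df.rolling(window=30).std() * (252 ** 0.5)
print(vols.tail())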