Reading and writing files

I am trying to read one text file, convert its contents to pig latin, and write the result to a new file. Here is what I have:
def pl_line(word):
    statement = input('enter a string: ')
    words = statement.split()
    for word in words:
        if len(word) <= 1:
            print(word + 'ay')
        else:
            print(word[1:] + word[0] + 'ay')

def pl_file(old_file, new_file):
    old_file = input('enter the file you want to read from: ')
    new_file = input('enter the file you would like to write to: ')
    write_to = open(new_file, 'w')
    read_from = open(old_file, 'r')
    lines = read_from.readlines()
    for line in lines():
        line = pl_line(line.strip('\n'))
        write_to.write(line + '\n')
    read_from.close()
    write_to.close()
However, when I run this, I get this error message:
TypeError: 'list' object is not callable
Any ideas of how to improve my code?

Here are some improvements to the actual converter:
_VOWELS = 'aeiou'
_VOWELS_Y = _VOWELS + 'y'
_SILENT_H_WORDS = "hour honest honor heir herb".split()

def igpay_atinlay(word: str, with_vowel: str = 'yay'):
    is_title = False
    if word.title() == word:
        is_title = True
    word = word.lower()
    # Default case, strangely, is 'in-yay'
    result = word + with_vowel
    if word[0] not in _VOWELS and word not in _SILENT_H_WORDS:
        for pos in range(1, len(word)):
            if word[pos] in _VOWELS:
                result = word[pos:] + word[0:pos] + 'ay'
                break
    if is_title:
        result = result.title()
    return result

def line_to_pl(line: str, with_vowel: str = 'yay'):
    new_line = ''
    start = None
    for pos in range(0, len(line)):
        if line[pos].isalpha() or line[pos] == "'" or line[pos] == "-":
            if start is None:
                start = pos
        else:
            if start is not None:
                new_line += igpay_atinlay(line[start:pos], with_vowel=with_vowel)
                start = None
            new_line += line[pos]
    if start is not None:
        # Flush a trailing word; slice to the end so the last character is not dropped
        new_line += igpay_atinlay(line[start:], with_vowel=with_vowel)
        start = None
    return new_line
tests = """
Now is the time for all good men to come to the aid of their party!
Onward, Christian soldiers!
A horse! My kingdom for a horse!
Ng!
Run away!
This is it.
Help, I need somebody.
Oh, my!
Dr. Livingston, I presume?
"""
for t in tests.split("\n"):
    if t:
        print(t)
        print(line_to_pl(t))

You very likely mixed up the assignments to read_from and write_to, so you're unintentionally trying to read from a file opened only for write access.
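That said, the traceback you show comes from for line in lines(): — readlines() already returns a list, and calling that list with () raises TypeError: 'list' object is not callable. Here is one rough sketch of how the two functions could be reworked (note that pl_line has to return the converted line instead of printing it, so that pl_file has something to write):

def pl_line(line):
    converted = []
    for word in line.split():
        if len(word) <= 1:
            converted.append(word + 'ay')
        else:
            converted.append(word[1:] + word[0] + 'ay')
    return ' '.join(converted)

def pl_file(old_file, new_file):
    # 'with' closes both files even if something goes wrong partway through
    with open(old_file, 'r') as read_from, open(new_file, 'w') as write_to:
        for line in read_from:          # iterate the file object directly; no () on a list
            write_to.write(pl_line(line.strip('\n')) + '\n')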

Related

Brute Force Transposition

Hello, I have an assignment that I can't figure out. The questions for the assignment are:
Make a loop that tries to decrypt the ciphertext with all possible keys, one at a time.
For each iteration, each individual word is looked up in the dictionary. If 85% of the words are found in the dictionary, then it is probably the right key for the current run, and the loop must be broken.
Decrypt the text with the found key and print it.
I have code that takes all the words from a dictionary and counts them. I have linked the csv file. Hope you can help me.
import csv
import pickle
import math

orddict = {}
item = 0
with open('alle_dkord.csv', 'r', encoding='utf-8') as file:
    reader = csv.reader(file, delimiter=';')
    for row in reader:
        orddict[row[0].upper()] = row[1]
print(len(orddict))
pkfile = open('wordlist.pkl', 'ab')
pickle.dump(orddict, pkfile)
pkfile.close()

def main():
    msg = "This is a cypher text"
    kryptmsg = "Ta h ticesyx ptihse r"
    key = 8
    krypteret_tekst = krypter(key, msg)
    print(krypteret_tekst)
    dekrypteret_tekst = dekrypter(key, kryptmsg)
    print(dekrypteret_tekst)

def krypter(key, msg):
    ciffer_string = [""] * key
    for kolonne in range(key):
        curIndex = kolonne
        while curIndex < len(msg):
            ciffer_string[kolonne] += msg[curIndex]
            curIndex += key
    return ''.join(ciffer_string)

def dekrypter(key, kryptmsg):
    numKolonner = int(math.ceil(len(kryptmsg) / float(key)))
    numRows = key
    numOfGreyBox = (numKolonner * numRows) - len(kryptmsg)
    plaintekst = [''] * numKolonner
    kolonne = 0
    row = 0
    for symbol in kryptmsg:
        plaintekst[kolonne] += symbol
        kolonne += 1
        if (kolonne == numKolonner) or (kolonne == numKolonner - 1 and row >= numRows - numOfGreyBox):
            kolonne = 0
            row += 1
    return ''.join(plaintekst)

if __name__ == '__main__':
    main()
The csv file
I have tried to make a loop, but it didn't work.
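For reference, here is a rough sketch of the kind of loop the assignment describes, reusing the dekrypter function and the orddict dictionary from the code above. The punctuation stripping and the range of keys tried are assumptions about your data, not part of the assignment text:

ciphertext = "Ta h ticesyx ptihse r"   # or whatever ciphertext you are attacking
for key in range(1, len(ciphertext)):               # try every possible key in turn
    candidate = dekrypter(key, ciphertext)
    words = [w.strip('.,!?;:').upper() for w in candidate.split()]
    if not words:
        continue
    hits = sum(1 for w in words if w in orddict)
    if hits / len(words) >= 0.85:                    # 85% of the words exist in the dictionary
        print('Probable key:', key)
        print(candidate)
        break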

F# scan a buffer finding the last part that begins \c\f and not followed by comma

Trying to find an elegant F# solution for this. I'm reading 1000 bytes from a file into a buffer, "buff". That part is easy.
Now, I want to scan the buffer looking for the last occurrence of a two-character combination:
Either a carriage return ('\r') or a line feed ('\f') that is not followed by a comma.
When I've found that, I need to find the next CR or LF (or the end of the buffer) and print the contents in between as a string.
Context: The file is a CSV file and I want the last line that has some non-empty value in the first column.
First of all, if you are reading CSV files, then it might be a better idea to use the CSV type provider. This gives you nice typed access to CSV files, and it has a couple of options you can use for dealing with messy CSV files (e.g. if you need to skip a few lines). Alternatively, the F# Data library also has a CSV parser, which lets you read the file using an untyped API.
That said, if you really want to implement parsing on your own, then the following example should illustrate the idiomatic approach. I'm not sure I understand your problem exactly, but say we have:
let input = "start \r body \r, comma"
let buff = input.ToCharArray()
I believe you want to find the region between \r and \r,. You can do this using a recursive function that remembers the end of the range and the start of the range and decrements the starting range as it iterates over the string. You can use pattern matching to detect the cases that you need:
let rec findRange startLoc endLoc =
    if startLoc < 0 then failwith "reached beginning"
    match buff.[startLoc], buff.[startLoc+1] with
    | ('\r' | '\f'), ',' -> findRange (startLoc - 1) startLoc
    | ('\r' | '\f'), _ -> startLoc, endLoc
    | _, _ -> findRange (startLoc - 1) endLoc
Using this, we can now get the range and get the required substring:
let s, e = findRange (buff.Length-2) (buff.Length-1)
input.Substring(s + 1, e - s - 1)
Elegant is in the eye of the beholder, but one approach is implementing a matcher type. A matcher is a function that, given an input string and a position, either succeeds, returning a new matcher state with an updated position, or fails.
// A matcher state holds a string and the position
[<Struct>]
type MatcherState =
    {
        Input : string
        Pos : int
    }
    static member New i p : MatcherState = { Input = i ; Pos = p }
    member x.Reposition p : MatcherState = { Input = x.Input ; Pos = p }
    member x.AdvanceBy i : MatcherState = { Input = x.Input ; Pos = x.Pos + i }
    member x.Current = x.Input.[x.Pos]
    member x.InRange = x.Pos >= 0 && x.Pos < x.Input.Length
    member x.Eos = x.Pos >= x.Input.Length

// A Matcher is a function that given a MatcherState
// returns Some MatcherState with a new position if successful
// otherwise returns None
type Matcher = MatcherState -> MatcherState option
By defining a few active patterns we can pattern match for the line start:
// Matches a line start
let mlineStart =
    fun ms ->
        match ms with
        // Bad cases, new line followed by WS + Comma
        | Cr (Ln (Ws (Comma _ | Eos _)))
        | Ln (Ws (Comma _ | Eos _)) -> mbad
        // Good cases, new line not followed by WS + Comma
        | Cr (Ln (Ws ms))
        | Ln (Ws ms) -> mgood ms
        // All other cases bad
        | _ -> mbad
Note: I handle new line followed by whitespace + comma here.
The line end is matched similarly:
// Matches a line end
let mlineEnd =
    fun ms ->
        match ms with
        // Good cases, new line or EOS
        | Cr (Ln ms)
        | Ln ms
        | Eos ms -> mgood ms
        // All other cases bad
        | _ -> mbad
Finally, we scanBackward looking for the line start and, if we find it, scanForward from that position until we find the line end.
match scanBackward testCase testCase.Length mlineStart with
| None -> printfn "No matching line start found"
| Some startPos ->
    // Scan forwards from line start until we find a line end
    match scanForward testCase startPos mlineEnd with
    | None -> printfn "Line start found #%d, but no matching line end found" startPos
    | Some endPos ->
        let line = testCase.Substring (startPos, endPos - startPos)
        printfn "Line found: %s" line
Matcher is actually a simplistic parser, but one that produces no values and that supports scanning both forward and backward. The approach I have chosen is not the most efficient; if efficiency is important, it can be improved by applying the parser combinator techniques used by, for example, FParsec.
Hope this was interesting. I am sure someone can come up with a shorter regex solution, but what fun is that?
The full example follows (no quality guarantees given; use it as inspiration):
// A matcher state holds a string and the position
[<Struct>]
type MatcherState =
    {
        Input : string
        Pos : int
    }
    static member New i p : MatcherState = { Input = i ; Pos = p }
    member x.Reposition p : MatcherState = { Input = x.Input ; Pos = p }
    member x.AdvanceBy i : MatcherState = { Input = x.Input ; Pos = x.Pos + i }
    member x.Current = x.Input.[x.Pos]
    member x.InRange = x.Pos >= 0 && x.Pos < x.Input.Length
    member x.Eos = x.Pos >= x.Input.Length

// A Matcher is a function that given a MatcherState
// returns Some MatcherState with a new position if successful
// otherwise returns None
type Matcher = MatcherState -> MatcherState option

let mgood ms = Some ms
let mbad = None

// Matches EOS
let meos : Matcher =
    fun ms ->
        if ms.Eos then
            mgood ms
        else
            mbad

// Matches a specific character
let mch ch : Matcher =
    fun ms ->
        if not ms.InRange then
            mbad
        elif ms.Current = ch then
            mgood <| ms.AdvanceBy 1
        else mbad

// Matches zero or more whitespaces
let mws : Matcher =
    fun ms ->
        let rec loop pos =
            if pos < ms.Input.Length then
                let ch = ms.Input.[pos]
                if ch = ' ' then
                    loop (pos + 1)
                else
                    mgood <| ms.Reposition pos
            else
                mgood <| ms.Reposition pos
        loop (max ms.Pos 0)

// Active patterns
let (|Eos|_|) = meos
let (|Comma|_|) = mch ','
let (|Cr|_|) = mch '\r'
let (|Ln|_|) = mch '\n'
let (|Ws|_|) = mws

// Matches a line start
let mlineStart =
    fun ms ->
        match ms with
        // Bad cases, new line followed by WS + Comma
        | Cr (Ln (Ws (Comma _ | Eos _)))
        | Ln (Ws (Comma _ | Eos _)) -> mbad
        // Good cases, new line not followed by WS + Comma
        | Cr (Ln (Ws ms))
        | Ln (Ws ms) -> mgood ms
        // All other cases bad
        | _ -> mbad

// Matches a line end
let mlineEnd =
    fun ms ->
        match ms with
        // Good cases, new line or EOS
        | Cr (Ln ms)
        | Ln ms
        | Eos ms -> mgood ms
        // All other cases bad
        | _ -> mbad

// Scans either backward or forward looking for a match
let scan steps input pos (m : Matcher) =
    let rec loop ms =
        match m ms with
        | Some mms ->
            if steps < 0 then
                Some mms.Pos
            else
                Some ms.Pos
        | None ->
            if steps = 0 then
                None
            elif steps > 0 && ms.Pos >= ms.Input.Length then
                None
            elif steps < 0 && ms.Pos < 0 then
                None
            else
                loop <| ms.AdvanceBy steps
    loop (MatcherState.New input (min input.Length (max 0 pos)))

let scanForward = scan 1
let scanBackward = scan -1

[<EntryPoint>]
let main argv =
    // Some test cases
    let testCases =
        [|
            """1,2,3,4
4,5,6,7"""
            """1,2,3,4
4,5,6,7
"""
            """1,2,3,4
4,5,6,7
,2,3,4
"""
            """1,2,3,4
4,5,6,7
,2,3,4
"""
        |]
    for testCase in testCases do
        // Scan backwards from end until we find a line start
        match scanBackward testCase testCase.Length mlineStart with
        | None -> printfn "No matching line start found"
        | Some startPos ->
            // Scan forwards from line start until we find a line end
            match scanForward testCase startPos mlineEnd with
            | None -> printfn "Line start found #%d, but no matching line end found" startPos
            | Some endPos ->
                let line = testCase.Substring (startPos, endPos - startPos)
                printfn "Line found: %s" line
    0

Python: I'm making several lists to keep track of variables in a database

It's supposed to save each line of the text file into a list, split it on commas, and sort the fields into several different lists. The error occurs at the while loop, saying that the index is out of range.
lineCounter = 0
j = 0
file = open("savefile.txt", "r")
with open('savefile.txt', 'r') as f:
    string = [line.strip() for line in f]
for line in file:
    lineCounter += 1
while j < lineCounter:
    tempList = string[j].split(',')
    firstName[j] = tempList[0]
    lastName[j] = tempList[1]
    postition[j] = tempList[2]
    department[j] = tempList[3]
    seniority[j] = tempList[4]
    vacationWeeks[j] = tempList[5]
    sickDays[j] = tempList[6]
    iD[j] = tempList[7]
    status[j] = tempList[8]
    j += 1
print firstName
file.close()  # close the text file
NVM, the problem was that the lists needed to be appended to rather than assigned by index.
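For anyone who finds this later, a minimal sketch of that append-based version (the list names come from the code above; nine comma-separated fields per line is assumed):

firstName, lastName, position = [], [], []
department, seniority, vacationWeeks = [], [], []
sickDays, iD, status = [], [], []

with open('savefile.txt', 'r') as f:
    for line in f:
        fields = line.strip().split(',')
        # append() grows each list, instead of assigning to indexes that do not exist yet
        firstName.append(fields[0])
        lastName.append(fields[1])
        position.append(fields[2])
        department.append(fields[3])
        seniority.append(fields[4])
        vacationWeeks.append(fields[5])
        sickDays.append(fields[6])
        iD.append(fields[7])
        status.append(fields[8])

print(firstName)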

Process output of a sequence of files in Matlab

EDIT 3
Hi! I had problems with the matrix dimensions, but I've solved them. Now my problem is that I want to do the same operation on a large series of files in the same folder and write the output values on separate lines of text.txt. It works with the first file, but it doesn't write the rest to the text file. Is there something wrong?
myPath = 'C:\EX\';
a = dir(fullfile(myPath,'*.DIM'));
fileNames = { a.name };
% Rename files
for k = 1:length(fileNames)
    newFileName = [fileNames{k}(1:2) fileNames{k}(4:6) '.txt'];
    movefile([myPath fileNames{k}], [myPath newFileName]);
end
filePattern = fullfile(myPath,'*.txt');
txtFiles = dir(filePattern);
for k = 1:length(txtFiles)
    baseFileName = txtFiles(k).name;
    fullFileName = fullfile(myPath,baseFileName);
    fid = fopen(fullFileName, 'r');
    for i = 1:18
        m{i} = fgetl(fid);
    end
    result2 = m{18};
    result2b = result2([12:19]);
    fid = fopen(fullFileName, 'r');
    for i = 1:30
        m{i} = fgetl(fid);
    end
    result3 = m{30};
    result3b = result3([12:19]);
    fid = fopen(fullFileName, 'r');
    for i = 1:31
        m{i} = fgetl(fid);
    end
    result4 = m{31};
    result4b = result4([12:20]);
    fid = fopen(fullFileName, 'r');
    for i = 1:19
        m{i} = fgetl(fid);
    end
    result5 = m{19};
    result5b = result5([12:20]);
    text = {baseFileName, result2b, result3b, result4b, result5b};
    final = [Fields'; text];
end
Really thanks in advance!
Index exceeds dimensions is exactly what it means.
Try to put a breakpoint at the line where it occurs and check the dimension of result2. Assuming it is a vector, you will find that its length is less than 19.

Reading TDM (Diadem) files from script

My customer is sending TDM/TDX files captured in National Instruments Diadem, which I haven't got. I'm looking for a way to convert the files into .CSV, XLS or .MAT files for analysis in Matlab (without using Diadem or Diadem DLLs!)
The format consists of a well-structured XML file (.TDM) and a binary file (.TDX), with the .TDM defining how fields are packed as bits in the binary TDX. I'd like to read the files (for use in Matlab and other environments). Does anyone have a general-purpose tool or conversion script in, for instance, Python or Perl (not using the NI DLLs), or directly in Matlab?
I've looked into buying the tool, but didn't like it for anything other than one-time conversion to a compatible file format.
Thanks!
I know this is a little late, but I have a simple library to read TDM/TDX files in Python. It works by parsing the TDM file to figure out the data type, then using NumPy.memmap to open the TDX file. It can then be used like a standard NumPy array. The code is pretty simple, so you could probably implement something similar in Matlab.
Here's the link: https://bitbucket.org/joshayers/tdm_loader
Hope that helps.
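If you would rather roll your own along the same lines, here is a rough Python sketch of the idea: pull a block description out of the TDM XML, then memory-map the matching samples in the TDX with NumPy. The attribute names used below (id, byteOffset, length, valueType) are assumptions taken from the MATLAB script further down this thread, not a verified TDM schema, so check them against your own files:

import xml.etree.ElementTree as ET
import numpy as np

# Assumed mapping from TDM value-type names to NumPy dtypes
DTYPES = {
    'eInt16Usi': np.int16, 'eInt32Usi': np.int32,
    'eFloat32Usi': np.float32, 'eFloat64Usi': np.float64,
}

def read_tdx_block(tdm_path, tdx_path, block_id):
    # Find the block description with the given id anywhere in the TDM XML
    root = ET.parse(tdm_path).getroot()
    block = next(el for el in root.iter() if el.get('id') == block_id)
    dtype = DTYPES[block.get('valueType')]
    offset = int(block.get('byteOffset'))
    length = int(block.get('length'))
    # Memory-map the raw samples straight out of the binary .tdx file
    return np.memmap(tdx_path, dtype=dtype, mode='r', offset=offset, shape=(length,))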
Maybe a little too late, but I think there is a simple way to get the data from TDM files: NI provides plug-ins for reading TDM files into Excel and OpenOffice Calc. Having the data in one of these programs you could use the CSV export. Search google for "tdm excel" or "tdm openoffice".
Hope this helps...
Gemue
The following script converts all the variables into a 'variable' struct.
CurrDirectory = '...//'; % Path to current directory
fileNametdx = '.../utility/'; % Path to TDX file
%%
% Data type conversion
Dtype.eInt8Usi = 'int8';
Dtype.eInt16Usi = 'int16';
Dtype.eInt32Usi = 'int32';
Dtype.eInt64Usi = 'int64';
Dtype.eUInt8Usi = 'uint8';
Dtype.eUInt16Usi = 'uint16';
Dtype.eUInt32Usi = 'uint32';
Dtype.eUInt64Usi = 'uint64';
Dtype.eFloat32Usi = 'single';
Dtype.eFloat64Usi = 'double';
%% Read .tdx file Name
wb = waitbar(0,'Reading *.tdx Files');
fileNameTDM = strrep(fileNametdx,'.tdx','.TDM');
%% Read .TDM
tdm = xml2struct(fileNameTDM);
for i = 1:numel(tdm.usi_colon_tdm.usi_colon_data.tdm_channel)
    waitbar((1/numel(tdm.usi_colon_tdm.usi_colon_data.tdm_channel))*i,wb,['File ' fileNametdx ' conversion started']);
    s1 = strsplit(string(tdm.usi_colon_tdm.usi_colon_data.tdm_channel{1, i}.local_columns.Text),'"');
    usi1 = s1(2);
    % if condition match until we get usi2
    for j = 1:numel(tdm.usi_colon_tdm.usi_colon_data.localcolumn)
        usi2 = string(tdm.usi_colon_tdm.usi_colon_data.localcolumn{1, j}.Attributes.id);
        if usi1 == usi2
            % take new usi
            s2 = strsplit(string(tdm.usi_colon_tdm.usi_colon_data.localcolumn{1, j}.values.Text),'"');
            new_usi1 = s2(2);
            w1 = strsplit(string(tdm.usi_colon_tdm.usi_colon_data.tdm_channel{1, i}.datatype.Text),'_');
            str_1 = char(strcat('tdm.usi_colon_tdm.usi_colon_data.',lower(w1(2)),'_sequence'));
            str_2 = char(strcat('tdm.usi_colon_tdm.usi_colon_data.',lower(w1(2)),'_sequence{1, k}.Attributes.id'));
            str_3 = char(strcat('tdm.usi_colon_tdm.usi_colon_data.',lower(w1(2)),'_sequence{1, k}.values.Attributes.external'));
            str_4 = char(strcat('tdm.usi_colon_tdm.usi_colon_data.',lower(w1(2)),'_sequence{1, k}.values'));
            for k = 1:numel(eval(str_1))
                new_usi2 = string(eval(str_2));
                if new_usi1 == new_usi2
                    if isfield(eval(str_4), 'Attributes')
                        inc_value1 = string(eval(str_3));
                        for m = 1:numel(tdm.usi_colon_tdm.usi_colon_include.file.block)
                            inc_value2 = string(tdm.usi_colon_tdm.usi_colon_include.file.block{1, m}.Attributes.id);
                            if inc_value1 == inc_value2
                                % offset = round(str2num(tdm.usi_colon_tdm.usi_colon_include.file.block{1, m}.Attributes.byteOffset)/8);
                                length = round(str2num(tdm.usi_colon_tdm.usi_colon_include.file.block{1, m}.Attributes.length));
                                offset1 = round(str2num(tdm.usi_colon_tdm.usi_colon_include.file.block{1, m}.Attributes.byteOffset));
                                value_type = tdm.usi_colon_tdm.usi_colon_include.file.block{1, m}.Attributes.valueType;
                                m = memmapfile(fullfile(CurrDirectory,fileNametdx),'Offset',offset1,'Format',{Dtype.(value_type) [length 1] 'dat'},'Writable',true,'Repeat',1);
                                dat = m.Data.dat;
                            end
                        end
                    else
                        str_5 = char(strcat('tdm.usi_colon_tdm.usi_colon_data.',lower(w1(2)),'_sequence{1, k}.values.',char(fieldnames(tdm.usi_colon_tdm.usi_colon_data.string_sequence{1, k}.values))));
                        dat = eval(str_5)';
                    end
                    name_variable = string(tdm.usi_colon_tdm.usi_colon_data.tdm_channel{1, i}.name.Text);
                    varname = genvarname(char(name_variable));
                    variable.(varname) = dat;
                end
            end
        end
    end
end
waitbar(1,wb,[fileNametdx ' conversion completed']);
pause(1)
close(wb)
delete(fullfile(CurrDirectory,fileNametdx),fullfile(CurrDirectory,fileNameTDM));
% Output Variable is Struct
clearvars -except variable
This script requires the following XML parser:
function [ s ] = xml2struct( file )
%Convert xml file into a MATLAB structure
% [ s ] = xml2struct( file )
%
% A file containing:
% <XMLname attrib1="Some value">
% <Element>Some text</Element>
% <DifferentElement attrib2="2">Some more text</DifferentElement>
% <DifferentElement attrib3="2" attrib4="1">Even more text</DifferentElement>
% </XMLname>
%
% Will produce:
% s.XMLname.Attributes.attrib1 = "Some value";
% s.XMLname.Element.Text = "Some text";
% s.XMLname.DifferentElement{1}.Attributes.attrib2 = "2";
% s.XMLname.DifferentElement{1}.Text = "Some more text";
% s.XMLname.DifferentElement{2}.Attributes.attrib3 = "2";
% s.XMLname.DifferentElement{2}.Attributes.attrib4 = "1";
% s.XMLname.DifferentElement{2}.Text = "Even more text";
%
% Please note that the following characters are substituted
% '-' by '_dash_', ':' by '_colon_' and '.' by '_dot_'
%
% Written by W. Falkena, ASTI, TUDelft, 21-08-2010
% Attribute parsing speed increased by 40% by A. Wanner, 14-6-2011
% Added CDATA support by I. Smirnov, 20-3-2012
%
% Modified by X. Mo, University of Wisconsin, 12-5-2012
if (nargin < 1)
clc;
help xml2struct
return
end
if isa(file, 'org.apache.xerces.dom.DeferredDocumentImpl') || isa(file, 'org.apache.xerces.dom.DeferredElementImpl')
% input is a java xml object
xDoc = file;
else
%check for existence
if (exist(file,'file') == 0)
%Perhaps the xml extension was omitted from the file name. Add the
%extension and try again.
if (isempty(strfind(file,'.xml')))
file = [file '.xml'];
end
if (exist(file,'file') == 0)
error(['The file ' file ' could not be found']);
end
end
%read the xml file
xDoc = xmlread(file);
end
%parse xDoc into a MATLAB structure
s = parseChildNodes(xDoc);
end
% ----- Subfunction parseChildNodes -----
function [children,ptext,textflag] = parseChildNodes(theNode)
% Recurse over node children.
children = struct;
ptext = struct; textflag = 'Text';
if hasChildNodes(theNode)
childNodes = getChildNodes(theNode);
numChildNodes = getLength(childNodes);
for count = 1:numChildNodes
theChild = item(childNodes,count-1);
[text,name,attr,childs,textflag] = getNodeData(theChild);
if (~strcmp(name,'#text') && ~strcmp(name,'#comment') && ~strcmp(name,'#cdata_dash_section'))
%XML allows the same elements to be defined multiple times,
%put each in a different cell
if (isfield(children,name))
if (~iscell(children.(name)))
%put existing element into cell format
children.(name) = {children.(name)};
end
index = length(children.(name))+1;
%add new element
children.(name){index} = childs;
if(~isempty(fieldnames(text)))
children.(name){index} = text;
end
if(~isempty(attr))
children.(name){index}.('Attributes') = attr;
end
else
%add previously unknown (new) element to the structure
children.(name) = childs;
if(~isempty(text) && ~isempty(fieldnames(text)))
children.(name) = text;
end
if(~isempty(attr))
children.(name).('Attributes') = attr;
end
end
else
ptextflag = 'Text';
if (strcmp(name, '#cdata_dash_section'))
ptextflag = 'CDATA';
elseif (strcmp(name, '#comment'))
ptextflag = 'Comment';
end
%this is the text in an element (i.e., the parentNode)
if (~isempty(regexprep(text.(textflag),'[\s]*','')))
if (~isfield(ptext,ptextflag) || isempty(ptext.(ptextflag)))
ptext.(ptextflag) = text.(textflag);
else
%what to do when element data is as follows:
%<element>Text <!--Comment--> More text</element>
%put the text in different cells:
% if (~iscell(ptext)) ptext = {ptext}; end
% ptext{length(ptext)+1} = text;
%just append the text
ptext.(ptextflag) = [ptext.(ptextflag) text.(textflag)];
end
end
end
end
end
end
% ----- Subfunction getNodeData -----
function [text,name,attr,childs,textflag] = getNodeData(theNode)
% Create structure of node info.
%make sure name is allowed as structure name
name = toCharArray(getNodeName(theNode))';
name = strrep(name, '-', '_dash_');
name = strrep(name, ':', '_colon_');
name = strrep(name, '.', '_dot_');
attr = parseAttributes(theNode);
if (isempty(fieldnames(attr)))
attr = [];
end
%parse child nodes
[childs,text,textflag] = parseChildNodes(theNode);
if (isempty(fieldnames(childs)) && isempty(fieldnames(text)))
%get the data of any childless nodes
% faster than if any(strcmp(methods(theNode), 'getData'))
% no need to try-catch (?)
% faster than text = char(getData(theNode));
text.(textflag) = toCharArray(getTextContent(theNode))';
end
end
% ----- Subfunction parseAttributes -----
function attributes = parseAttributes(theNode)
% Create attributes structure.
attributes = struct;
if hasAttributes(theNode)
theAttributes = getAttributes(theNode);
numAttributes = getLength(theAttributes);
for count = 1:numAttributes
%attrib = item(theAttributes,count-1);
%attr_name = regexprep(char(getName(attrib)),'[-:.]','_');
%attributes.(attr_name) = char(getValue(attrib));
%Suggestion of Adrian Wanner
str = toCharArray(toString(item(theAttributes,count-1)))';
k = strfind(str,'=');
attr_name = str(1:(k(1)-1));
attr_name = strrep(attr_name, '-', '_dash_');
attr_name = strrep(attr_name, ':', '_colon_');
attr_name = strrep(attr_name, '.', '_dot_');
attributes.(attr_name) = str((k(1)+2):(end-1));
end
end
end
