Using Transaction Execution Approval Language (TEAL), can I make my own coin?

Just like ERC-20 tokens let you create a new currency on the Ethereum network, is there a way to use TEAL (https://github.com/algorand/go-algorand/blob/master/data/transactions/logic/README.md) and the Algorand network for the same purpose?
That is, you can do lots of things in a "bytecode based stack language that executes inside Algorand transactions", and one simple thing must surely be plain coin logic with a fixed total supply, rules for minting new coins, and so on.

Found answer at: https://developer.algorand.org/tutorials/create-dogcoin/
via https://github.com/algorand/go-algorand/issues/2142#issuecomment-835243098
I understand if this question needs to be closed.

Yes, you can use TEAL to create a new token on the Algorand network. Tokens are just a form of smart contract, and TEAL lets developers write smart contracts.
However, Algorand also has Algorand Standard Assets (ASAs), a layer-1 feature that lets you create a token without writing a smart contract at all. You can read more about ASAs in the Algorand developer documentation.
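For reference, here is a minimal sketch (mine, not from the linked tutorial) of creating such a token as an ASA with the Python SDK (py-algosdk). The endpoint, token, and mnemonic are placeholders, and depending on the SDK version the transaction classes may live in algosdk.future.transaction rather than algosdk.transaction:

# Hedged sketch: create a fixed-supply token as an Algorand Standard Asset.
# ALGOD_URL, ALGOD_TOKEN and CREATOR_MNEMONIC are placeholders for your own node/account.
from algosdk.v2client import algod
from algosdk import transaction, mnemonic   # algosdk.future.transaction on older SDKs

ALGOD_URL = "http://localhost:4001"
ALGOD_TOKEN = "a" * 64
CREATOR_MNEMONIC = "your 25 word mnemonic ..."

client = algod.AlgodClient(ALGOD_TOKEN, ALGOD_URL)
creator_sk = mnemonic.to_private_key(CREATOR_MNEMONIC)
creator = mnemonic.to_public_key(CREATOR_MNEMONIC)

txn = transaction.AssetConfigTxn(
    sender=creator,
    sp=client.suggested_params(),
    total=1_000_000,            # fixed total supply
    decimals=0,
    default_frozen=False,
    unit_name="DOG",
    asset_name="Dogcoin",
    manager=creator,            # addresses allowed to reconfigure / clawback / freeze
    reserve=creator,
    freeze=creator,
    clawback=creator,
    url="https://example.com",
)
txid = client.send_transaction(txn.sign(creator_sk))
transaction.wait_for_confirmation(client, txid, 4)
print("created ASA in transaction", txid)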
If you want to do this in TEAL and build a more bespoke token, it is recommended to use PyTEAL, a Python library that generates TEAL programs and is much easier to work with than writing TEAL by hand.
This is an example of an asset in PyTEAL:
from pyteal import *

def approval_program():
    on_creation = Seq(
        [
            Assert(Txn.application_args.length() == Int(1)),
            App.globalPut(Bytes("total supply"), Btoi(Txn.application_args[0])),
            App.globalPut(Bytes("reserve"), Btoi(Txn.application_args[0])),
            App.localPut(Int(0), Bytes("admin"), Int(1)),
            App.localPut(Int(0), Bytes("balance"), Int(0)),
            Return(Int(1)),
        ]
    )

    is_admin = App.localGet(Int(0), Bytes("admin"))

    on_closeout = Seq(
        [
            App.globalPut(
                Bytes("reserve"),
                App.globalGet(Bytes("reserve"))
                + App.localGet(Int(0), Bytes("balance")),
            ),
            Return(Int(1)),
        ]
    )

    register = Seq([App.localPut(Int(0), Bytes("balance"), Int(0)), Return(Int(1))])

    # configure the admin status of the account Txn.accounts[1]
    # sender must be admin
    new_admin_status = Btoi(Txn.application_args[1])
    set_admin = Seq(
        [
            Assert(And(is_admin, Txn.application_args.length() == Int(2))),
            App.localPut(Int(1), Bytes("admin"), new_admin_status),
            Return(Int(1)),
        ]
    )
    # NOTE: The above set_admin code is carefully constructed. If instead we used the following code:
    # Seq([
    #     Assert(Txn.application_args.length() == Int(2)),
    #     App.localPut(Int(1), Bytes("admin"), new_admin_status),
    #     Return(is_admin)
    # ])
    # It would be vulnerable to the following attack: a sender passes in their own address as
    # Txn.accounts[1], so then the line App.localPut(Int(1), Bytes("admin"), new_admin_status)
    # changes the sender's admin status, meaning the final Return(is_admin) can return anything the
    # sender wants. This allows anyone to become an admin!

    # move assets from the reserve to Txn.accounts[1]
    # sender must be admin
    mint_amount = Btoi(Txn.application_args[1])
    mint = Seq(
        [
            Assert(Txn.application_args.length() == Int(2)),
            Assert(mint_amount <= App.globalGet(Bytes("reserve"))),
            App.globalPut(
                Bytes("reserve"), App.globalGet(Bytes("reserve")) - mint_amount
            ),
            App.localPut(
                Int(1),
                Bytes("balance"),
                App.localGet(Int(1), Bytes("balance")) + mint_amount,
            ),
            Return(is_admin),
        ]
    )

    # transfer assets from the sender to Txn.accounts[1]
    transfer_amount = Btoi(Txn.application_args[1])
    transfer = Seq(
        [
            Assert(Txn.application_args.length() == Int(2)),
            Assert(transfer_amount <= App.localGet(Int(0), Bytes("balance"))),
            App.localPut(
                Int(0),
                Bytes("balance"),
                App.localGet(Int(0), Bytes("balance")) - transfer_amount,
            ),
            App.localPut(
                Int(1),
                Bytes("balance"),
                App.localGet(Int(1), Bytes("balance")) + transfer_amount,
            ),
            Return(Int(1)),
        ]
    )

    program = Cond(
        [Txn.application_id() == Int(0), on_creation],
        [Txn.on_completion() == OnComplete.DeleteApplication, Return(is_admin)],
        [Txn.on_completion() == OnComplete.UpdateApplication, Return(is_admin)],
        [Txn.on_completion() == OnComplete.CloseOut, on_closeout],
        [Txn.on_completion() == OnComplete.OptIn, register],
        [Txn.application_args[0] == Bytes("set admin"), set_admin],
        [Txn.application_args[0] == Bytes("mint"), mint],
        [Txn.application_args[0] == Bytes("transfer"), transfer],
    )

    return program

def clear_state_program():
    program = Seq(
        [
            App.globalPut(
                Bytes("reserve"),
                App.globalGet(Bytes("reserve"))
                + App.localGet(Int(0), Bytes("balance")),
            ),
            Return(Int(1)),
        ]
    )

    return program

if __name__ == "__main__":
    with open("asset_approval.teal", "w") as f:
        compiled = compileTeal(approval_program(), mode=Mode.Application, version=2)
        f.write(compiled)

    with open("asset_clear_state.teal", "w") as f:
        compiled = compileTeal(clear_state_program(), mode=Mode.Application, version=2)
        f.write(compiled)
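Once the two .teal files are generated, the application still needs to be deployed. Below is a rough deployment sketch (mine, not part of the answer or the tutorial), again using py-algosdk with placeholder credentials; the state schema matches the two global and two local integers the program above uses, and the single application argument is the initial total supply expected by on_creation:

# Hedged sketch: compile the generated TEAL with algod and create the application.
# ALGOD_URL/token and the mnemonic are placeholders.
import base64
from algosdk.v2client import algod
from algosdk import transaction, mnemonic   # algosdk.future.transaction on older SDKs

client = algod.AlgodClient("a" * 64, "http://localhost:4001")
creator_sk = mnemonic.to_private_key("your 25 word mnemonic ...")
creator = mnemonic.to_public_key("your 25 word mnemonic ...")

def compile_teal(path):
    # algod's TEAL compile endpoint returns base64-encoded bytecode
    with open(path) as f:
        return base64.b64decode(client.compile(f.read())["result"])

txn = transaction.ApplicationCreateTxn(
    sender=creator,
    sp=client.suggested_params(),
    on_complete=transaction.OnComplete.NoOpOC,
    approval_program=compile_teal("asset_approval.teal"),
    clear_program=compile_teal("asset_clear_state.teal"),
    # "total supply" and "reserve" globals; "admin" and "balance" locals
    global_schema=transaction.StateSchema(num_uints=2, num_byte_slices=0),
    local_schema=transaction.StateSchema(num_uints=2, num_byte_slices=0),
    app_args=[(1_000_000).to_bytes(8, "big")],   # initial total supply
)
txid = client.send_transaction(txn.sign(creator_sk))
transaction.wait_for_confirmation(client, txid, 4)
print("deployed application in transaction", txid)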

Related

Sagemaker batch transform: Internal server error 500

I am trying to do a batch transform on a training dataset in an S3 bucket. I have followed this link:
https://github.com/aws-samples/quicksight-sagemaker-integration-blog
The training data being transformed is about 35 MB.
I am getting these errors:
Bad HTTP status received from algorithm: 500
The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.
Process followed:
1. s3_input_train = sagemaker.TrainingInput(s3_data='s3://{}/{}/rawtrain/'.format(bucket, prefix), content_type='csv')

2. from sagemaker.sklearn.estimator import SKLearn

   sagemaker_session = sagemaker.Session()
   script_path = 'preprocessing.py'
   sklearn_preprocessor = SKLearn(
       entry_point=script_path,
       role=role,
       train_instance_type="ml.c4.xlarge",
       framework_version='0.20.0',
       py_version='py3',
       sagemaker_session=sagemaker_session)
   sklearn_preprocessor.fit({'train': s3_input_train})

3. transform_train_output_path = 's3://{}/{}/{}/'.format(bucket, prefix, 'transformtrain-train-output')

   scikit_learn_inferencee_model = sklearn_preprocessor.create_model(env={'TRANSFORM_MODE': 'feature-transform'})
   transformer_train = scikit_learn_inferencee_model.transformer(
       instance_count=1,
       assemble_with='Line',
       output_path=transform_train_output_path,
       accept='text/csv',
       strategy="MultiRecord",
       max_payload=40,
       instance_type='ml.m4.xlarge')

4. Preprocess training input:

   transformer_train.transform(s3_input_train.config['DataSource']['S3DataSource']['S3Uri'],
                               content_type='text/csv',
                               split_type="Line")
   print('Waiting for transform job: ' + transformer_train.latest_transform_job.job_name)
   transformer_train.wait()
   preprocessed_train_path = transformer_train.output_path + transformer_train.latest_transform_job.job_name
preprocessing.py
from __future__ import print_function

import time
import sys
from io import StringIO
import os
import shutil
import argparse
import csv
import json
import numpy as np
import pandas as pd
import logging

from sklearn.compose import ColumnTransformer
from sklearn.externals import joblib
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder

from sagemaker_containers.beta.framework import (
    content_types, encoders, env, modules, transformer, worker)

# Specifying the column names here.
feature_columns_names = [
    'A',
    'B',
    'C',
    'D',
    'E',
    'F',
    'G',
    'H',
    'I',
    'J',
    'K'
]

label_column = 'ab'

feature_columns_dtype = {
    'A': str,
    'B': np.float64,
    'C': np.float64,
    'D': str,
    'E': np.float64,
    'F': str,
    'G': str,
    'H': np.float64,
    'I': str,
    'J': str,
    'K': str,
}

label_column_dtype = {'ab': np.int32}

def merge_two_dicts(x, y):
    z = x.copy()   # start with x's keys and values
    z.update(y)    # modifies z with y's keys and values & returns None
    return z

def _is_inverse_label_transform():
    """Returns True if it's running in inverse label transform mode."""
    return os.getenv('TRANSFORM_MODE') == 'inverse-label-transform'

def _is_feature_transform():
    """Returns True if it's running in feature transform mode."""
    return os.getenv('TRANSFORM_MODE') == 'feature-transform'

if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Sagemaker specific arguments. Defaults are set in the environment variables.
    parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])

    args = parser.parse_args()

    input_files = [os.path.join(args.train, file) for file in os.listdir(args.train)]
    if len(input_files) == 0:
        raise ValueError(('There are no files in {}.\n' +
                          'This usually indicates that the channel ({}) was incorrectly specified,\n' +
                          'the data specification in S3 was incorrectly specified or the role specified\n' +
                          'does not have permission to access the data.').format(args.train, "train"))

    raw_data = [pd.read_csv(
        file,
        header=None,
        names=feature_columns_names + [label_column],
        dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype)) for file in input_files]
    concat_data = pd.concat(raw_data)

    numeric_features = list([
        'B',
        'C',
        'E',
        'H'
    ])
    numeric_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())])

    categorical_features = list(['A', 'D', 'F', 'G', 'I', 'J', 'K'])
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(handle_unknown='ignore'))])

    preprocessor = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features),
            ('cat', categorical_transformer, categorical_features)],
        remainder="drop")

    preprocessor.fit(concat_data)

    joblib.dump(preprocessor, os.path.join(args.model_dir, "model.joblib"))

    print("saved model!")

def input_fn(input_data, request_content_type):
    """Parse input data payload

    We currently only take csv input. Since we need to process both labelled
    and unlabelled data we first determine whether the label column is present
    by looking at how many columns were provided.
    """
    content_type = request_content_type.lower() if request_content_type else "text/csv"
    content_type = content_type.split(";")[0].strip()

    if isinstance(input_data, str):
        str_buffer = input_data
    else:
        str_buffer = str(input_data, 'utf-8')

    if _is_feature_transform():
        if content_type == 'text/csv':
            # Read the raw input data as CSV.
            df = pd.read_csv(StringIO(input_data), header=None)
            if len(df.columns) == len(feature_columns_names) + 1:
                # This is a labelled example, includes the label
                df.columns = feature_columns_names + [label_column]
            elif len(df.columns) == len(feature_columns_names):
                # This is an unlabelled example.
                df.columns = feature_columns_names
            return df
        else:
            raise ValueError("{} not supported by script!".format(content_type))

    if _is_inverse_label_transform():
        if content_type == 'text/csv' or content_type == 'text/csv; charset=utf-8':
            # Read the raw input data as CSV.
            df = pd.read_csv(StringIO(str_buffer), header=None)
            if len(df.columns) == len(feature_columns_names) + 1:
                # This is a labelled example, includes the label
                df.columns = feature_columns_names + [label_column]
            elif len(df.columns) == len(feature_columns_names):
                # This is an unlabelled example.
                df.columns = feature_columns_names
            return df
        else:
            raise ValueError("{} not supported by script!".format(content_type))

def output_fn(prediction, accept):
    """Format prediction output

    The default accept/content-type between containers for serial inference is JSON.
    We also want to set the ContentType or mimetype as the same value as accept so the next
    container can read the response payload correctly.
    """
    accept = 'text/csv'
    if type(prediction) is not np.ndarray:
        prediction = prediction.toarray()

    if accept == "application/json":
        instances = []
        for row in prediction.tolist():
            instances.append({"features": row})
        json_output = {"instances": instances}
        return worker.Response(json.dumps(json_output), mimetype=accept)
    elif accept == 'text/csv':
        return worker.Response(encoders.encode(prediction, accept), mimetype=accept)
    else:
        raise RuntimeException("{} accept type is not supported by this script.".format(accept))

def predict_fn(input_data, model):
    """Preprocess input data

    We implement this because the default predict_fn uses .predict(), but our model is a preprocessor
    so we want to use .transform().

    The output is returned in the following order:
        rest of features either one hot encoded or standardized
    """
    if _is_feature_transform():
        features = model.transform(input_data)
        if label_column in input_data:
            # Return the label (as the first column) and the set of features.
            return np.insert(features.toarray(), 0, pd.get_dummies(input_data[label_column])['True.'], axis=1)
        else:
            # Return only the set of features
            return features

    if _is_inverse_label_transform():
        features = input_data.iloc[:, 0] > 0.5
        features = features.values
        return features

def model_fn(model_dir):
    """Deserialize fitted model
    """
    if _is_feature_transform():
        preprocessor = joblib.load(os.path.join(model_dir, "model.joblib"))
        return preprocessor
Please help.
As far as I can see, you are referring to a post that is fairly old and has open issues on GitHub here regarding the input source and the configurations.
I encourage you to check out the latest examples here, which show how to visualize Amazon SageMaker machine learning predictions with Amazon QuickSight.
Additionally, if the problem persists, please open a service request with AWS Support, including the job ARN, so the Internal Server Error can be investigated further.

Unable to transcode from audio/l16;rate=48000;channel=1 to one of: audio/x-float-array; rate=16000; channels=1,

I am currently working with SoftBank's Pepper robot and I am trying to use the Watson speech-to-text (STT) service on Pepper's audio buffers, streamed remotely over the WebSocket protocol.
I used the answer to the earlier question "NAO robot remote audio problems" to find a way to access Pepper's audio buffers remotely, and the project https://github.com/ibm-dev/watson-streaming-stt to learn how to use the WebSocket protocol with Watson streaming STT.
However, after I open my WebSocket application and start sending buffers to Watson, after a few sends I receive the error: 'Unable to transcode from audio/l16;rate=48000;channel=1 to one of: audio/x-float-array; rate=16000; channels=1'
Every time I try to send Pepper's audio buffer to Watson, it is unable to understand it.
I compared the data I send with the data sent in the watson-streaming-stt example (which uses PyAudio to stream from a microphone instead of Pepper's buffers) and I don't see any difference. In both cases I am fairly sure that I am sending a string containing raw chunks of bytes, which is what Watson asks for in its documentation.
I send chunks of 8192 bytes with a sample rate of 48 kHz, and I can easily convert Pepper's audio buffer to hex, so I don't understand why Watson can't transcode it.
Here is my code:
# -*- coding: utf-8 -*-
#!/usr/bin/env python

import argparse
import base64
import configparser
import json
import threading
import time
from optparse import OptionParser

import naoqi
import numpy as np
import sys
from threading import Thread

import ssl
import websocket
from websocket._abnf import ABNF

CHANNELS = 1
NAO_IP = "172.20.10.12"


class SoundReceiverModule(naoqi.ALModule):
    """
    Use this object to get call back from the ALMemory of the naoqi world.
    Your callback needs to be a method with two parameter (variable name, value).
    """

    def __init__(self, strModuleName, strNaoIp):
        try:
            naoqi.ALModule.__init__(self, strModuleName);
            self.BIND_PYTHON(self.getName(), "callback");
            self.strNaoIp = strNaoIp;
            self.outfile = None;
            self.aOutfile = [None] * (4 - 1);  # ASSUME max nbr channels = 4
            self.FINALS = []
            self.RECORD_SECONDS = 20
            self.ws_open = False
            self.ws_listening = ""

            # init data for websocket interfaces
            self.headers = {}
            self.userpass = ""  # username and password
            self.headers["Authorization"] = "Basic " + base64.b64encode(
                self.userpass.encode()).decode()
            self.url = ("wss://stream.watsonplatform.net//speech-to-text/api/v1/recognize"
                        "?model=fr-FR_BroadbandModel")
        except BaseException, err:
            print("ERR: abcdk.naoqitools.SoundReceiverModule: loading error: %s" % str(err));
    # __init__ - end

    def __del__(self):
        print("INF: abcdk.SoundReceiverModule.__del__: cleaning everything");
        self.stop();

    def start(self):
        audio = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559);
        self.nNbrChannelFlag = 3;  # ALL_Channels: 0, AL::LEFTCHANNEL: 1, AL::RIGHTCHANNEL: 2; AL::FRONTCHANNEL: 3 or AL::REARCHANNEL: 4.
        self.nDeinterleave = 0;
        self.nSampleRate = 48000;
        audio.setClientPreferences(self.getName(), self.nSampleRate, self.nNbrChannelFlag, self.nDeinterleave);  # setting same as default generate a bug !?!
        audio.subscribe(self.getName());

        # opening websocket app
        self._ws = websocket.WebSocketApp(self.url,
                                          header=self.headers,
                                          on_open=self.on_open,
                                          on_message=self.on_message,
                                          on_error=self.on_error,
                                          on_close=self.on_close)
        sslopt = {"cert_reqs": ssl.CERT_NONE}
        threading.Thread(target=self._ws.run_forever, kwargs={'sslopt': sslopt}).start()
        print("INF: SoundReceiver: started!");

    def stop(self):
        print("INF: SoundReceiver: stopping...");
        audio = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559);
        audio.unsubscribe(self.getName());
        print("INF: SoundReceiver: stopped!");
        print "INF: WebSocket: closing..."
        data = {"action": "stop"}
        self._ws.send(json.dumps(data).encode('utf8'))
        # ... which we need to wait for before we shutdown the websocket
        time.sleep(1)
        self._ws.close()
        print "INF: WebSocket: closed"
        if self.outfile != None:
            self.outfile.close();

    def processRemote(self, nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, buffer):
        """
        This is THE method that receives all the sound buffers from the "ALAudioDevice" module"""
        print "receiving buffer"
        # self.data_to_send = self.data_to_send + buffer
        # print len(self.data_to_send)
        # self.data_to_send = ''.join(["%02X " % ord(x) for x in buffer]).strip()
        self.data_to_send = buffer
        # print("buffer type :", type(data))
        # print("buffer :", buffer)
        #~ print("process!");
        print("processRemote: %s, %s, %s, lendata: %s, data0: %s (0x%x), data1: %s (0x%x)" % (nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, len(buffer), buffer[0], ord(buffer[0]), buffer[1], ord(buffer[1])));
        if self.ws_open == True and self.ws_listening == True:
            print "sending data"
            self._ws.send(self.data_to_send, ABNF.OPCODE_BINARY)
            print "data sent"
            # print self.data_to_send
        aSoundDataInterlaced = np.fromstring(str(buffer), dtype=np.int16);
        #
        aSoundData = np.reshape(aSoundDataInterlaced, (nbOfChannels, nbrOfSamplesByChannel), 'F');
        # print "processRemote over"
    # processRemote - end

    def on_message(self, ws, msg):
        print("message")
        data = json.loads(msg)
        print data
        if "state" in data:
            if data["state"] == "listening":
                self.ws_listening = True
        if "results" in data:
            if data["results"][0]["final"]:
                self.FINALS.append(data)
            # This prints out the current fragment that we are working on
            print(data['results'][0]['alternatives'][0]['transcript'])

    def on_error(self, ws, error):
        """Print any errors."""
        print(error)

    def on_close(self, ws):
        """Upon close, print the complete and final transcript."""
        transcript = "".join([x['results'][0]['alternatives'][0]['transcript']
                              for x in self.FINALS])
        print("transcript :", transcript)
        self.ws_open = False

    def on_open(self, ws):
        """Triggered as soon a we have an active connection."""
        # args = self._ws.args
        print "INF: WebSocket: opening"
        data = {
            "action": "start",
            # this means we get to send it straight raw sampling
            "content-type": "audio/l16;rate=%d;channel=1" % self.nSampleRate,
            "continuous": True,
            "interim_results": True,
            # "inactivity_timeout": 5, # in order to use this effectively
            # you need other tests to handle what happens if the socket is
            # closed by the server.
            "word_confidence": True,
            "timestamps": True,
            "max_alternatives": 3
        }
        # Send the initial control message which sets expectations for the
        # binary stream that follows:
        self._ws.send(json.dumps(data).encode('utf8'))
        # Spin off a dedicated thread where we are going to read and
        # stream out audio.
        print "INF: WebSocket: opened"
        self.ws_open = True

    def version(self):
        return "0.6";


def main():
    """initialisation
    """
    parser = OptionParser()
    parser.add_option("--pip",
                      help="Parent broker port. The IP address or your robot",
                      dest="pip")
    parser.add_option("--pport",
                      help="Parent broker port. The port NAOqi is listening to",
                      dest="pport",
                      type="int")
    parser.set_defaults(
        pip=NAO_IP,
        pport=9559)

    (opts, args_) = parser.parse_args()
    pip = opts.pip
    pport = opts.pport

    # We need this broker to be able to construct
    # NAOqi modules and subscribe to other modules
    # The broker must stay alive until the program exists
    myBroker = naoqi.ALBroker("myBroker",
                              "0.0.0.0",   # listen to anyone
                              0,           # find a free port and use it
                              pip,         # parent broker IP
                              pport)       # parent broker port
    """end of initialisation
    """
    global SoundReceiver
    SoundReceiver = SoundReceiverModule("SoundReceiver", pip)  # thread1
    SoundReceiver.start()

    try:
        while True:
            time.sleep(1)
            print "hello"
    except KeyboardInterrupt:
        print "Interrupted by user, shutting down"
        myBroker.shutdown()
        SoundReceiver.stop()
        sys.exit(0)


if __name__ == "__main__":
    main()
I would be grateful if anyone has an idea of how to get past this error, or what to try in order to get useful information. I first believed that I was sending "wrong" data to Watson, but after many attempts I still have no clue how to fix the problem.
Thanks a lot,
Alex

Run another DAG with TriggerDagRunOperator multiple times

I have a DAG (DAG1) where I copy a bunch of files. I would then like to kick off another DAG (DAG2) for each file that was copied. As the number of files copied will vary per DAG1 run, I would like to essentially loop over the files and call DAG2 with the appropriate parameters.
For example:
with DAG('DAG1',
         description="copy files over",
         schedule_interval="* * * * *",
         max_active_runs=1
         ) as dag:

    t_rsync = RsyncOperator(task_id='rsync_data',
                            source='/source/',
                            target='/destination/')

    t_trigger_preprocessing = TriggerDagRunOperator(task_id='trigger_preprocessing',
                                                    trigger_dag_id='DAG2',
                                                    python_callable=trigger
                                                    )

    t_rsync >> t_trigger_preprocessing
I was hoping to use the python_callable trigger to pull the relevant XCom data from t_rsync and then trigger DAG2, but it's not clear to me how to do this.
I would prefer to put the logic for calling DAG2 here to keep the contents of DAG2 simple (and also to provide stacking semantics via max_active_runs).
I ended up writing my own operator:
class TriggerMultipleDagRunOperator(TriggerDagRunOperator):
    def execute(self, context):
        count = 0
        for dro in self.python_callable(context):
            if dro:
                with create_session() as session:
                    dbag = DagBag(settings.DAGS_FOLDER)
                    trigger_dag = dbag.get_dag(self.trigger_dag_id)
                    dr = trigger_dag.create_dagrun(
                        run_id=dro.run_id,
                        state=State.RUNNING,
                        conf=dro.payload,
                        external_trigger=True)
                    session.add(dr)
                    session.commit()
                    count = count + 1
            else:
                self.log.info("Criteria not met, moving on")
        if count == 0:
            raise AirflowSkipException('No external dags triggered')
with a python_callable like
def trigger_preprocessing(context):
    # 'found' is assumed to be a dict of copied files built elsewhere in the DAG
    for base_filename, _ in found.items():
        exp = context['ti'].xcom_pull(task_ids='parse_config', key='experiment')
        run_id = '%s__%s' % (exp['microscope'], datetime.utcnow().replace(microsecond=0).isoformat())
        dro = DagRunOrder(run_id=run_id)
        d = {
            'directory': context['ti'].xcom_pull(task_ids='parse_config', key='experiment_directory'),
            'base': base_filename,
            'experiment': exp['name'],
        }
        LOG.info('triggering dag %s with %s' % (run_id, d))
        dro.payload = d
        yield dro
    return
and then tie it all together with:
t_trigger_preprocessing = TriggerMultipleDagRunOperator(task_id='trigger_preprocessing',
                                                        trigger_dag_id='preprocessing',
                                                        python_callable=trigger_preprocessing
                                                        )
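For completeness, the DAG being triggered ('preprocessing' here) can read those parameters back out of dag_run.conf. A minimal sketch of the receiving side (my own, written against Airflow 1.x to match the operator above; the task and key names are placeholders):

# Hypothetical receiving DAG: consumes the payload set via dro.payload above.
from datetime import datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator

def handle_file(**context):
    conf = context['dag_run'].conf or {}
    # keys match the dict built in trigger_preprocessing()
    directory = conf.get('directory')
    base = conf.get('base')
    experiment = conf.get('experiment')
    print("preprocessing %s/%s for experiment %s" % (directory, base, experiment))

with DAG('preprocessing',
         start_date=datetime(2021, 1, 1),
         schedule_interval=None,        # only run when triggered externally
         max_active_runs=4) as dag:     # throttle concurrent per-file runs

    t_process = PythonOperator(task_id='process_file',
                               python_callable=handle_file,
                               provide_context=True)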

Wipe out dropout operations from TensorFlow graph

I have a trained, frozen graph that I am trying to run on an ARM device. Basically, I am using contrib/pi_examples/label_image, but with my network instead of Inception. My network was trained with dropout, which is now causing me trouble:
Invalid argument: No OpKernel was registered to support Op 'Switch' with these attrs. Registered kernels:
device='CPU'; T in [DT_FLOAT]
device='CPU'; T in [DT_INT32]
device='GPU'; T in [DT_STRING]
device='GPU'; T in [DT_BOOL]
device='GPU'; T in [DT_INT32]
device='GPU'; T in [DT_FLOAT]
[[Node: l_fc1_dropout/cond/Switch = Switch[T=DT_BOOL](is_training_pl, is_training_pl)]]
One solution I can see is to build a TF static library that includes the missing op. On the other hand, it might be a better idea to eliminate the dropout ops from the graph entirely, to make it simpler and faster. Is there a way to do that?
Thanks.
#!/usr/bin/env python2

import argparse
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2


def print_graph(input_graph):
    for node in input_graph.node:
        print "{0} : {1} ( {2} )".format(node.name, node.op, node.input)


def strip(input_graph, drop_scope, input_before, output_after, pl_name):
    input_nodes = input_graph.node
    nodes_after_strip = []
    for node in input_nodes:
        print "{0} : {1} ( {2} )".format(node.name, node.op, node.input)

        if node.name.startswith(drop_scope + '/'):
            continue

        if node.name == pl_name:
            continue

        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(node)
        if new_node.name == output_after:
            new_input = []
            for node_name in new_node.input:
                if node_name == drop_scope + '/cond/Merge':
                    new_input.append(input_before)
                else:
                    new_input.append(node_name)
            del new_node.input[:]
            new_node.input.extend(new_input)
        nodes_after_strip.append(new_node)

    output_graph = graph_pb2.GraphDef()
    output_graph.node.extend(nodes_after_strip)
    return output_graph


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-graph', action='store', dest='input_graph')
    parser.add_argument('--input-binary', action='store_true', default=True, dest='input_binary')
    parser.add_argument('--output-graph', action='store', dest='output_graph')
    parser.add_argument('--output-binary', action='store_true', dest='output_binary', default=True)
    args = parser.parse_args()

    input_graph = args.input_graph
    input_binary = args.input_binary
    output_graph = args.output_graph
    output_binary = args.output_binary

    if not tf.gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        return

    input_graph_def = tf.GraphDef()
    mode = "rb" if input_binary else "r"
    with tf.gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            text_format.Merge(f.read().decode("utf-8"), input_graph_def)

    print "Before:"
    print_graph(input_graph_def)

    output_graph_def = strip(input_graph_def, u'l_fc1_dropout', u'l_fc1/Relu', u'prediction/MatMul', u'is_training_pl')

    print "After:"
    print_graph(output_graph_def)

    if output_binary:
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
    else:
        with tf.gfile.GFile(output_graph, "w") as f:
            f.write(text_format.MessageToString(output_graph_def))
    print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == "__main__":
    main()
How about this as a more general solution:
for node in temp_graph_def.node:
    for idx, i in enumerate(node.input):
        input_clean = node_name_from_input(i)
        if input_clean.endswith('/cond/Merge') and input_clean.split('/')[-3].startswith('dropout'):
            identity = node_from_map(input_node_map, i).input[0]
            assert identity.split('/')[-1] == 'Identity'
            parent = node_from_map(input_node_map, node_from_map(input_node_map, identity).input[0])
            pred_id = parent.input[1]
            assert pred_id.split('/')[-1] == 'pred_id'
            good = parent.input[0]
            node.input[idx] = good
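If it helps, here is how that loop might be packaged into a self-contained function. This is my own sketch, not part of the answer above; it assumes the helpers are node_name_from_input and node_from_map from tensorflow.python.tools.optimize_for_inference_lib (TF 1.x), and that the dropout blocks follow the usual cond/Switch/Merge layout produced by tf.layers.dropout with a training placeholder. The now-unreferenced dropout nodes can afterwards be pruned, e.g. with tf.graph_util.extract_sub_graph.

# Hedged sketch: rewire every consumer of a dropout cond/Merge node so that it
# reads the tensor that originally fed the dropout block instead.
import tensorflow as tf
from tensorflow.python.tools.optimize_for_inference_lib import (
    node_from_map, node_name_from_input)

def strip_dropout(graph_def):
    input_node_map = {node.name: node for node in graph_def.node}
    out_graph_def = tf.GraphDef()
    out_graph_def.CopyFrom(graph_def)
    for node in out_graph_def.node:
        for idx, i in enumerate(node.input):
            input_clean = node_name_from_input(i)
            if (input_clean.endswith('/cond/Merge')
                    and input_clean.split('/')[-3].startswith('dropout')):
                # Merge -> Identity -> Switch; the Switch's first input is the
                # tensor that fed the dropout block.
                identity = node_from_map(input_node_map, i).input[0]
                assert identity.split('/')[-1] == 'Identity'
                parent = node_from_map(input_node_map,
                                       node_from_map(input_node_map, identity).input[0])
                pred_id = parent.input[1]
                assert pred_id.split('/')[-1] == 'pred_id'
                node.input[idx] = parent.input[0]
    return out_graph_def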

In Django how can I run a custom clean function on fixture data during import and validation?

In a ModelForm I can write a clean_<field_name> member function to automatically validate and clean up data entered by a user, but what can I do about dirty JSON or CSV files (fixtures) during a manage.py loaddata?
Fixtures loaded with loaddata are assumed to contain clean data that doesn't need validation (usually as the inverse operation to a prior dumpdata), so the short answer is that loaddata isn't the approach you want if you need to clean your inputs.
However, you can probably reuse some of the underpinnings of loaddata while implementing your custom data-cleaning code: it is easy to script something with the Django serialization libs that reads in your existing data files and then saves the resulting objects normally once the data has been cleaned up.
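For example, here is a rough sketch (mine, not from the answer) of that approach using django.core.serializers: read the fixture, clean each object, then save it through the normal model path. The fixture path and the clean_zip_code call are placeholders for whatever cleaning hooks your models define, and django.setup() is only needed on newer Django versions:

# Hedged sketch: load a JSON fixture manually so each object can be cleaned and
# validated before saving (plain loaddata skips this step).
import os
import django
from django.core import serializers

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'XYZapp.settings')  # placeholder
django.setup()

def load_and_clean(fixture_path='fixtures/Entity.json'):  # placeholder path
    with open(fixture_path) as f:
        for deserialized in serializers.deserialize('json', f):
            obj = deserialized.object
            if hasattr(obj, 'clean_zip_code'):          # placeholder cleaning hook
                obj.zip_code = obj.clean_zip_code(obj.zip_code)
            obj.full_clean()                            # run model validation too
            obj.save()

if __name__ == '__main__':
    load_and_clean()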
In case others want to do something similar, I defined a model method to do the cleaning (so it can be called from ModelForms)
import re

MAX_ZIPCODE_DIGITS = 9
MIN_ZIPCODE_DIGITS = 5

def clean_zip_code(self, s=None):
    # s = str(s or self.zip_code)
    if not s:
        return None
    s = re.sub(r"\D", "", s)
    if len(s) > self.MAX_ZIPCODE_DIGITS:
        s = s[:self.MAX_ZIPCODE_DIGITS]
    if len(s) in (self.MIN_ZIPCODE_DIGITS - 1, self.MAX_ZIPCODE_DIGITS - 1):
        s = '0' + s  # FIXME: deal with other intermediate lengths
    if len(s) >= self.MAX_ZIPCODE_DIGITS:
        s = s[:self.MIN_ZIPCODE_DIGITS] + '-' + s[self.MIN_ZIPCODE_DIGITS:]
    return s
Then I wrote a standalone Python script to clean up my legacy JSON files using any clean_ methods found on the models:
import os, json

def clean_json(app='XYZapp', model='Entity', fields='zip_code', cleaner_prefix='clean_'):
    # Set the DJANGO_SETTINGS_MODULE environment variable.
    os.environ['DJANGO_SETTINGS_MODULE'] = app + ".settings"
    settings = __import__(app + '.settings').settings
    models = __import__(app + '.models').models

    fpath = os.path.join(settings.SITE_PROJECT_PATH, 'fixtures', model + '.json')
    if isinstance(fields, (str, unicode)):
        fields = [fields]

    Ns = []
    for field in fields:
        try:
            instance = getattr(models, model)()
        except AttributeError:
            print 'No model named %s could be found' % (model,)
            continue
        try:
            cleaner = getattr(instance, cleaner_prefix + field)
        except AttributeError:
            print 'No cleaner method named %s.%s could be found' % (model, cleaner_prefix + field)
            continue
        print 'Cleaning %s using %s.%s...' % (fpath, model, cleaner.__name__)
        fin = open(fpath, 'r')
        if fin:
            l = json.load(fin)
            before = len(l)
            cleans = 0
            for i in range(len(l)):
                if 'fields' in l[i] and field in l[i]['fields']:
                    l[i]['fields'][field] = cleaner(l[i]['fields'][field])  # cleaner returns None to delete records
                    cleans += 1
            fin.close()
            after = len(l)
            assert after > .5 * before
            Ns += [(before, after, cleans)]
            print 'Writing %d/%d (new/old) records after %d cleanups...' % Ns[-1]
            with open(fpath, 'w') as fout:
                fout.write(json.dumps(l, indent=2, sort_keys=True))
    return Ns

if __name__ == '__main__':
    clean_json()
