Macros that generate code from a for-loop - hy

This example is a little contrived. The goal is to create a macro that loops over some values and programmatically generates some code.
A common pattern in Python is to initialize the properties of an object at calling time as follows:
(defclass hair [foo bar]
(defn __init__ [self]
(setv self.foo foo)
(setv self.bar bar)))
This correctly translates with hy2py to
class hair(foo, bar):
def __init__(self):
self.foo = foo
self.bar = bar
return None
I know there are Python approaches to this problem including attr.ib and dataclasses. But as a simplified learning exercise I wanted to approach this with a macro.
This is my non-working example:
(defmacro self-set [&rest args]
(for [[name val] args]
`(setv (. self (read-str ~name)) ~val)))
(defn fur [foo bar]
(defn __init__ [self]
(self-set [["foo" foo] ["bar" bar]])))
But this doesn't expand to the original pattern. hy2py shows:
from hy.core.language import name
from hy import HyExpression, HySymbol
import hy
def _hy_anon_var_1(hyx_XampersandXname, *args):
for [name, val] in args:
HyExpression([] + [HySymbol('setv')] + [HyExpression([] + [HySymbol
('.')] + [HySymbol('self')] + [HyExpression([] + [HySymbol(
'read-str')] + [name])])] + [val])
hy.macros.macro('self-set')(_hy_anon_var_1)
def fur(foo, bar):
def __init__(self, foo, bar):
return None
What am I doing wrong?

for forms always return None. So, your loop is constructing the (setv ...) forms you request and then throwing them away. Instead, try lfor, which returns a list of results, or gfor, which returns a generator. Note also in the below example that I use do to group the generated forms together, and I've moved a ~ so that the read-str happens at compile-time, as it must in order for . to work.
(defmacro self-set [&rest args]
`(do ~#(gfor
[name val] args
`(setv (. self ~(read-str name)) ~val))))
(defclass hair []
(defn __init__ [self]
(self-set ["foo" 1] ["bar" 2])))
(setv h (hair))
(print h.bar) ; 2

Related

merge() method on AggregateFunction in Flink

I want to know when the merge() method on AggregateFunction gets called. From what I've understood from the answers here and here, it's applicable to Session Windows only and occurs on every event that can be merged with the previous window, since every event for a Session Window creates a new Window. I'm using PyFlink and would appreciate any help by providing an example.
Let's take an example that I put together from the documentation for the AverageAggregate function and some custom code:
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
class AverageAggregate(AggregateFunction):
def create_accumulator(self) -> Tuple[int, int]:
return 0, 0
def add(self, value: Tuple[str, int], accumulator: Tuple[int, int]) -> Tuple[int, int]:
return accumulator[0] + value[1], accumulator[1] + 1
def get_result(self, accumulator: Tuple[int, int]) -> float:
return accumulator[0] / accumulator[1]
def merge(self, a: Tuple[int, int], b: Tuple[int, int]) -> Tuple[int, int]:
return a[0] + b[0], a[1] + b[1]
if __name__ == '__main__':
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
# define the source
data_stream = env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()]))
# define the watermark strategy
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(MyTimestampAssigner())
ds = (
data_stream
.assign_timestamps_and_watermarks(watermark_strategy)
.key_by(lambda x: x[0], key_type=Types.STRING())
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(3)))
.aggregate(AverageAggregate())
)
# print the results
ds.print()
# submit for execution
env.execute()
From my understanding, the merge() method should have run on the second event ('hi', 2) since that is within the window size of 3 ms and then again for the input ('hi', 4) and so on. But while executing the code, the merge() method doesn't even fire once. So if anyone can modify the sample code above to show merge() being executed and explain how it works would be greatly appreciated.
While it's not a direct PyFlink example, you can have a look at the DataStream API recipe at https://docs.immerok.cloud/docs/how-to-guides/development/using-session-windows/#merging-data-in-one-session-window for info on the merge() method.
Disclaimer: I work for Immerok

OpenMDAO 1.5 : Running DOEdriver with array as desvar

I have used the example described here (http://openmdao.readthedocs.org/en/1.5.0/usr-guide/tutorials/doe-drivers.html?highlight=driver) to show my problem. I want to use the same approach for one component where "params" are arrays and no longer floats. See example below
from openmdao.api import IndepVarComp, Group, Problem, ScipyOptimizer, ExecComp, DumpRecorder, Component
from openmdao.drivers.latinhypercube_driver import LatinHypercubeDriver, OptimizedLatinHypercubeDriver
import numpy as np
class Paraboloid(Component):
""" Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """
def __init__(self):
super(Paraboloid, self).__init__()
self.add_param('x', val=0.0)
self.add_param('y', val=0.0)
self.add_output('f_xy', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
"""f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = params['x']
y = params['y']
unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
def linearize(self, params, unknowns, resids):
#""" Jacobian for our paraboloid."""
x = params['x']
y = params['y']
J = {}
J['f_xy', 'x'] = 2.0*x - 6.0 + y
J['f_xy', 'y'] = 2.0*y + 8.0 + x
return J
class ParaboloidArray(Component):
""" Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """
def __init__(self):
super(ParaboloidArray, self).__init__()
self.add_param('X', val=np.array([0., 0.]))
self.add_output('f_xy', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
"""f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = params['X'][0]
y = params['y'][1]
unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
top = Problem()
root = top.root = Group()
root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
root.add('comp', Paraboloid(), promotes=['*'])
top.driver = OptimizedLatinHypercubeDriver(num_samples=4, seed=0, population=20, generations=4, norm_method=2)
top.driver.add_desvar('x', lower=-50.0, upper=50.0)
top.driver.add_desvar('y', lower=-50.0, upper=50.0)
top.driver.add_objective('f_xy')
top.setup()
top.run()
top.cleanup()
###########################
print("case float ok")
top = Problem()
root = top.root = Group()
root.add('p1', IndepVarComp('X', np.array([50., 50.])), promotes=['*'])
root.add('comp', ParaboloidArray(), promotes=['*'])
top.driver = OptimizedLatinHypercubeDriver(num_samples=4, seed=0, population=20, generations=4, norm_method=2)
top.driver.add_desvar('X', lower=np.array([-50., -50.]), upper=np.array([50., 50.]))
top.driver.add_objective('f_xy')
top.setup()
top.run()
top.cleanup()
I obtain the following error :
Traceback (most recent call last):
File "C:\Program Files (x86)\Wing IDE 101 5.0\src\debug\tserver\_sandbox.py", line 102, in <module>
File "D:\tlefeb\Anaconda2\Lib\site-packages\openmdao\core\problem.py", line 1038, in run
self.driver.run(self)
File "D:\tlefeb\Anaconda2\Lib\site-packages\openmdao\drivers\predeterminedruns_driver.py", line 108, in run
for run in runlist:
File "D:\tlefeb\Anaconda2\Lib\site-packages\openmdao\drivers\latinhypercube_driver.py", line 57, in _build_runlist
design_var_buckets = self._get_buckets(bounds['lower'], bounds['upper'])
File "D:\tlefeb\Anaconda2\Lib\site-packages\openmdao\drivers\latinhypercube_driver.py", line 101, in _get_buckets
bucket_walls = np.linspace(low, high, self.num_samples + 1)
File "D:\tlefeb\Anaconda2\Lib\site-packages\numpy\core\function_base.py", line 102, in linspace
if step == 0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
Did I misunderstand something in my way of coding?
I get a different error than you, using the latest OpenMDAO master, but I get an error nonetheless. There isn't anything wrong with the model, but rather there are some bugs with using array variables for DOEs. I've added a bug-fix story to the OpenMDAO backlog, which we'll hopefully be able to deal with in the next couple weeks. We'd gladly accept a pull request if you develop a fix before we get to it though.

NoneType Buffer interface error in windows 8

I am making a text editor, so you can imagine that there is a lot of manipulating text files. When I double click on the .py file to run it in python.exe in windows, it throws an error of 'NoneType does not support the Buffer interface' As I have only ever heard "buffer" as a programming term in the context of text files, I believe the problem is somewhere in there.
Here is the code:
from tkinter import *
from tkinter.filedialog import *
from tkinter.messagebox import *
import os
os.chdir(os.getenv('HOME'))
current=None
backup=''
def newfile():
def create_file(entry):
global current
global root
nonlocal create_in
nonlocal name_it
current = open(entry.get(),'w')
root.title(create_in+'/'+current.name+' - Aspus')
name_it.destroy()
create_in=askdirectory()
if create_in!='':
global root
os.chdir(create_in)
name_it=Tk()
name_it.title("Name the File?")
prompt=Label(name_it, text="Enter name for new file:")
prompt.grid(row=0)
entry=Entry(name_it)
entry.grid(row=1)
entry.insert(0, "Untitled.txt")
create=Button(name_it, text="Create", command = lambda: create_file(entry))
create.grid(row=1, column=3)
name_it.mainloop()
def openfile(master):
global current
global backup
opening=askopenfilename()
file=open(opening, 'r')
insert=file.read()
backup=file.read()
file.close()
file=open(opening, 'w')
current=file
master.title(current.name+' - Aspus')
return insert
def savefile(entry):
global current
if current!=None:
current.write(entry.get('1.0', END))
elif current==None:
newfile()
current.write(entry.get('1.0', END))
def ask_save():
global root
global current
global main
if current!=None:
save_exit=askyesnocancel("Save Before Exit?", "Do you want to save before exiting?")
if save_exit==True:
a=current.name
current.close()
current=open(a, 'w')
savefile(main)
current.close()
root.destroy()
exit()
elif save_exit==False:
a=current.name
current.close()
current=open(a, 'w')
current.write(backup)
current.close()
root.destroy()
exit()
elif current==None:
if main.get('0.1', END).strip()!='':
save_exit=askyesnocancel("Save Before Exit?", "Do you want to save before exiting?")
if save_exit==True:
newfile()
savefile()
current.close()
root.destroy()
elif save_exit==False:
root.destroy()
else:
root.destroy()
def setpgwidth():
def adjust(entry):
global main
new_width=entry.get()
try:
main.config(width=int(entry.get()))
except:
showerror("Invalid width", "You entered an invalid width. Expected an integer.")
entry.delete(0, END)
else:
main.pack(expand=Y, fill=Y, side=LEFT)
entry.master.destroy()
width=Tk()
width.title("Set Page Width")
prompt=Label(width, text="Enter new page width:")
prompt.grid(row=0, column=0, columnspan=2)
new=Entry(width)
new.grid(row=1, column=0)
submit=Button(width, text="Submit", command=lambda: adjust(new))
submit.grid(row=1, column=1)
width.mainloop()
root=Tk()
root.title("Aspus Text Editor")
#create main text widget
main=Text(root, wrap=WORD)
main.pack(expand=True, fill=BOTH, side=LEFT)
#create scrollbar
scroll=Scrollbar(root)
scroll.pack(side=RIGHT, fill=Y)
#configure scrollbar
scroll.config(command=main.yview)
main.config(yscrollcommand=scroll.set)
#Creating menus
menu=Menu(root)
root.config(menu=menu)
menu.add_command(label="New File", command=newfile)
menu.add_command(label="Open File", command=lambda: main.insert(END, openfile(root)))
menu.add_command(label="Save File", command=lambda: savefile(main))
formatmenu=Menu(menu)
menu.add_cascade(label="Format", menu=formatmenu)
formatmenu.add_command(label="Set Page Width", command=setpgwidth)
menu.add_command(label="Quit", command=ask_save)
root.protocol("WM_DELETE_WINDOW", ask_save)
root.mainloop()
Does anyone know why this is happening and how to avoid it?
At least part of the problem is these two statements:
insert=file.read()
backup=file.read()
Because read() reads the whole file, backup isn't going to be what you think it is.
The first thing to do is step through the code with pdb, or add some print statements to validate that your data is what you think it is. You're relying heavily on global variables which can easily be changed in an order different than what you expect.

How do I create an abstract function that has a given derivative in Sage?

I want to have abstract $f$ function that has a given derivative. But, when I try to substitute it to D[0](f)(t), Sage says:
NameError: name 'D' is not defined
R.<t,u1,u2> = PolynomialRing(RR,3,'t' 'u1' 'u2')
tmp1 = r1*k1*u1-(r1/k1)*k1^2*u1^2-r1*b12/k1*k1*u1*k2*u2
f=function('f',t)
a=diff(f)
a.substitute_expression((D[0](f)(t))==tmp1)
tmp1.integral() won't do the job. I also can't substitute the integral, although it gives no warning.
%var u10, u20,r1,r2,k1,k2,b12,b21,t
u1=function('u1',t)
u2=function('u2',t)
tmp1 = r1*k1*u1-(r1/k1)*k1^2*u1^2-r1*b12/k1*k1*u1*k2*u2
tmp2 = r2*u2*k2-r2/k2*k2^2*u2^2-((r2*b21)/k2)*u1*u2*k1*k2
v1=integral(tmp1,t)
v2=integral(tmp2,t)
sep1=tmp1.substitute_expression(u1==v1,u2==v2)
sep2=tmp2.substitute_expression(u1==v1,u2==v2)
trial=diff(sep1,t)
trial.substitute_expression((integrate(-b12*k2*r1*u1(t)*u2(t) - k1*r1*u1(t)^2 + k1*r1*u1(t), t))==v1, (integrate(-b12*k2*r1*u1(t)*u2(t) - k1*r1*u1(t)^2 + k1*r1*u1(t), t))==v2)
Now let's go back to original version:
d1=diff(tmp1,t)
d1.substitute_function((D[0](u1)(t)),tmp1)
Error in lines 13-13
Traceback (most recent call last):
File "/projects/b501d31c-1f5d-48aa-bee3-73a2dcb30a39/.sagemathcloud/sage_server.py", line 733, in execute
exec compile(block+'\n', '', 'single') in namespace, locals
File "", line 1, in <module>
NameError: name 'D' is not defined
I don't know if this is really what you are looking for. But it offers at least some semblance of it.
sage: def myfunc(self, *args, **kwds): return e^(args[0])^2
sage: foo = function('foo', nargs=1, tderivative_func=myfunc)
sage: foo(x)
foo(x)
sage: foo(x).diff(x)
e^(x^2)
sage: foo(x).diff(x,3)
4*x^2*e^(x^2) + 2*e^(x^2)
You'll need to read the documentation of function (gotten by typing function?) very carefully to use this well, especially the comment
Note that custom methods must be instance methods, i.e., expect the
instance of the symbolic function as the first argument.
The doc is quite subtle and could use some improvement.

Running join on Maybe Relation

I have a model
Assignment
blah Text
....
and a model
File
assignmentId AssignmentId Maybe
...
and I want to get all the files associated with an assignment in a join query. I have tried Esqueleto and runJoin with selectOneMany but haven't had any luck, so I am considering not using a join, or using rawSql. That really doesn't seem like a good idea, but I can't figure this out. Is there any support for that feature?
Update, working example:
{-# LANGUAGE PackageImports, OverloadedStrings, ConstraintKinds #-}
module Handler.HTest where
import Import
import "esqueleto" Database.Esqueleto as Esql
import "monad-logger" Control.Monad.Logger (MonadLogger)
import "resourcet" Control.Monad.Trans.Resource (MonadResourceBase)
import qualified Data.List as L
getFilesByAssignment :: (PersistQuery (SqlPersist m), MonadLogger m
, MonadResourceBase m) =>
Text -> SqlPersist m [Entity File]
getFilesByAssignment myAssign = do
result <- select $
from $ \(assign `InnerJoin` file) -> do
on (just (assign ^. AssignmentId)
Esql.==. file ^. FileAssignmentId)
where_ (assign ^. AssignmentBlah Esql.==. val myAssign)
return (assign, file)
return $ map snd (result :: [(Entity Assignment, Entity File)])
(.$) = flip ($)
getTestR :: Handler RepHtml
getTestR = do
entFiles <- runDB $ getFilesByAssignment "test"
defaultLayout $ do
setTitle "Test page"
entFiles .$ map (show . unKey . entityKey)
.$ L.intercalate ", "
.$ toHtml
.$ toWidget

Resources