OpenFlow - How are ICMP messages handled

I am running a Ryu controller and a Mininet instance with 2 hosts and 1 switch like below.
H1---S---H2
Here is the code in my Ryu controller:
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types

class SimpleSwitch13(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
Basically, the switch flow table is empty. In this case, when I run h1 ping h2 from my Mininet console and record the packet exchanges, this is what I get in Wireshark on host h1.
There is no router in the Mininet instance. How am I receiving an ICMP Destination Host Unreachable message from the same host that initiated the ping?

The app code you posted is not complete.
You can get the complete simple_switch_13.py from the osrg GitHub repository.
Take a look; it looks like this:
class SimpleSwitch13(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # install the table-miss flow entry: send unmatched packets to the controller
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
This simple_switch_13.py app only handles layer 2 forwarding, which is your case.
As you can see, once the connection is established, switch_features_handler receives the switch-features event and installs a table-miss flow on the switch that sends every unmatched packet to the controller.
In normal operation, when the controller receives a PACKET_IN, it checks whether the destination MAC is already in mac_to_port. If it is, the packet is output on that port and, at the same time, a flow is installed whose match fields are the in_port and the destination MAC; otherwise (the MAC is not in the table), the action is set to FLOOD by assigning out_port = OFPP_FLOOD.
That's the case for layer 2 switching.
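To make the learning step concrete (an illustration of mine, not from the original answer): assuming Mininet was started with --mac so h1 and h2 get 00:00:00:00:00:01 and 00:00:00:00:00:02, and they sit on switch ports 1 and 2, the dictionary would look roughly like this after the first exchange:
# Hypothetical state of mac_to_port after h1 and h2 have each sent a packet;
# the MAC addresses and port numbers assume Mininet's --mac option and a single switch.
mac_to_port = {
    1: {                           # datapath id (dpid) of the switch
        '00:00:00:00:00:01': 1,    # h1's MAC, learned on port 1
        '00:00:00:00:00:02': 2,    # h2's MAC, learned on port 2
    }
}
A later packet destined to 00:00:00:00:00:02 is then sent out port 2 instead of being flooded, and a matching flow is installed on the switch.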
For ICMP message handling in layer 3 switching, you need to read the rest_router.py code, which is a lot more complicated.

You get ICMP Destination Host Unreachable because h1's ARP request is never answered by h2: with an empty flow table and no table-miss entry, the switch simply drops the ARP request, so it never reaches h2.
Since h1 gets no ARP reply, it cannot resolve h2's MAC address, and the ICMP error message is generated by h1's own IP stack.
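As a quick sanity check (my addition, not part of the original answer), you can confirm this from the Mininet CLI by installing a flow on the switch by hand and watching the ping start to work; this sketch assumes the switch is named s1 (the Mininet default) and is backed by Open vSwitch speaking OpenFlow 1.3:
mininet> sh ovs-ofctl -O OpenFlow13 dump-flows s1                        # empty table: ARP is dropped
mininet> sh ovs-ofctl -O OpenFlow13 add-flow s1 priority=0,actions=NORMAL  # make s1 act as a plain learning switch
mininet> h1 ping -c 2 h2                                                 # ping now succeeds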

Related

Using v4l2sink with DeepStream

I'm working on DeepStream code to pass RTSP streams to virtual V4L2 devices (I used v4l2loopback to create the virtual devices). I have code that runs without errors; however, I can't read from the V4L2 device.
Does anyone know of working DeepStream code where v4l2sink is used? I have tried to find an example without success.
Here is my code. The part that writes to v4l2sink is in the function create_v4l2sink_branch():
import sys
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
import math
import sys
import common.utils as DS_UTILS
import pyds
from common.bus_call import bus_call
from common.FPS import PERF_DATA
from common.is_aarch_64 import is_aarch64
from gi.repository import GLib, Gst, GstRtspServer

CODEC = "H264"
BITRATE = 4000000
MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 400000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1
MUX_SYNC_INPUTS = 0

ds_loop = None
perf_data = None

def terminate_pipeline(u_data):
    global ds_loop
    pass
    # if global_config.request_to_stop == True:
    #     print("Aborting pipeline by request")
    #     ds_loop.quit()
    #     return False
    return True

def create_onscreen_branch(pipeline, gst_elem, index):
    print("Creating EGLSink")
    sink = DS_UTILS.create_gst_element("nveglglessink", f"nvvideo-renderer-{index}")
    sink.set_property('sync', 0)
    sink.set_property('async', 1)
    pipeline.add(sink)
    if is_aarch64():
        transform = DS_UTILS.create_gst_element("nvegltransform", f"nvegl-transform{index}")
        pipeline.add(transform)
        gst_elem.link(transform)
        transform.link(sink)
    else:
        gst_elem.link(sink)
    sink.set_property("qos", 0)

def create_v4l2sink_branch(pipeline, gst_elem, index, output_video_device):
    # Create a caps filter
    caps = DS_UTILS.create_gst_element("capsfilter", f"filter-{index}")
    #caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    #caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))
    identity = DS_UTILS.create_gst_element("identity", f"identity-{index}")
    identity.set_property("drop-allocation", 1)
    nvvidconv = DS_UTILS.create_gst_element("nvvideoconvert", f"convertor-{index}")
    sink = DS_UTILS.create_gst_element("v4l2sink", f"v4l2sink-{index}")
    sink.set_property('device', output_video_device)
    sink.set_property("sync", 0)
    sink.set_property("async", 1)
    pipeline.add(caps)
    pipeline.add(nvvidconv)
    pipeline.add(identity)
    pipeline.add(sink)
    gst_elem.link(caps)
    caps.link(nvvidconv)
    nvvidconv.link(identity)
    identity.link(sink)

def run_pipeline(rtsp_v4l2_pairs):
    # Check input arguments
    number_sources = len(rtsp_v4l2_pairs)
    perf_data = PERF_DATA(number_sources)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
        return

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = DS_UTILS.create_gst_element("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)

    for i in range(number_sources):
        uri_name = rtsp_v4l2_pairs[i][0]
        print(" Creating source_bin {} --> {}".format(i, uri_name))
        is_live = uri_name.find("rtsp://") == 0
        source_bin = DS_UTILS.create_source_bin(i, uri_name)
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # streammux setup
    if is_live:
        print(" At least one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property("batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC)
    #streammux.set_property("sync-inputs", MUX_SYNC_INPUTS)

    queue = DS_UTILS.create_gst_element("queue", "queue1")
    pipeline.add(queue)
    nvstreamdemux = DS_UTILS.create_gst_element("nvstreamdemux", "nvstreamdemux")
    pipeline.add(nvstreamdemux)

    # linking
    streammux.link(queue)
    queue.link(nvstreamdemux)

    for i in range(number_sources):
        queue = DS_UTILS.create_gst_element("queue", f"queue{2+i}")
        pipeline.add(queue)
        demuxsrcpad = nvstreamdemux.get_request_pad(f"src_{i}")
        if not demuxsrcpad:
            sys.stderr.write("Unable to create demux src pad \n")
        queuesinkpad = queue.get_static_pad("sink")
        if not queuesinkpad:
            sys.stderr.write("Unable to create queue sink pad \n")
        demuxsrcpad.link(queuesinkpad)
        #create_onscreen_branch(pipeline=pipeline, gst_elem=queue, index=i)
        create_v4l2sink_branch(pipeline=pipeline, gst_elem=queue, index=i, output_video_device=rtsp_v4l2_pairs[i][1])

    # to terminate the pipeline
    GLib.timeout_add_seconds(1, terminate_pipeline, 0)
    # display FPS
    GLib.timeout_add(5000, perf_data.perf_print_callback)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    ds_loop = loop
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    print("Starting pipeline")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Pipeline ended")
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    import json
    import sys
    pairs = [
        ("rtsp://192.168.1.88:554/22", "/dev/video6")
    ]
    run_pipeline(rtsp_v4l2_pairs=pairs)
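Not from the original post, but as a way to pin down where "I can't read the V4L2 device" fails, a minimal standalone sketch that simply tries to play back whatever the pipeline above writes to the loopback device (assuming the same /dev/video6 as in the code):
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)
# Hypothetical quick check: render frames from the v4l2loopback device the
# DeepStream pipeline is writing to. If this shows nothing, the problem is on
# the writing side; if it plays, the problem is in the consuming application.
check = Gst.parse_launch("v4l2src device=/dev/video6 ! videoconvert ! autovideosink")
check.set_state(Gst.State.PLAYING)
loop = GLib.MainLoop()
try:
    loop.run()
except KeyboardInterrupt:
    pass
check.set_state(Gst.State.NULL)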

using usocket seems to halt the loop (micropython)

I'm trying to code a simple program for an ESP32 board.
My main program is fairly simple and it has to run in a loop.
On the side, the device also needs to be able to respond to HTTP requests with a very simple response.
This is my attempt (a rework of https://randomnerdtutorials.com/micropython-esp32-esp8266-bme280-web-server/):
try:
    import usocket as socket
except:
    import socket
from micropython import const
import time

REFRESH_DELAY = const(60000)  # milliseconds

def do_connect():
    import network
    wlan = network.WLAN(network.STA_IF)
    wlan.active(True)
    if not wlan.isconnected():
        print('connecting to network...')
        wlan.config(dhcp_hostname=HOST)
        wlan.connect('SSID', 'PSWD')
        while not wlan.isconnected():
            pass
    print('network config:', wlan.ifconfig())

import json
import esp
esp.osdebug(None)
import gc
gc.collect()

do_connect()

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, SENSOR_SCKT_PORT))
s.listen(5)

prevRun = 0
i = 0
while True:
    print("iteration #" + str(i))
    i += 1
    # run every 60 seconds
    curRun = int(round(time.time() * 1000))
    if curRun - prevRun >= REFRESH_DELAY:
        prevRun = curRun
        # MAIN PROGRAM
        # ......
        # whole bunch of code
        # ....
    # run continuously:
    try:
        if gc.mem_free() < 102000:
            gc.collect()
        conn, addr = s.accept()
        conn.settimeout(3.0)
        print('Got a connection from %s' % str(addr))
        request = conn.recv(1024)
        conn.settimeout(None)
        request = str(request)
        # print('Content = %s' % request)
        measurements = 'some json stuff'
        conn.send('HTTP/1.1 200 OK\n')
        conn.send('Content-Type: text/html\n')
        conn.send('Connection: close\n\n')
        conn.send(measurements)
        conn.close()
    except OSError as e:
        conn.close()
        print('Connection closed')
What happens is that I only get iteration #0, and then the while True loop halts.
If I hit this server with an HTTP request, I get a correct response, AND the loop advances to iteration #1 and #2 (no idea why it thinks I sent it two requests).
So it seems that socket.listen(5) is halting the while loop.
Is there any way to avoid this? Any other solution?
I don't think that threading is an option here.
The problem is that s.accept() is a blocking call: it won't return until it receives a connection. This is why it pauses your loop.
The easiest solution is probably to check whether or not a connection is waiting before calling s.accept(); you can do this using either select.select or select.poll. I prefer the select.poll API, which would end up looking something like this:
import esp
import gc
import json
import machine
import network
import select
import socket
import time

from micropython import const

HOST = '0.0.0.0'
SENSOR_SCKT_PORT = const(1234)

REFRESH_DELAY = const(60000)  # milliseconds

def wait_for_connection():
    print('waiting for connection...')
    wlan = network.WLAN(network.STA_IF)
    while not wlan.isconnected():
        machine.idle()
    print('...connected. network config:', wlan.ifconfig())

esp.osdebug(None)
gc.collect()

wait_for_connection()

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, SENSOR_SCKT_PORT))
s.listen(5)

poll = select.poll()
poll.register(s, select.POLLIN)

prevRun = 0
i = 0
while True:
    print("iteration #" + str(i))
    i += 1
    # run every 60 seconds
    curRun = int(round(time.time() * 1000))
    if curRun - prevRun >= REFRESH_DELAY:
        prevRun = curRun
        # MAIN PROGRAM
        # ......
        # whole bunch of code
        # ....
    # run continuously:
    try:
        if gc.mem_free() < 102000:
            gc.collect()
        events = poll.poll(100)
        if events:
            conn, addr = s.accept()
            conn.settimeout(3.0)
            print('Got a connection from %s' % str(addr))
            request = conn.recv(1024)
            conn.settimeout(None)
            request = str(request)
            # print('Content = %s' % request)
            measurements = 'some json stuff'
            conn.send('HTTP/1.1 200 OK\n')
            conn.send('Content-Type: text/html\n')
            conn.send('Connection: close\n\n')
            conn.send(measurements)
            conn.close()
    except OSError:
        conn.close()
        print('Connection closed')
You'll note that I've taken a few liberties with your code to get it running on my device and to appease my sense of style; primarily, I've excised most of your do_connect method and put all the imports at the top of the file.
The only real changes are:
We create a select.poll() object:
poll = select.poll()
We ask it to monitor the s variable for POLLIN events:
poll.register(s, select.POLLIN)
We check if any connections are pending before attempting to handle a connection:
events = poll.poll(100)
if events:
    conn, addr = s.accept()
    conn.settimeout(3.0)
    [...]
With these changes in place, running your code and making a request looks something like this:
iteration #0
iteration #1
iteration #2
iteration #3
iteration #4
iteration #5
iteration #6
Got a connection from ('192.168.1.169', 54392)
iteration #7
iteration #8
iteration #9
iteration #10
Note that as written here, your loop will iterate at least once every 100ms (and you can control that by changing the timeout on our call to poll.poll()).
Note: the above was tested on an ESP8266 device (a Wemos D1 clone) running MicroPython v1.13-268-gf7aafc062.
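For illustration (not part of the original answer), the poll timeout trades responsiveness of the HTTP handling against how often the main loop runs; the values below are placeholders:
# Wait at most 10 ms for a pending connection; poll(0) returns immediately
# (fully non-blocking), and poll(-1) blocks until an event arrives.
events = poll.poll(10)
if events:
    conn, addr = s.accept()
    # ... handle the request exactly as in the loop above ...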

Unable to transcode from audio/l16;rate=48000;channel=1 to one of: audio/x-float-array; rate=16000; channels=1,

I am currently working on SoftBank's robot Pepper and I am trying to use Watson's speech-to-text solution on Pepper's audio buffers, streamed remotely over the WebSocket protocol.
I used the answer to the former question NAO robot remote audio problems to find a way to access Pepper's audio buffers remotely, and the project https://github.com/ibm-dev/watson-streaming-stt to learn how to use the WebSocket protocol with Watson streaming STT.
However, after I open my WebSocket application and start sending buffers to Watson, after a few sends I receive the error: 'Unable to transcode from audio/l16;rate=48000;channel=1 to one of: audio/x-float-array; rate=16000; channels=1'
Every time I try to send Pepper's audio buffer to Watson, it is unable to understand it.
I compared the data I send with the data sent in the Watson streaming STT example (which uses PyAudio streaming from a microphone instead of Pepper's buffer streaming) and I don't see any difference; both times I'm fairly sure I am sending a string containing raw chunks of bytes, which is what Watson asks for in its documentation.
I am sending chunks of 8192 bytes with a sample rate of 48 kHz, and I can easily convert Pepper's audio buffer to hex, so I don't understand why Watson can't transcode it.
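For what it's worth, an audio/l16 chunk is just raw 16-bit PCM, so an 8192-byte mono chunk at 48 kHz is roughly 85 ms of audio. A small sketch (mine, not from the original post) of how such a chunk can be sanity-checked with numpy; the buffer here is fabricated silence purely for illustration:
import numpy as np

sample_rate = 48000
buffer = b'\x00\x00' * 4096                        # fabricated 8192-byte chunk of silence

samples = np.frombuffer(buffer, dtype=np.int16)    # decode the L16 payload into samples
duration_ms = 1000.0 * len(samples) / sample_rate
print(len(buffer), "bytes ->", len(samples), "samples ->", duration_ms, "ms")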
Here is my code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import base64
import configparser
import json
import threading
import time
from optparse import OptionParser

import naoqi
import numpy as np
import sys
from threading import Thread
import ssl

import websocket
from websocket._abnf import ABNF

CHANNELS = 1
NAO_IP = "172.20.10.12"


class SoundReceiverModule(naoqi.ALModule):
    """
    Use this object to get call back from the ALMemory of the naoqi world.
    Your callback needs to be a method with two parameter (variable name, value).
    """

    def __init__(self, strModuleName, strNaoIp):
        try:
            naoqi.ALModule.__init__(self, strModuleName);
            self.BIND_PYTHON(self.getName(), "callback");
            self.strNaoIp = strNaoIp;
            self.outfile = None;
            self.aOutfile = [None]*(4-1);  # ASSUME max nbr channels = 4
            self.FINALS = []
            self.RECORD_SECONDS = 20
            self.ws_open = False
            self.ws_listening = ""
            # init data for websocket interfaces
            self.headers = {}
            self.userpass = ""  # username and password
            self.headers["Authorization"] = "Basic " + base64.b64encode(
                self.userpass.encode()).decode()
            self.url = ("wss://stream.watsonplatform.net//speech-to-text/api/v1/recognize"
                        "?model=fr-FR_BroadbandModel")
        except BaseException, err:
            print("ERR: abcdk.naoqitools.SoundReceiverModule: loading error: %s" % str(err));
    # __init__ - end

    def __del__(self):
        print("INF: abcdk.SoundReceiverModule.__del__: cleaning everything");
        self.stop();

    def start(self):
        audio = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559);
        self.nNbrChannelFlag = 3;  # ALL_Channels: 0, AL::LEFTCHANNEL: 1, AL::RIGHTCHANNEL: 2, AL::FRONTCHANNEL: 3 or AL::REARCHANNEL: 4.
        self.nDeinterleave = 0;
        self.nSampleRate = 48000;
        audio.setClientPreferences(self.getName(), self.nSampleRate, self.nNbrChannelFlag, self.nDeinterleave);  # setting same as default generates a bug !?!
        audio.subscribe(self.getName());
        # opening websocket app
        self._ws = websocket.WebSocketApp(self.url,
                                          header=self.headers,
                                          on_open=self.on_open,
                                          on_message=self.on_message,
                                          on_error=self.on_error,
                                          on_close=self.on_close)
        sslopt = {"cert_reqs": ssl.CERT_NONE}
        threading.Thread(target=self._ws.run_forever, kwargs={'sslopt': sslopt}).start()
        print("INF: SoundReceiver: started!");

    def stop(self):
        print("INF: SoundReceiver: stopping...");
        audio = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559);
        audio.unsubscribe(self.getName());
        print("INF: SoundReceiver: stopped!");
        print "INF: WebSocket: closing..."
        data = {"action": "stop"}
        self._ws.send(json.dumps(data).encode('utf8'))
        # ... which we need to wait for before we shutdown the websocket
        time.sleep(1)
        self._ws.close()
        print "INF: WebSocket: closed"
        if (self.outfile != None):
            self.outfile.close();

    def processRemote(self, nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, buffer):
        """
        This is THE method that receives all the sound buffers from the "ALAudioDevice" module"""
        print "receiving buffer"
        # self.data_to_send = self.data_to_send + buffer
        # print len(self.data_to_send)
        # self.data_to_send = ''.join(["%02X " % ord(x) for x in buffer]).strip()
        self.data_to_send = buffer
        # print("buffer type :", type(data))
        # print("buffer :", buffer)
        # ~ print("process!");
        print("processRemote: %s, %s, %s, lendata: %s, data0: %s (0x%x), data1: %s (0x%x)" % (nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, len(buffer), buffer[0], ord(buffer[0]), buffer[1], ord(buffer[1])));
        if self.ws_open == True and self.ws_listening == True:
            print "sending data"
            self._ws.send(self.data_to_send, ABNF.OPCODE_BINARY)
            print "data sent"
            # print self.data_to_send
        aSoundDataInterlaced = np.fromstring(str(buffer), dtype=np.int16);
        #
        aSoundData = np.reshape(aSoundDataInterlaced, (nbOfChannels, nbrOfSamplesByChannel), 'F');
        # print "processRemote over"
    # processRemote - end

    def on_message(self, ws, msg):
        print("message")
        data = json.loads(msg)
        print data
        if "state" in data:
            if data["state"] == "listening":
                self.ws_listening = True
        if "results" in data:
            if data["results"][0]["final"]:
                self.FINALS.append(data)
            # This prints out the current fragment that we are working on
            print(data['results'][0]['alternatives'][0]['transcript'])

    def on_error(self, ws, error):
        """Print any errors."""
        print(error)

    def on_close(self, ws):
        """Upon close, print the complete and final transcript."""
        transcript = "".join([x['results'][0]['alternatives'][0]['transcript']
                              for x in self.FINALS])
        print("transcript :", transcript)
        self.ws_open = False

    def on_open(self, ws):
        """Triggered as soon as we have an active connection."""
        # args = self._ws.args
        print "INF: WebSocket: opening"
        data = {
            "action": "start",
            # this means we get to send it straight raw sampling
            "content-type": "audio/l16;rate=%d;channel=1" % self.nSampleRate,
            "continuous": True,
            "interim_results": True,
            # "inactivity_timeout": 5, # in order to use this effectively
            # you need other tests to handle what happens if the socket is
            # closed by the server.
            "word_confidence": True,
            "timestamps": True,
            "max_alternatives": 3
        }
        # Send the initial control message which sets expectations for the
        # binary stream that follows:
        self._ws.send(json.dumps(data).encode('utf8'))
        # Spin off a dedicated thread where we are going to read and
        # stream out audio.
        print "INF: WebSocket: opened"
        self.ws_open = True

    def version(self):
        return "0.6";


def main():
    """initialisation
    """
    parser = OptionParser()
    parser.add_option("--pip",
                      help="Parent broker port. The IP address of your robot",
                      dest="pip")
    parser.add_option("--pport",
                      help="Parent broker port. The port NAOqi is listening to",
                      dest="pport",
                      type="int")
    parser.set_defaults(
        pip=NAO_IP,
        pport=9559)
    (opts, args_) = parser.parse_args()
    pip = opts.pip
    pport = opts.pport

    # We need this broker to be able to construct
    # NAOqi modules and subscribe to other modules
    # The broker must stay alive until the program exits
    myBroker = naoqi.ALBroker("myBroker",
                              "0.0.0.0",   # listen to anyone
                              0,           # find a free port and use it
                              pip,         # parent broker IP
                              pport)       # parent broker port
    """end of initialisation
    """
    global SoundReceiver
    SoundReceiver = SoundReceiverModule("SoundReceiver", pip)  # thread1
    SoundReceiver.start()

    try:
        while True:
            time.sleep(1)
            print "hello"
    except KeyboardInterrupt:
        print "Interrupted by user, shutting down"
        myBroker.shutdown()
        SoundReceiver.stop()
        sys.exit(0)

if __name__ == "__main__":
    main()
I would be thankful if anyone had any idea on how to get past that error, or on what to try in order to get useful information. I first believed that I was sending "wrong" data to Watson; however, after lots of attempts I have no clue how to fix the problem.
Thanks a lot,
Alex

Send list using socket

I'm trying to send a list from a client to a server.
The list looks like this (it comes from a csv file):
201,8,0040000080
205,8,1f421d25721e
but when sending it I get this error:
TypeError: must be string or buffer, not list
I tried two options:
Iterating through the list and sending each string to the server, but got this as a result:
201 ---> 2,0,1
Casting each line, e.g. str(line), and then sending it, but got this:
201,8,0040000080 ---> [,',2,0,1,',",", ,',8,',",", ,',0,0,4,0,0,0,0,0,8,0,',]
How can I solve this? I just want to send the data from the client to the server as is. For the record, here is the client code:
import socket
import csv

clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

with open('can_data.csv', 'rb') as csv_file:
    csv_reader = csv.reader(csv_file)
    for line in csv_reader:
        clientSock.sendto(str(line), (self.address, self.port))
Server code:
with open('output.csv', 'wb') as new_file:
    csv_writer = csv.writer(new_file)
    while True:
        data, addr = s.recvfrom(1024)
        csv_writer.writerow(data)
Both sides need to agree on a serialization format. str(line) may work when paired with ast.literal_eval(), but repr(line) would be the better choice, as repr tries for a more precise representation than str. You could also move to a serialization protocol like pickle or JSON (a JSON-based variant is sketched after the server code below).
Assuming this is Python 3, I also opened the CSV files in text mode and used UTF-8 encoding on the wire.
client:
import socket
import csv

address = 'localhost'
port = 5555

clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

with open('can_data.csv', 'r') as csv_file:
    csv_reader = csv.reader(csv_file)
    for line in csv_reader:
        clientSock.sendto(repr(line).encode('utf-8'), (address, port))
server:
import socket
import csv
import ast

address = 'localhost'
port = 5555

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((address, port))

with open('output.csv', 'w') as new_file:
    csv_writer = csv.writer(new_file)
    while True:
        data, addr = sock.recvfrom(1024)
        csv_writer.writerow(ast.literal_eval(data.decode('utf-8')))
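As mentioned above, a protocol such as JSON works just as well for these simple lists of strings; a minimal sketch of the JSON variant, reusing the sockets and csv_writer defined in the client and server above (only the serialization calls change):
import json

# client side: serialize each csv row as JSON before sending
clientSock.sendto(json.dumps(line).encode('utf-8'), (address, port))

# server side: decode the JSON back into a list of strings
data, addr = sock.recvfrom(1024)
csv_writer.writerow(json.loads(data.decode('utf-8')))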

Python3 TypeError: sequence item 0: expected a bytes-like object, int found

I'm trying to send an array over TCP from a server-like script to a client-like one. The array's size is variable, so the data is sent in packets and then joined together at the client.
The data I'm trying to send is from the MNIST hand-written digits dataset for deep learning. The server-side code is:
tcp = '127.0.0.1'
port = 1234
buffer_size = 4096
(X_train, y_train), (X_test, y_test) = mnist.load_data()
test_data = (X_test, y_test)
# Client-side Deep Learning stuff
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((tcp, port))
x = pickle.dumps(test_data)
s.sendall(x)
s.close()
The client-side script loads a Neural Network that uses the test data to predict classes. The script for listening to said data is:
tcp = '127.0.0.1'
port = 1234
buffer_size = 4096

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((tcp, port))
print ('Listening...')
s.listen(1)
conn, addr = s.accept()

data_arr = []
while True:
    data_pack = conn.recv(buffer_size)
    if not data: break
    data_pack += data
my_pickle = b"".join(data_pack)
test_data = pickle.loads(my_pickle)
print ("Received: " + test_data)
conn.close()
# Irrelevant Deep Learning stuff...
# Irrelevant Deep Learning stuff...
The server sends the data without a hitch, but the client crashes when trying to join the packets received by the client (my_pickle = ...) with the following error:
TypeError: sequence item 0: expected a bytes-like object, int found
How should I format the join in order to recreate the data sent and use it for the rest of the script?
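For context on the error itself: b"".join() expects an iterable of bytes objects, but iterating a single bytes object in Python 3 yields ints, hence the TypeError. A hedged sketch of one way the receive loop could accumulate chunks in a list and join them once the sender closes the connection (keeping the posted names where possible):
import pickle
import socket

tcp = '127.0.0.1'
port = 1234
buffer_size = 4096

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((tcp, port))
s.listen(1)
conn, addr = s.accept()

chunks = []
while True:
    data_pack = conn.recv(buffer_size)
    if not data_pack:            # empty read: the sender closed the connection
        break
    chunks.append(data_pack)     # collect the bytes chunks in a list
conn.close()

my_pickle = b"".join(chunks)     # joining a list of bytes objects works
test_data = pickle.loads(my_pickle)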
I ended up using both pickle and ZeroMQ to handle the communication protocol. An advantage of this method is that I can send more than one data package.
On the client side:
import pickle
import zmq

ip = '127.0.0.1'
port = '1234'

# ZeroMQ context
context = zmq.Context()

# Setting up protocol (client)
sock = context.socket(zmq.REQ)
sock.bind('tcp://' + ip + ':' + port)
print('Waiting for connection at tcp://' + ip + ':' + port + '...')

sock.send(pickle.dumps(X_send))
X_answer = sock.recv()
sock.send(pickle.dumps(y_send))
print('Data sent. Waiting for classification...')
y_answer = sock.recv()
print('Done.')
And on the server side:
import pickle
import zmq

# ZeroMQ context
context = zmq.Context()

# Setting up protocol (server)
sock = context.socket(zmq.REP)
ip = '127.0.0.1'
port = '1234'
sock.connect('tcp://' + ip + ':' + port)
print('Listening to tcp://' + ip + ':' + port + '...')

X_message = sock.recv()
X_test = pickle.loads(X_message)
sock.send(pickle.dumps(X_message))
y_message = sock.recv()
y_test = pickle.loads(y_message)
print('Data received. Starting classification...')

# Classification process

sock.send(pickle.dumps(y_message))
print('Done.')
