Multiple recordings: PyAudio loop just stops after the first pass

For some reason my loop always stops after the first recording. It actually gets into the second run, but the script terminates right after the print statement "Recording...".
No matter how I indent the code, the issue persists.
import pyaudio
import wave

# instantiate PyAudio
p = pyaudio.PyAudio()
FORMAT = pyaudio.paInt16
CHANNELS = 1
# 44100 is normal
RATE = 44100
# 1024 is normal
FRAMES = 1024
RECORD_SECONDS = 5

def multi_record(n):
    # initiate recording parameters + record using the imported pyaudio module
    for i in range(1, n):
        print("Recording...")
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=FRAMES,
                        input_device_index=1)
        frames = []
        # append recorded data to the frames list
        for sec in range(0, int(RATE / FRAMES * RECORD_SECONDS)):
            data = stream.read(FRAMES)
            frames.append(data)
        stream.stop_stream()
        stream.close()
        p.terminate()
        # store the audio data from frames in a .wav file using the imported wave module
        print("Saving...")
        with wave.open(f"C:/Users/LucasGames/Desktop/Coding/PythonCoding/Apps/SpeechRecognition/Wakeword data/{i}.wav",
                       "wb") as sound_file:
            sound_file.setnchannels(CHANNELS)
            sound_file.setsampwidth(p.get_sample_size(FORMAT))
            sound_file.setframerate(RATE)
            sound_file.writeframes(b"".join(frames))
            sound_file.close()
        print(f"File number: {i}")

if __name__ == "__main__":
    # record()
    multi_record(3)
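
For what it's worth, the usual cause of this exact symptom is that p.terminate() sits inside the loop: it shuts PyAudio down after the first pass, so the second iteration prints "Recording..." and then dies on p.open(...). No amount of re-indenting the save block will change that. A minimal sketch of the reordering, keeping the names above (input_device_index omitted since it is machine-specific):

def multi_record(n):
    p = pyaudio.PyAudio()                  # one PyAudio instance for every run
    for i in range(1, n + 1):              # note: range(1, n) records only n - 1 files
        print("Recording...")
        stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                        input=True, frames_per_buffer=FRAMES)
        frames = [stream.read(FRAMES)
                  for _ in range(int(RATE / FRAMES * RECORD_SECONDS))]
        stream.stop_stream()
        stream.close()                     # close the per-run stream each pass...
        # ...save frames to a .wav file as above...
        print(f"File number: {i}")
    p.terminate()                          # ...but terminate PyAudio only once, at the end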

Related

Using v4l2sink with DeepStream

I'm working on DeepStream code to pass RTSP streams to virtual V4L2 devices (I used v4l2loopback to create the virtual devices). I have code that runs without errors; however, I can't read from the V4L2 device.
Does anyone know of working DeepStream code where v4l2sink is used? I have tried to find an example without success.
Here is my code; the part that writes to v4l2sink is in the function create_v4l2sink_branch().
import sys
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
import common.utils as DS_UTILS
import pyds
from common.bus_call import bus_call
from common.FPS import PERF_DATA
from common.is_aarch_64 import is_aarch64
from gi.repository import GLib, Gst, GstRtspServer

CODEC = "H264"
BITRATE = 4000000
MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 400000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1
MUX_SYNC_INPUTS = 0

ds_loop = None
perf_data = None

def terminate_pipeline(u_data):
    global ds_loop
    # if global_config.request_to_stop == True:
    #     print("Aborting pipeline by request")
    #     ds_loop.quit()
    #     return False
    return True

def create_onscreen_branch(pipeline, gst_elem, index):
    print("Creating EGLSink")
    sink = DS_UTILS.create_gst_element("nveglglessink", f"nvvideo-renderer-{index}")
    sink.set_property('sync', 0)
    sink.set_property('async', 1)
    pipeline.add(sink)
    if is_aarch64():
        transform = DS_UTILS.create_gst_element("nvegltransform", f"nvegl-transform{index}")
        pipeline.add(transform)
        gst_elem.link(transform)
        transform.link(sink)
    else:
        gst_elem.link(sink)
    sink.set_property("qos", 0)

def create_v4l2sink_branch(pipeline, gst_elem, index, output_video_device):
    # Create a caps filter
    caps = DS_UTILS.create_gst_element("capsfilter", f"filter-{index}")
    # caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))
    identity = DS_UTILS.create_gst_element("identity", f"identity-{index}")
    identity.set_property("drop-allocation", 1)
    nvvidconv = DS_UTILS.create_gst_element("nvvideoconvert", f"convertor-{index}")
    sink = DS_UTILS.create_gst_element("v4l2sink", f"v4l2sink-{index}")
    sink.set_property('device', output_video_device)
    sink.set_property("sync", 0)
    sink.set_property("async", 1)
    pipeline.add(caps)
    pipeline.add(nvvidconv)
    pipeline.add(identity)
    pipeline.add(sink)
    gst_elem.link(caps)
    caps.link(nvvidconv)
    nvvidconv.link(identity)
    identity.link(sink)

def run_pipeline(rtsp_v4l2_pairs):
    # Check input arguments
    number_sources = len(rtsp_v4l2_pairs)
    perf_data = PERF_DATA(number_sources)
    # Standard GStreamer initialization
    Gst.init(None)
    # Create the Pipeline element that will connect the other elements
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
        return
    # Create an nvstreammux instance to form batches from one or more sources.
    streammux = DS_UTILS.create_gst_element("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)
    for i in range(number_sources):
        uri_name = rtsp_v4l2_pairs[i][0]
        print(" Creating source_bin {} --> {}".format(i, uri_name))
        is_live = uri_name.find("rtsp://") == 0
        source_bin = DS_UTILS.create_source_bin(i, uri_name)
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # streammux setup
    if is_live:
        print(" At least one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property("batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC)
    # streammux.set_property("sync-inputs", MUX_SYNC_INPUTS)
    queue = DS_UTILS.create_gst_element("queue", "queue1")
    pipeline.add(queue)
    nvstreamdemux = DS_UTILS.create_gst_element("nvstreamdemux", "nvstreamdemux")
    pipeline.add(nvstreamdemux)
    # linking
    streammux.link(queue)
    queue.link(nvstreamdemux)
    for i in range(number_sources):
        queue = DS_UTILS.create_gst_element("queue", f"queue{2+i}")
        pipeline.add(queue)
        demuxsrcpad = nvstreamdemux.get_request_pad(f"src_{i}")
        if not demuxsrcpad:
            sys.stderr.write("Unable to create demux src pad \n")
        queuesinkpad = queue.get_static_pad("sink")
        if not queuesinkpad:
            sys.stderr.write("Unable to create queue sink pad \n")
        demuxsrcpad.link(queuesinkpad)
        # create_onscreen_branch(pipeline=pipeline, gst_elem=queue, index=i)
        create_v4l2sink_branch(pipeline=pipeline, gst_elem=queue, index=i, output_video_device=rtsp_v4l2_pairs[i][1])
    # to terminate the pipeline
    GLib.timeout_add_seconds(1, terminate_pipeline, 0)
    # display FPS
    GLib.timeout_add(5000, perf_data.perf_print_callback)
    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    ds_loop = loop
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    print("Starting pipeline")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Pipeline ended")
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    pairs = [
        ("rtsp://192.168.1.88:554/22", "/dev/video6")
    ]
    run_pipeline(rtsp_v4l2_pairs=pairs)
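
Not a verified answer, but one thing stands out in create_v4l2sink_branch(): the capsfilter is never given any caps, and both commented-out candidates request memory:NVMM, which keeps the frames in GPU memory that a v4l2loopback reader cannot map. A hedged sketch of the branch with the capsfilter moved after nvvideoconvert and forced to system memory (the I420 format is an assumption; pick whatever your V4L2 reader accepts):

def create_v4l2sink_branch(pipeline, gst_elem, index, output_video_device):
    # nvvideoconvert copies NVMM buffers to system memory when its output
    # caps carry no memory:NVMM feature
    nvvidconv = DS_UTILS.create_gst_element("nvvideoconvert", f"convertor-{index}")
    caps = DS_UTILS.create_gst_element("capsfilter", f"filter-{index}")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw, format=I420"))
    sink = DS_UTILS.create_gst_element("v4l2sink", f"v4l2sink-{index}")
    sink.set_property("device", output_video_device)
    sink.set_property("sync", 0)
    for elem in (nvvidconv, caps, sink):
        pipeline.add(elem)
    gst_elem.link(nvvidconv)
    nvvidconv.link(caps)
    caps.link(sink)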

How to activate naoqi_navigation_samples on my Pepper robot?

One of the goals of the project I'm working on is to make a Pepper robot patrol hospital wards "autonomously". So I downloaded some basic applications to start with navigation (https://github.com/aldebaran/naoqi_navigation_samples). The "explore" application is critical, since the other two (places and patrol) depend on it. I tried to launch "explore" from Choregraphe, but the robot does not move (so it neither explores nor creates a map) and the application ends by saying the final sentence. In particular, the "Get map" block gives an error. So the application starts correctly but does not work properly.
I saved "explore" as a robot application and tried it both with and without autonomous life.
I cannot figure out where I'm going wrong: could you help me please?
Make sure the charging flap is not open when you run it. Also try this code; it will create a map:
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Use explore method."""
import qi
import argparse
import sys
import numpy
from PIL import Image

def main(session):
    """
    This example uses the explore method.
    """
    # Get the services ALNavigation and ALMotion.
    navigation_service = session.service("ALNavigation")
    motion_service = session.service("ALMotion")
    # Wake up robot
    motion_service.wakeUp()
    # Explore the environment within the given radius (here 5 m).
    radius = 5.0
    error_code = navigation_service.explore(radius)
    if error_code != 0:
        print("Exploration failed.")
        return
    # Save the exploration to disk
    path = navigation_service.saveExploration()
    print("Exploration saved at path: \"" + path + "\"")
    # Start localization to navigate in the map
    navigation_service.startLocalization()
    # Come back to the initial position
    navigation_service.navigateToInMap([0., 0., 0.])
    # Stop localization
    navigation_service.stopLocalization()
    # Retrieve and display the map built by the robot
    result_map = navigation_service.getMetricalMap()
    map_width = result_map[1]
    map_height = result_map[2]
    img = numpy.array(result_map[4]).reshape(map_width, map_height)
    img = (100 - img) * 2.55  # from 0..100 to 255..0
    img = numpy.array(img, numpy.uint8)
    Image.frombuffer('L', (map_width, map_height), img, 'raw', 'L', 0, 1).show()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="127.0.0.1",
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    session = qi.Session()
    try:
        session.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        print("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) + ".\n"
              "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    main(session)
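
Once explore() has succeeded once, the other applications can reuse the saved .explo file instead of re-exploring. A hedged sketch using ALNavigation's loadExploration/relocalizeInMap calls (the .explo path below is a made-up example; use whatever saveExploration() printed):

# Reuse a previously saved exploration instead of exploring again.
navigation_service = session.service("ALNavigation")
navigation_service.loadExploration("/home/nao/.local/share/Explorer/2019-03-12T101500.000Z.explo")  # hypothetical path
# Tell the robot roughly where it is in that map, then localize and move.
navigation_service.relocalizeInMap([0., 0., 0.])
navigation_service.startLocalization()
navigation_service.navigateToInMap([1.0, 0., 0.])
navigation_service.stopLocalization()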

How to receive data from MQTT and an OpenCV video stream at the same time

My project needs to receive MQTT data and video-stream data at the same time. However, when my Raspberry Pi is receiving video data through OpenCV, it can't receive the IoT data at the same time.
My Pi needs to check whether the IoT data is on; if it is, the video must not turn on or receive data.
So the priority is:
IoT sensor detects something -----> video does not turn on (case 1).
IoT sensor detects nothing -------> video turns on and receives image data (case 2).
[Update 2]
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import serial
import struct
import paho.mqtt.client as mqtt

# subscribe to the mqtt publisher with topic "/esp/pot"
def on_connect(client, userdata, flags, rc):
    client.subscribe("/esp/pot")

# get the data from the publisher and save it to the integer variable "bdy"
def on_message(client, userdata, msg):
    bdy = int(msg.payload)

# on disconnect, stop the mqtt loop
def on_disconnect(client, userdata, rc=0):
    client.loop_stop()

# assign the mqtt client
client = mqtt.Client()
# assign the mqtt connect callback
client.on_connect = on_connect
# assign the mqtt message callback
client.on_message = on_message
# connect the client to the local broker
client.connect("localhost", 1883, 60)  # localhost is the Raspberry Pi itself

# Below is the OpenCV setup
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
# get the video if a path is provided
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=0,
                help="max buffer size")
args = vars(ap.parse_args())

# define the object to detect, here a green object:
# the lower and upper boundaries of "green" in the HSV
# colorspace, then initialize the list of tracked points
greenLower = (25, 96, 49)
greenUpper = (39, 255, 255)
Lower = greenLower
Upper = greenUpper
# trace point of the detected object, here the centroid
pts = deque(maxlen=args["buffer"])
# set up the serial port to the arduino, used for motor control
ser = serial.Serial('/dev/ttyUSB0', 9600)

# if a video path was not supplied, grab the reference
# to the webcam (we use a webcam in this project)
if not args.get("video", False):
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# allow the camera or video file to warm up
time.sleep(2.0)

# initialize the integer variable "bdy" to zero
bdy = 0

# main loop
while True:
    # grab the current frame from the webcam
    frame = vs.read()
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and did not grab a frame,
    # we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=246)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, Lower, Upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the green object
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # initial center value
    center = None
    # start the mqtt loop to receive data from the broker into "bdy"
    client.loop_start()
    # if the data from the broker is not 1
    if bdy != 1:
        # print bdy for debugging
        print(bdy)
        # only proceed if at least one green contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use it
            # to compute the minimum enclosing circle and centroid
            c = max(cnts, key=cv2.contourArea)
            # save the detected object's centroid to cX and cY
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # locate the centre of the green object
            center = (cX, cY)
            # only proceed if the radius of the detected object meets a
            # minimum size, say radius > 2, to remove unnecessary noise
            if radius > 2:
                # draw the centroid on the frame,
                # then update the list of tracked points
                # cv2.circle(frame, (int(x), int(y)), int(radius),
                #            (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                # print x position
                print(cX)
                # send the cX value to the arduino over serial at 9600 baud
                ser.write(struct.pack('>H', cX))
    # otherwise the data from the broker is 1
    # (the broker just relays a digital sensor value, 0 or 1)
    else:
        print("boundary detected")
        # stop the mqtt loop to clear the retained message
        client.loop_stop()
    # show the frame on the raspberry pi screen for debugging
    cv2.imshow("Frame", frame)
    # wait for a key press
    chkKey = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the main loop
    if chkKey == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()
# close all windows
cv2.destroyAllWindows()
# stop the mqtt loop
client.loop_stop()
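
Not a definitive answer, but one likely reason the video loop never reacts to the broker: on_message assigns to a *local* variable named bdy, so the bdy tested in the while loop never changes from 0. A minimal sketch of the fix (declaring it global, and starting the network loop once before the main loop rather than on every frame):

bdy = 0

def on_message(client, userdata, msg):
    global bdy                 # update the module-level bdy the main loop reads
    bdy = int(msg.payload)

client.on_message = on_message
client.loop_start()            # call once, before "while True", not per frame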

Unable to transcode from audio/l16;rate=48000;channel=1 to one of: audio/x-float-array; rate=16000; channels=1,

I am currently working on SoftBank's robot Pepper and I am trying to use Watson's speech-to-text solution on Pepper's audio buffers, streamed remotely over the websocket protocol.
I used the answer to the former question "NAO robot remote audio problems" to find a way to access Pepper's audio buffers remotely, and the project https://github.com/ibm-dev/watson-streaming-stt to learn how to use the websocket protocol with Watson streaming STT.
However, after I open my websocket application and start sending buffers to Watson, after a few sends I receive the error: 'Unable to transcode from audio/l16;rate=48000;channel=1 to one of: audio/x-float-array; rate=16000; channels=1'
Every time I try to send Pepper's audio buffer to Watson, it is unable to understand it.
I compared the data I send with the data sent by the watson-streaming-stt example (which uses pyaudio streaming from a microphone instead of Pepper's buffer streaming) and I don't see any difference. Both times I'm fairly sure I am sending a string containing raw chunks of bytes, which is what Watson asks for in its documentation.
I send chunks of 8192 bytes at a sample rate of 48 kHz, and I can easily convert Pepper's audio buffer to hex, so I don't understand why Watson can't transcode it.
Here is my code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import base64
import configparser
import json
import threading
import time
from optparse import OptionParser
import naoqi
import numpy as np
import sys
from threading import Thread
import ssl
import websocket
from websocket._abnf import ABNF

CHANNELS = 1
NAO_IP = "172.20.10.12"

class SoundReceiverModule(naoqi.ALModule):
    """
    Use this object to get a callback from the ALMemory of the naoqi world.
    Your callback needs to be a method with two parameters (variable name, value).
    """

    def __init__(self, strModuleName, strNaoIp):
        try:
            naoqi.ALModule.__init__(self, strModuleName)
            self.BIND_PYTHON(self.getName(), "callback")
            self.strNaoIp = strNaoIp
            self.outfile = None
            self.aOutfile = [None] * (4 - 1)  # ASSUME max nbr channels = 4
            self.FINALS = []
            self.RECORD_SECONDS = 20
            self.ws_open = False
            self.ws_listening = ""
            # init data for the websocket interface
            self.headers = {}
            self.userpass = ""  # "username:password" for the STT service
            self.headers["Authorization"] = "Basic " + base64.b64encode(
                self.userpass.encode()).decode()
            self.url = ("wss://stream.watsonplatform.net/speech-to-text/api/v1/recognize"
                        "?model=fr-FR_BroadbandModel")
        except BaseException, err:
            print("ERR: abcdk.naoqitools.SoundReceiverModule: loading error: %s" % str(err))
    # __init__ - end

    def __del__(self):
        print("INF: abcdk.SoundReceiverModule.__del__: cleaning everything")
        self.stop()

    def start(self):
        audio = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559)
        self.nNbrChannelFlag = 3  # ALL_Channels: 0, AL::LEFTCHANNEL: 1, AL::RIGHTCHANNEL: 2, AL::FRONTCHANNEL: 3, AL::REARCHANNEL: 4
        self.nDeinterleave = 0
        self.nSampleRate = 48000
        audio.setClientPreferences(self.getName(), self.nSampleRate, self.nNbrChannelFlag, self.nDeinterleave)  # setting same as default generates a bug !?!
        audio.subscribe(self.getName())
        # opening the websocket app
        self._ws = websocket.WebSocketApp(self.url,
                                          header=self.headers,
                                          on_open=self.on_open,
                                          on_message=self.on_message,
                                          on_error=self.on_error,
                                          on_close=self.on_close)
        sslopt = {"cert_reqs": ssl.CERT_NONE}
        threading.Thread(target=self._ws.run_forever, kwargs={'sslopt': sslopt}).start()
        print("INF: SoundReceiver: started!")

    def stop(self):
        print("INF: SoundReceiver: stopping...")
        audio = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559)
        audio.unsubscribe(self.getName())
        print("INF: SoundReceiver: stopped!")
        print("INF: WebSocket: closing...")
        data = {"action": "stop"}
        self._ws.send(json.dumps(data).encode('utf8'))
        # ... which we need to wait for before we shut down the websocket
        time.sleep(1)
        self._ws.close()
        print("INF: WebSocket: closed")
        if self.outfile != None:
            self.outfile.close()

    def processRemote(self, nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, buffer):
        """
        This is THE method that receives all the sound buffers from the "ALAudioDevice" module."""
        print("receiving buffer")
        # self.data_to_send = self.data_to_send + buffer
        # print len(self.data_to_send)
        # self.data_to_send = ''.join(["%02X " % ord(x) for x in buffer]).strip()
        self.data_to_send = buffer
        # print("buffer type :", type(data))
        # print("buffer :", buffer)
        # print("process!")
        print("processRemote: %s, %s, %s, lendata: %s, data0: %s (0x%x), data1: %s (0x%x)" % (nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, len(buffer), buffer[0], ord(buffer[0]), buffer[1], ord(buffer[1])))
        if self.ws_open == True and self.ws_listening == True:
            print("sending data")
            self._ws.send(self.data_to_send, ABNF.OPCODE_BINARY)
            print("data sent")
            # print self.data_to_send
        aSoundDataInterlaced = np.fromstring(str(buffer), dtype=np.int16)
        aSoundData = np.reshape(aSoundDataInterlaced, (nbOfChannels, nbrOfSamplesByChannel), 'F')
        # print "processRemote over"
    # processRemote - end

    def on_message(self, ws, msg):
        print("message")
        data = json.loads(msg)
        print(data)
        if "state" in data:
            if data["state"] == "listening":
                self.ws_listening = True
        if "results" in data:
            if data["results"][0]["final"]:
                self.FINALS.append(data)
            # This prints out the current fragment that we are working on
            print(data['results'][0]['alternatives'][0]['transcript'])

    def on_error(self, ws, error):
        """Print any errors."""
        print(error)

    def on_close(self, ws):
        """Upon close, print the complete and final transcript."""
        transcript = "".join([x['results'][0]['alternatives'][0]['transcript']
                              for x in self.FINALS])
        print("transcript :", transcript)
        self.ws_open = False

    def on_open(self, ws):
        """Triggered as soon as we have an active connection."""
        # args = self._ws.args
        print("INF: WebSocket: opening")
        data = {
            "action": "start",
            # this means we get to send it straight raw sampling
            "content-type": "audio/l16;rate=%d;channel=1" % self.nSampleRate,
            "continuous": True,
            "interim_results": True,
            # "inactivity_timeout": 5, # in order to use this effectively
            # you need other tests to handle what happens if the socket is
            # closed by the server.
            "word_confidence": True,
            "timestamps": True,
            "max_alternatives": 3
        }
        # Send the initial control message which sets expectations for the
        # binary stream that follows:
        self._ws.send(json.dumps(data).encode('utf8'))
        # Spin off a dedicated thread where we are going to read and
        # stream out audio.
        print("INF: WebSocket: opened")
        self.ws_open = True

    def version(self):
        return "0.6"

def main():
    """initialisation
    """
    parser = OptionParser()
    parser.add_option("--pip",
                      help="Parent broker IP. The IP address of your robot",
                      dest="pip")
    parser.add_option("--pport",
                      help="Parent broker port. The port NAOqi is listening to",
                      dest="pport",
                      type="int")
    parser.set_defaults(
        pip=NAO_IP,
        pport=9559)
    (opts, args_) = parser.parse_args()
    pip = opts.pip
    pport = opts.pport
    # We need this broker to be able to construct
    # NAOqi modules and subscribe to other modules.
    # The broker must stay alive until the program exits.
    myBroker = naoqi.ALBroker("myBroker",
                              "0.0.0.0",  # listen to anyone
                              0,          # find a free port and use it
                              pip,        # parent broker IP
                              pport)      # parent broker port
    """end of initialisation
    """
    global SoundReceiver
    SoundReceiver = SoundReceiverModule("SoundReceiver", pip)  # thread1
    SoundReceiver.start()
    try:
        while True:
            time.sleep(1)
            print("hello")
    except KeyboardInterrupt:
        print("Interrupted by user, shutting down")
        myBroker.shutdown()
        SoundReceiver.stop()
        sys.exit(0)

if __name__ == "__main__":
    main()
I would be thankful if anyone has an idea of how to get past this error, or what to try in order to get useful information. I first believed that I was sending "wrong" data to Watson, but after lots of attempts I have no clue how to fix the problem.
Thank you a lot,
Alex
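
Not a confirmed fix, but the error message itself points at a mismatch: the server echoes back audio/l16;rate=48000;channel=1 while listing the accepted types with channels=1 (plural). Watson's documentation spells the l16 parameter channels, so the start message in on_open may need only that one-word change. A sketch, assuming everything else stays as in the code above:

# Hypothetical one-word fix in on_open: "channels", not "channel".
data = {
    "action": "start",
    "content-type": "audio/l16;rate=%d;channels=1" % self.nSampleRate,
    "continuous": True,
    "interim_results": True,
    "word_confidence": True,
    "timestamps": True,
    "max_alternatives": 3
}
self._ws.send(json.dumps(data).encode('utf8'))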

How to make gcloud app logs read tail mode start with recent logs?

We are using GAE and we want to watch the logs live (a delay of a few seconds is acceptable).
The gcloud app logs read command just returns the last log lines.
We wrote a little script that calls gcloud app logs read every 3 seconds and prints only the new lines.
Is it possible to run the command in some "tail" mode, so that it keeps a live connection and prints new lines as soon as they arrive?
Thanks
---- EDIT ----
@gregology, here's my Python script:
#!/usr/bin/env python
import subprocess
from time import sleep

def run(extra_args=None, min_cache_size=50, max_cache_size=2000):
    cached_lines = None
    cached_order = None
    cache_size = min_cache_size
    if extra_args is None:
        extra_args = []
    while True:
        while True:
            sleep(0.1)
            lines = subprocess.check_output(["gcloud", "app", "logs", "read", "--limit", str(cache_size)] + list(extra_args)).splitlines()
            if cached_lines is None:
                cached_lines = set(lines)
                cached_order = list(lines)
                continue
            if all(line not in cached_lines for line in lines) and (cache_size * 2) < max_cache_size:
                # None of the lines is in the cache - retry with a larger cache_size
                cache_size *= 2
                print "... google-logs-tail increasing cache size to %d lines" % (cache_size,)
                continue
            break
        new_lines = [line for line in lines if line not in cached_lines]
        if len(new_lines) == len(lines):
            print "... google-logs-tail log is broken - some lines may be missing ..."
            cache_size = min_cache_size
        for line in new_lines:
            print line
            cached_order.append(line)
            cached_lines.add(line)
        if (len(new_lines) < cache_size / 4) and (cache_size >= min_cache_size * 2):
            cache_size /= 2
            print "... google-logs-tail decreasing cache size to %d lines" % (cache_size,)
        if len(cached_order) > max_cache_size * 8:
            # many more lines than we need -
            # keep only max_cache_size * 4 lines
            drop_lines = cached_order[:len(cached_order) - (max_cache_size * 4)]
            for line in drop_lines:
                cached_lines.discard(line)
            del cached_order[:len(drop_lines)]
        sleep(3)

if __name__ == '__main__':
    import sys
    run(sys.argv[1:])
gcloud app logs tail -s default does what you want, I think. gcloud app deploy suggests it.
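
With that, the polling script above collapses to a thin wrapper, since tail keeps the connection open and streams lines as they arrive. A minimal sketch, assuming the Cloud SDK is on PATH and a service named "default":

#!/usr/bin/env python
# Delegate tailing to `gcloud app logs tail`, which keeps a live
# connection, instead of polling `gcloud app logs read`.
import subprocess
import sys

def tail(service="default"):
    proc = subprocess.Popen(["gcloud", "app", "logs", "tail", "-s", service],
                            stdout=subprocess.PIPE)
    for line in iter(proc.stdout.readline, b""):
        sys.stdout.write(line.decode())

if __name__ == "__main__":
    tail(sys.argv[1] if len(sys.argv) > 1 else "default")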
