I don't understand how to recall the values from the entry grid; I have the array of the entries.
import tkinter as tk
import sys

class Main:
    # >>>>> Init <<<<<
    def __init__(self, master):
        self.master = master
        self.entries = []

    # >>>>> Menu <<<<<
    def menu(self):
        self.master.geometry("400x150")
        self.master.title("Score Calculator")
        main_label = tk.Label(self.master, text="Score program")
        main_button1 = tk.Button(self.master, text="Enter score", command=self.score)
        # calculate_results, save_results and show_result are assumed to be defined elsewhere
        main_button2 = tk.Button(self.master, text="Calculate results", command=self.calculate_results)
        main_button3 = tk.Button(self.master, text="Save and Quit", command=self.save_results)
        main_button4 = tk.Button(self.master, text="Show current results", command=self.show_result)
        main_label.pack()
        main_button1.pack()
        main_button2.pack()
        main_button3.pack()
        main_button4.pack()

    # >>>>> Score <<<<<
    def score(self):
        window = tk.Toplevel(self.master)  # use a Toplevel, not a second tk.Tk() root
        # >>> The 5*6 grid <<<
        for x in range(6):
            for i in range(5):
                # grid() returns None, so create the widget first and keep the reference
                entry = tk.Entry(window, width=4)
                entry.grid(column=i, row=x)
                self.entries.append(entry)
This part builds the score grid. It needs to give back a value once the value is typed in; I will add a button for confirming results, but for now I just need to be able to read the entries back.
root = tk.Tk()
app = Main(root)
app.menu()
root.mainloop()
print(app.entries)
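For reading the typed values back, a minimal sketch (assuming the entries are stored as widgets as above; read_scores is a hypothetical name for a method added to the Main class):

    def read_scores(self):
        # Each tkinter Entry exposes get(); collect the current text of all 30 cells.
        # get() returns strings, so convert to int/float as needed.
        values = [entry.get() for entry in self.entries]
        print(values)

A confirm button can then trigger it, e.g. tk.Button(window, text="Confirm", command=self.read_scores).grid(column=0, row=6).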
I'm working on DeepStream code to pass RTSP streams to virtual V4L2 devices (I used v4l2loopback to create the virtual devices). I have code that runs without errors; however, I can't read from the V4L2 device.
Does anyone know of working DeepStream code where v4l2sink is used? I have tried to find an example without success.
Here is my code. The part that writes to v4l2sink is in the function create_v4l2sink_branch().
import sys
import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
import math
import common.utils as DS_UTILS
import pyds
from common.bus_call import bus_call
from common.FPS import PERF_DATA
from common.is_aarch_64 import is_aarch64
from gi.repository import GLib, Gst, GstRtspServer

CODEC = "H264"
BITRATE = 4000000
MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 400000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1
MUX_SYNC_INPUTS = 0

ds_loop = None
perf_data = None

def terminate_pipeline(u_data):
    global ds_loop
    # if global_config.request_to_stop == True:
    #     print("Aborting pipeline by request")
    #     ds_loop.quit()
    #     return False
    return True

def create_onscreen_branch(pipeline, gst_elem, index):
    print("Creating EGLSink")
    sink = DS_UTILS.create_gst_element("nveglglessink", f"nvvideo-renderer-{index}")
    sink.set_property('sync', 0)
    sink.set_property('async', 1)
    pipeline.add(sink)
    if is_aarch64():
        transform = DS_UTILS.create_gst_element("nvegltransform", f"nvegl-transform{index}")
        pipeline.add(transform)
        gst_elem.link(transform)
        transform.link(sink)
    else:
        gst_elem.link(sink)
    sink.set_property("qos", 0)

def create_v4l2sink_branch(pipeline, gst_elem, index, output_video_device):
    # Create a caps filter
    caps = DS_UTILS.create_gst_element("capsfilter", f"filter-{index}")
    # caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))
    identity = DS_UTILS.create_gst_element("identity", f"identity-{index}")
    identity.set_property("drop-allocation", 1)
    nvvidconv = DS_UTILS.create_gst_element("nvvideoconvert", f"convertor-{index}")
    sink = DS_UTILS.create_gst_element("v4l2sink", f"v4l2sink-{index}")
    sink.set_property('device', output_video_device)
    sink.set_property("sync", 0)
    sink.set_property("async", 1)
    pipeline.add(caps)
    pipeline.add(nvvidconv)
    pipeline.add(identity)
    pipeline.add(sink)
    gst_elem.link(caps)
    caps.link(nvvidconv)
    nvvidconv.link(identity)
    identity.link(sink)

def run_pipeline(rtsp_v4l2_pairs):
    global ds_loop, perf_data  # keep the module-level handles in sync for terminate_pipeline()
    # Check input arguments
    number_sources = len(rtsp_v4l2_pairs)
    perf_data = PERF_DATA(number_sources)
    # Standard GStreamer initialization
    Gst.init(None)
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
        return
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = DS_UTILS.create_gst_element("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)
    for i in range(number_sources):
        uri_name = rtsp_v4l2_pairs[i][0]
        print(" Creating source_bin {} --> {}".format(i, uri_name))
        is_live = uri_name.find("rtsp://") == 0
        source_bin = DS_UTILS.create_source_bin(i, uri_name)
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # streammux setup
    if is_live:
        print(" At least one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property("batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC)
    # streammux.set_property("sync-inputs", MUX_SYNC_INPUTS)
    queue = DS_UTILS.create_gst_element("queue", "queue1")
    pipeline.add(queue)
    nvstreamdemux = DS_UTILS.create_gst_element("nvstreamdemux", "nvstreamdemux")
    pipeline.add(nvstreamdemux)
    # linking
    streammux.link(queue)
    queue.link(nvstreamdemux)
    for i in range(number_sources):
        queue = DS_UTILS.create_gst_element("queue", f"queue{2+i}")
        pipeline.add(queue)
        demuxsrcpad = nvstreamdemux.get_request_pad(f"src_{i}")
        if not demuxsrcpad:
            sys.stderr.write("Unable to create demux src pad \n")
        queuesinkpad = queue.get_static_pad("sink")
        if not queuesinkpad:
            sys.stderr.write("Unable to create queue sink pad \n")
        demuxsrcpad.link(queuesinkpad)
        # create_onscreen_branch(pipeline=pipeline, gst_elem=queue, index=i)
        create_v4l2sink_branch(pipeline=pipeline, gst_elem=queue, index=i, output_video_device=rtsp_v4l2_pairs[i][1])
    # to terminate the pipeline
    GLib.timeout_add_seconds(1, terminate_pipeline, 0)
    # display FPS
    GLib.timeout_add(5000, perf_data.perf_print_callback)
    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    ds_loop = loop
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    print("Starting pipeline")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Pipeline ended")
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    pairs = [
        ("rtsp://192.168.1.88:554/22", "/dev/video6")
    ]
    run_pipeline(rtsp_v4l2_pairs=pairs)
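A guess about why reading the device fails (an assumption on my part, not something confirmed above): v4l2sink consumes plain system-memory video/x-raw buffers, while the upstream DeepStream elements negotiate NVMM device memory, and in the branch above the capsfilter sits before nvvideoconvert with its caps commented out. A sketch of the variant I would try, with the filter after the converter and no NVMM feature in the caps (the I420 format string is a placeholder for whatever the v4l2loopback device actually advertises):

def create_v4l2sink_branch(pipeline, gst_elem, index, output_video_device):
    # Hypothetical reordering: convert out of NVMM first, then pin system-memory caps.
    nvvidconv = DS_UTILS.create_gst_element("nvvideoconvert", f"convertor-{index}")
    caps = DS_UTILS.create_gst_element("capsfilter", f"filter-{index}")
    # No "(memory:NVMM)" feature here, so nvvideoconvert must hand v4l2sink CPU buffers.
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw, format=I420"))
    sink = DS_UTILS.create_gst_element("v4l2sink", f"v4l2sink-{index}")
    sink.set_property("device", output_video_device)
    sink.set_property("sync", 0)
    for elem in (nvvidconv, caps, sink):
        pipeline.add(elem)
    gst_elem.link(nvvidconv)
    nvvidconv.link(caps)
    caps.link(sink)

Checking what the loopback device actually accepts first (for example with v4l2-ctl --device /dev/video6 --list-formats) would pin down the right format string.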
I have the following class for loading and converting my images into train and test arrays for a deep learning model in TensorFlow 2.
The images are in three folders, named 'Car', 'Cat' and 'Man', which are within the Train and Test main folders. Each image is 300 x 400 pixels.
import os
import pickle
import random  # needed for random.shuffle below
import cv2
import numpy as np
os.getcwd()
out: 'C:\\Users\\me\\Jupiter_Notebooks\\Dataset_Thermal\\SeekThermal'
path_train = "../SeekThermal/Train"
path_test = "../SeekThermal/Test"
class MasterImage(object):

    def __init__(self, PATH='', IMAGE_SIZE=50):
        self.PATH = PATH
        self.IMAGE_SIZE = IMAGE_SIZE
        self.image_data = []
        self.x_data = []
        self.y_data = []
        self.CATEGORIES = []
        # This will hold the list of categories
        self.list_categories = []

    def get_categories(self):
        self.list_categories = []  # reset, otherwise repeated calls duplicate the categories
        for path in os.listdir(self.PATH):
            if '.DS_Store' in path:
                pass
            else:
                self.list_categories.append(path)
        print("Found Categories ", self.list_categories, '\n')
        return self.list_categories

    def process_image(self):
        """
        Return numpy arrays of images and labels
        :return: X_Data, Y_Data
        """
        try:
            self.CATEGORIES = self.get_categories()
            for categories in self.CATEGORIES:  # iterate over categories
                train_folder_path = os.path.join(self.PATH, categories)  # folder path
                class_index = self.CATEGORIES.index(categories)  # index used as the class label
                for img in os.listdir(train_folder_path):  # iterate over the folder
                    new_path = os.path.join(train_folder_path, img)  # image path
                    try:  # skip any corrupted image
                        image_data_temp = cv2.imread(new_path)  # read image as numbers
                        image_temp_resize = cv2.resize(image_data_temp, (self.IMAGE_SIZE, self.IMAGE_SIZE))
                        self.image_data.append([image_temp_resize, class_index])
                    except:
                        pass
            random.shuffle(self.image_data)  # shuffle once, after all images are collected
            # dtype=object avoids the ragged-array VisibleDeprecationWarning
            data = np.asanyarray(self.image_data, dtype=object)
            # Iterate over the data
            for x in data:
                self.x_data.append(x[0])  # the image
                self.y_data.append(x[1])  # the label
            X_Data = np.asarray(self.x_data) / 255.0  # normalize data
            Y_Data = np.asarray(self.y_data)
            # reshape X_Data
            X_Data = X_Data.reshape(-1, self.IMAGE_SIZE, self.IMAGE_SIZE, 3)
            return X_Data, Y_Data
        except Exception as e:
            # returning None here is what later triggers "cannot unpack non-iterable NoneType object"
            print("Failed to run Function Process Image ", e)

    def pickle_image(self):
        """
        :return: X_Data, Y_Data -- also written out as pickle files
        """
        # Call the function and get the data
        X_Data, Y_Data = self.process_image()
        # Write the image data into a pickle file
        pickle_out = open('X_Data', 'wb')
        pickle.dump(X_Data, pickle_out)
        pickle_out.close()
        # Write the Y label data
        pickle_out = open('Y_Data', 'wb')
        pickle.dump(Y_Data, pickle_out)
        pickle_out.close()
        print("Pickled Image Successfully ")
        return X_Data, Y_Data

    def load_dataset(self):
        try:
            # Read the data from the pickle objects
            # (note: pickle_image() writes into the working directory, so read from the same place)
            X_Temp = open(r'..\SeekThermal\X_Data', 'rb')
            X_Data = pickle.load(X_Temp)
            Y_Temp = open(r'..\SeekThermal\Y_Data', 'rb')
            Y_Data = pickle.load(Y_Temp)
            print('Reading Dataset from Pickle Object')
            return X_Data, Y_Data
        except:
            print('Could not find Pickle File ')
            print('Loading File and Dataset ..........')
            X_Data, Y_Data = self.pickle_image()
            return X_Data, Y_Data
I don't understand what the problem is with the pickle file, because just last week I was able to create these arrays successfully with the same code.
Is there an easier way to load images in TensorFlow than through a custom defined class?
a = MasterImage(PATH = path_train,IMAGE_SIZE = 224)
a.process_image()
out:
It produces an array, with a warning:
VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
data = np.asanyarray(self.image_data)
a.pickle_image()
out:
TypeError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_1692\507657192.py in <cell line: 1>()
----> 1 a.pickle_image()
~\AppData\Local\Temp\ipykernel_1692\1410849712.py in pickle_image(self)
71 """
72 # Call the Function and Get the Data
---> 73 X_Data,Y_Data = self.process_image()
74
75 # Write the Entire Data into a Pickle File
TypeError: cannot unpack non-iterable NoneType object
a.load_dataset()
out:
Could not Found Pickle File
Loading File and Dataset ..........
Found Categories ['Car', 'Cat', 'Man', 'Car', 'Cat', 'Man']
Pickled Image Successfully
I'm running Python 3.8.8 via anaconda on Windows 10. Thank you for any advice.
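On the second question: yes, assuming a recent TF 2.x, tf.keras.utils.image_dataset_from_directory handles exactly this folder-per-class layout, so a custom class is not needed just for loading. A minimal sketch (paths taken from the question):

import tensorflow as tf

# Labels are inferred from the 'Car', 'Cat', 'Man' subfolder names
train_ds = tf.keras.utils.image_dataset_from_directory(
    "../SeekThermal/Train",
    image_size=(224, 224),  # resizes on the fly
    batch_size=32,
)
test_ds = tf.keras.utils.image_dataset_from_directory(
    "../SeekThermal/Test",
    image_size=(224, 224),
    batch_size=32,
)

Pixel scaling can then live inside the model as a tf.keras.layers.Rescaling(1./255) layer, which replaces the manual division by 255.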
I am trying to plot a current-voltage acquisition from an instrument, but for different temperatures. I need to keep the structure as presented, but I would like each temperature plotted in one uniform color, with the color changing for the next temperature, so I can tell the curves apart and read their legend (the temperature). Right now I get the animation, but I cannot iterate over temperatures and colors. Thanks.
import random
import csv
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import pandas as pd

class IV_SWEEP():
    def __init__(self):
        self.IV_sweep()

    def init_instrument(self):
        ####### settings
        self.data_points = 10  # number of steps in the sweep
        self.max_current = 0.001
        self.min_current = -self.max_current

    def measurement(self):
        # Allocate arrays to store the measurement results
        currents = np.linspace(self.min_current, self.max_current, num=self.data_points)
        voltages = np.zeros_like(currents)
        temp_list = [300, 310, 320]  # want to plot a curve for each of these temperatures
        for i in range(self.data_points):
            voltages[i] = random.random() + currents[i]
            now = datetime.now()
            self.record_csv(now, currents[i], voltages[i])
            self.animate()
            self.plot()  # calling the plot method separately, but don't know how to iterate the label so the color and legend change per temperature
            plt.pause(3)
        self.plot_enable = True

    def IV_sweep(self):
        self.xdata = []
        self.ydata = []
        self.init_instrument()
        self.measurement()

    def animate(self):
        data = pd.read_csv("test.csv")
        self.xdata = data["Voltage (V)"]
        self.ydata = data["Current (A)"]

    def plot(self):
        plt.gcf()
        temp_list = [300, 310, 320]
        labels = temp_list
        # for lab in labels:
        plt.scatter(self.xdata, self.ydata, label=temp_list)  # need a different label for each temperature scan
        plt.xlabel('Voltage (V)')
        plt.ylabel('Current (mA)')
        # plt.title("IV sweep at {} K".format(temp_list[i]))
        plt.title("IV sweep at temperature xx")  # want the temperature printed in the legend
        # plt.legend(loc='best')
        plt.tight_layout()
        return

    def record_csv(self, timestamp, currents, voltages):  # first parameter must be self for a method
        filename = "test.csv"
        with open(filename, 'a', newline='') as csvfile:
            header = ["Timestamp", "Current (A)", "Voltage (V)", "Voltage stdv (V)"]
            writer = csv.DictWriter(csvfile, fieldnames=header)
            if csvfile.tell() == 0:
                writer.writeheader()
            writer.writerow(
                {
                    "Timestamp": timestamp,
                    "Current (A)": currents,
                    "Voltage (V)": voltages,
                }
            )
        # the with-block closes the file; no explicit close() needed

IV_SWEEP()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

def main(i):
    # Actual parameters
    # A0 = 10
    A0 = 2
    K0 = [1, 3, 5]
    C0 = 5
    # Generate some data based on these
    tmin, tmax = 0, 0.5
    num = 20
    t = []
    y = []
    for K in K0:  # would like to scan over K and plot each curve in a different color
        t = np.linspace(tmin, tmax, num)
        y = model_func(t, A0, K, C0)
        ax1.scatter(t, y, label=K)  # would like to have each K in the legend
    ax1.legend(loc="upper left")

def model_func(t, A, K, C):
    return A * np.exp(K * t) + C

ani = animation.FuncAnimation(fig, main, interval=1000)
plt.show()
Result: (screenshot of the animated plot not included)
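A stand-alone sketch of the color-per-temperature idea (my own mock data, not the instrument code): fix one color per temperature from a colormap and pass one label per scan, so the legend names each curve.

import numpy as np
import matplotlib.pyplot as plt

temp_list = [300, 310, 320]
# One fixed color per temperature, sampled from a colormap
colors = plt.cm.viridis(np.linspace(0, 1, len(temp_list)))

for temp, color in zip(temp_list, colors):
    currents = np.linspace(-0.001, 0.001, 10)
    voltages = np.random.random(10) + currents  # stand-in for the instrument reading
    plt.scatter(voltages, currents, color=color, label=f"{temp} K")

plt.xlabel('Voltage (V)')
plt.ylabel('Current (A)')
plt.legend(loc='best')
plt.show()

The same zip over temperatures and colors should drop into the measurement loop, with one scatter call per temperature scan.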
I have been trying to build a SagemakerUser from the base User class in the Locust library. The issue, though, is that when I use it with a timed shape test, when that test ends (you can see the message "Shape test stopping"), the load test shrugs it off and continues. Below is the script I have written to this end. My question is: how is this behaviour explained?
import pandas as pd
from locust import HttpUser, User, task, TaskSet, events, LoadTestShape
from sagemaker.serializers import JSONSerializer
from sagemaker.session import Session
import sagemaker
import time
import sys
import math
import pdb

df = "some df to load samples from"
endpoint = "sage maker end point name"

class SagemakerClient(sagemaker.predictor.Predictor):
    def predictEx(self, data):
        start_time = time.time()
        start_perf_counter = time.perf_counter()
        name = 'predictEx'
        try:
            result = self.predict(data)
        except:
            total_time = int((time.perf_counter() - start_perf_counter) * 1000)
            events.request_failure.fire(request_type="sagemaker", name=name, response_time=total_time, exception=sys.exc_info(), response_length=0)
        else:
            total_time = int((time.perf_counter() - start_perf_counter) * 1000)
            events.request_success.fire(request_type="sagemaker", name=name, response_time=total_time, response_length=sys.getsizeof(result))

class SagemakerLocust(User):
    abstract = True

    def __init__(self, *args, **kwargs):
        super(SagemakerLocust, self).__init__(*args, **kwargs)
        self.client = SagemakerClient(
            sagemaker_session=Session(),
            endpoint_name="sagemaker-test",
            serializer=JSONSerializer())

class APIUser(SagemakerLocust):
    @task
    def call(self):
        request = df.text.sample(1, weights=df.length).iloc[0]
        self.client.predictEx(request)

class StepLoadShape(LoadTestShape):
    """
    A step load shape

    Keyword arguments:
        step_time -- Time between steps
        step_load -- User increase amount at each step
        spawn_rate -- Users to stop/start per second at every step
        time_limit -- Time limit in seconds
    """
    step_time = 30  # 3600
    step_load = 1
    spawn_rate = 1
    time_limit = 2  # 3600*6
    # pdb.set_trace()

    def tick(self):
        run_time = self.get_run_time()
        if run_time > self.time_limit:
            return None
        current_step = math.floor(run_time / self.step_time) + 1
        return (current_step * self.step_load, self.spawn_rate)
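For what it's worth, the shape's schedule can be inspected without a runner. A stand-alone mirror of tick() (my own hypothetical helper, not part of Locust) shows that with time_limit = 2 the shape asks for (1, 1) for barely two seconds and then returns None, which per the Locust docs is only a request to stop; whether the runner honors it is the behaviour in question here:

import math

def preview_tick(run_time, step_time=30, step_load=1, spawn_rate=1, time_limit=2):
    # Stand-alone mirror of StepLoadShape.tick(), for inspection only
    if run_time > time_limit:
        return None  # the documented signal that the shape wants the test to stop
    current_step = math.floor(run_time / step_time) + 1
    return (current_step * step_load, spawn_rate)

for t in (0, 1, 2, 3, 4):
    print(t, preview_tick(t))
# 0..2 -> (1, 1); from t > 2 -> None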
I'm currently writing some code and am using pandas to export all of the data into CSV files. My program runs multiple iterations until it has gone through all of the necessary files. Pandas rewrites one file each iteration, but when it moves onto the next file I need it to reset all of the data (I think).
The structure is roughly:
while loop > a few variables are named > program runs > dataframe = (pandas.DataFrame(averagepercentagelist, index=namelist, columns=header))
This part works with no problem for one file. When moving onto the next file, all of the arrays I use are reset, and I think this is why pandas gives the error: Shape of passed values is (1,1), indices imply (3,1).
Please let me know if I need to explain it better.
EDIT:
while True:
    try:
        averagepercentagelist = []
        namelist = []
        columns = []
        for row in database:
            averagepercentagelist = ["12", "23"]
            namelist = ["Name0", "Name1"]
            columns = ["Average percentage"]
            dataframe = (pandas.DataFrame(averagepercentagelist, index=namelist, columns=header))
    except Exception as e:
        print(e)
        break
SNIPPET:
dataframe= (pandas.DataFrame(averagepercentagelist,index=namelist,columns=header))
currentcalculatedatafrane = 'averages' + currentcalculate
dataframeexportpath = os.path.join(ROOT_PATH,'Averages',currentcalculatedatafrane)
dataframe.to_csv(dataframeexportpath)
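As a side note, the quoted error is easy to reproduce in isolation: pandas raises it whenever the data and the index disagree in length, which is what happens if, say, namelist still holds entries from a previous file (a guess on my part, based on namelist being a module-level list in the full program below):

import pandas as pd

# One data row, but three index labels: lengths disagree
pd.DataFrame(["12"], index=["Name0", "Name1", "Name2"], columns=["Average Percentage"])
# ValueError: Shape of passed values is (1,1), indices imply (3,1)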
FULL PROGRAM SO FAR:
import csv
import os
import re
import pandas
import tkinter as tk
from tkinter import messagebox
from os.path import isfile, join
from os import listdir
import time

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
indexforcalcu = 0
line_count = 0
testlist = []
namelist = []
header = ['Average Percentage']

def clearvariables():
    # note: these assignments only create locals; the module-level variables are untouched
    indexforcalcu = 0
    testlist = []

def findaverageofstudent(findaveragenumber, numoftests):
    total = 0
    findaveragenumber = findaveragenumber / numoftests
    findaveragenumber = round(findaveragenumber, 1)
    return findaveragenumber

def removecharacters(nameforfunc):
    nameforfunc = str(nameforfunc)
    elem = re.sub("[{'}]", "", nameforfunc)
    return elem

def getallclasses():
    onlyfiles = [f for f in listdir(ROOT_PATH) if isfile(join(ROOT_PATH, f))]
    onlyfiles.remove("averagecalculatorv2.py")
    return onlyfiles

def findaveragefunc():
    indexforcalcu = -1
    while True:
        try:
            totaltests = 0
            line_count = 0
            averagepercentagelist = []
            indexforcalcu = indexforcalcu + 1
            allclasses = getallclasses()
            currentcalculate = allclasses[indexforcalcu]
            classpath = os.path.join(ROOT_PATH, currentcalculate)
            with open(classpath) as csv_file:
                classscoredb = csv.reader(csv_file, delimiter=',')
                for i, row in enumerate(classscoredb):
                    if line_count == 0:
                        while True:
                            try:
                                totaltests = totaltests + 1
                                rowreader = {row[totaltests]}  # probe: raises IndexError past the last column
                            except:
                                totaltests = totaltests - 1
                                line_count = line_count + 1
                                break
                    else:
                        calculating_column_location = 1
                        total = 0
                        while True:
                            try:
                                total = total + int(row[calculating_column_location])
                                calculating_column_location = calculating_column_location + 1
                            except:
                                break
                        i = str(i)
                        name = row[0]
                        cleanname = removecharacters(nameforfunc=name)
                        namelist.append(cleanname)  # note: namelist is global and never reset between files
                        findaveragenumbercal = findaverageofstudent(findaveragenumber=total, numoftests=totaltests)
                        averagepercentagelist.append(findaveragenumbercal)
                        line_count = line_count + 1
                        dataframe = (pandas.DataFrame(averagepercentagelist, index=namelist, columns=header))
                        currentcalculatedatafrane = 'averages' + i + currentcalculate
                        dataframeexportpath = os.path.join(ROOT_PATH, 'Averages', currentcalculatedatafrane)
                        dataframe.to_csv(dataframeexportpath)
                        i = int(i)
        except Exception as e:
            print("ERROR!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n", e)
            break

def makenewclass():
    global newclassname
    getclassname = str(newclassname.get())
    if getclassname == "":
        messagebox.showerror("Error", "The class name you have entered is invalid.")
    else:
        classname = getclassname + ".csv"
        with open(classname, mode='w') as employee_file:
            classwriter = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            classwriter.writerow(["Name", "Test 1"])

root = tk.Tk()
root.title("Test result average finder")
findaveragebutton = tk.Button(root, text="Find Averages", command=findaveragefunc)  # pass the function itself, not findaveragefunc()
findaveragebutton.grid(row=2, column=2, padx=(10, 10), pady=(0, 10))
classnamelabel = tk.Label(root, text="Class name:")
classnamelabel.grid(row=1, column=0, padx=(10, 0), pady=(10, 10))
newclassname = tk.Entry(root)
newclassname.grid(row=1, column=1, padx=(10, 10))
newclassbutton = tk.Button(root, text="Create new class", command=makenewclass)
newclassbutton.grid(row=1, column=2, padx=(0, 10), pady=(10, 10))
root.mainloop()
Thanks in advance,
Sean
Use:
import glob, os
import pandas as pd

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))

# extract all csv files to a list
files = glob.glob(f'{ROOT_PATH}/*.csv')
print(files)

# create the output folder if necessary
new = os.path.join(ROOT_PATH, 'Averages')
if not os.path.exists(new):
    os.makedirs(new)

# loop over each file
for f in files:
    # create DataFrame and convert first column to index
    df = pd.read_csv(f, index_col=[0])
    # average each row, round, and create a one-column DataFrame
    avg = df.mean(axis=1).round(1).to_frame('Average Percentage')
    # remove index name if necessary
    avg.index.name = None
    print(avg)
    # create the new path
    head, tail = os.path.split(f)
    path = os.path.join(head, 'Averages', tail)
    print(path)
    # write DataFrame to csv
    avg.to_csv(path)
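The key difference from the original loop: df and avg are rebuilt from scratch on every pass, so nothing accumulates between files the way the global namelist did, and mean(axis=1) replaces the manual column-walking entirely.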