I have a little GUI program that should only collect some inputs and use them for a method call.
For that I have several tk.Entry objects. The method I want to execute also needs a file from my hard disk, which I want to scrape with ElementTree. (I think the file needs to be in the same directory as my .py file? If not, please tell me how to use a file from another path.) My idea was to copy that file into the current directory and then use a simple call to access it.
But when I copy this file, it only gets copied after I close the program, and that should not happen. It should copy the file instantly so I can work with it.
Is there a way to refresh my files, or do you have another idea?
Thank you, and here is my code:
from Modules import copy_file
import tkinter as tk
from tkinter import ttk, filedialog  # needed for ttk.Button and the file dialog


class MainApplication(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)  # container that holds the stacked page frames
        container.pack(fill="both", expand=True)
        self.frames = {}
        for Frame in (StartPage, SuccessPage):
            page_name = Frame.__name__
            frame = Frame(parent=container, controller=self)
            self.frames[page_name] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame("StartPage")

    # method to show frames
    def show_frame(self, page_name):
        frame = self.frames[page_name]  # get our frame from the frames dict
        frame.tkraise()  # raise it to the front


class StartPage(tk.Frame):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # Open File button; note: grid() returns None, so keep the widget reference first
        self.button_open_file = ttk.Button(self, text="Open File",
                                           command=self.get_file_directory)
        self.button_open_file.grid(row=5, column=0, sticky="WE")

    def get_file_directory(self):
        filename = filedialog.askopenfilename(
            initialdir="/", title="Select file",
            filetypes=(("all files", "*.*"), ("jpeg files", "*.jpg")))
        copy_file(filename)


class SuccessPage(tk.Frame):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.label = tk.Label(self, text="Lab")
        self.label.grid(row=0, column=0, sticky="WE")


if __name__ == '__main__':
    application = MainApplication()
    application.mainloop()
----------------------------------------------------------------------------------------------
Modules.py
import os
from shutil import copy


def copy_file(src):
    # copy the file into the directory this script lives in
    destination_path = os.path.dirname(os.path.realpath(__file__))
    copy(src, destination_path)
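A side note on the two assumptions in the question: the file does not have to sit next to your .py file, because ElementTree accepts any readable path; and shutil.copy is synchronous, so the file is on disk the moment copy_file returns (a file-manager window may simply not have refreshed its listing yet). A minimal sketch, with a made-up example path:

import xml.etree.ElementTree as ET

# Hypothetical absolute path; ET.parse works with any readable location,
# so copying the file next to the script is not required.
source = r"C:\some\other\folder\data.xml"
tree = ET.parse(source)
print(tree.getroot().tag)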
Is there any simple way to pass custom arguments to View instances with aiohttp?
This works:
import aiohttp.web
import functools


class Parent():
    def __init__(self, val):
        self.var = val


class BaseView(aiohttp.web.View):
    def __init__(self, *args, **kwargs):
        self.parent = kwargs.pop("parent")
        super().__init__(*args, **kwargs)


class Handler(BaseView):
    async def get(self):
        return aiohttp.web.Response(text=self.parent.var)


def partial_class(cls, *args, **kwargs):
    class NewCls(cls):
        __init__ = functools.partialmethod(cls.__init__, *args, **kwargs)
    return NewCls


def main():
    parent = Parent("blablabla")
    app = aiohttp.web.Application()
    # New method with args
    app.router.add_view_with_args = functools.partial(
        lambda this, path, handler, d: this.add_view(path, partial_class(handler, **d)),
        app.router,
    )
    # Tornado-style
    app.router.add_view_with_args("/test", Handler, {"parent": parent})
    aiohttp.web.run_app(app)


main()
But I feel like this is overcomplicated.
With Tornado, you can pass additional data as a dict when you instantiate your web Application.
Answering my own question:
It turns out that you are allowed to store global-like variables in an Application instance and then access them in the request handler. It is described in the docs: https://docs.aiohttp.org/en/latest/web_advanced.html#application-s-config
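For completeness, a minimal sketch of that documented pattern, reusing the Parent class from the snippet above (the "parent" key name is just an example):

import aiohttp.web

class Parent():
    def __init__(self, val):
        self.var = val

class Handler(aiohttp.web.View):
    async def get(self):
        # self.request.app is the Application, which supports dict-like storage
        return aiohttp.web.Response(text=self.request.app["parent"].var)

def main():
    app = aiohttp.web.Application()
    app["parent"] = Parent("blablabla")  # store app-wide data under a key
    app.router.add_view("/test", Handler)
    aiohttp.web.run_app(app)

main()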
I am new to PyTorch. I am trying to create a DataLoader for a dataset of images where each image has a corresponding ground truth with the same name:
root:
--->RGB:
------>img1.png
------>img2.png
------>...
------>imgN.png
--->GT:
------>img1.png
------>img2.png
------>...
------>imgN.png
When I pass the root folder (the one containing the RGB and GT folders) to torchvision.datasets.ImageFolder, it reads all of the images as if they were all inputs (classified as RGB and GT), and there seems to be no way to pair the RGB-GT images. I would like to pair the RGB-GT images, shuffle them, and divide them into batches of a defined size. How can this be done? Any advice will be appreciated.
Thanks.
I think a good starting point is to use the VisionDataset class as a base. What we are going to use here is the DatasetFolder source code, so we are going to create something similar. You may notice this class depends on two other functions from the datasets.folder module: default_loader and make_dataset.
We are not going to modify default_loader, because it's already fine; it just helps us load images, so we will import it.
But we need a new make_dataset function that prepares the right pairs of images from the root folder. The original make_dataset pairs image paths with their root folder as the target class (class index), producing a list of (path, class_to_idx[target]) tuples, but we need (rgb_path, gt_path). Here is the code for the new make_dataset:
import os


def make_dataset(root: str) -> list:
    """Reads a directory with data.

    Returns a dataset as a list of tuples of paired image paths:
    (rgb_path, gt_path)
    """
    dataset = []

    # Our dir names
    rgb_dir = 'RGB'
    gt_dir = 'GT'

    # Get all the filenames from the RGB folder
    rgb_fnames = sorted(os.listdir(os.path.join(root, rgb_dir)))

    # Compare file names from the GT folder to file names from RGB:
    for gt_fname in sorted(os.listdir(os.path.join(root, gt_dir))):
        if gt_fname in rgb_fnames:
            # if we have a match, build the full paths to the corresponding images
            rgb_path = os.path.join(root, rgb_dir, gt_fname)
            gt_path = os.path.join(root, gt_dir, gt_fname)
            item = (rgb_path, gt_path)
            # append the pair to the dataset list
            dataset.append(item)

    return dataset
What do we have now? Let's compare our function with the original one:
from torchvision.datasets.folder import make_dataset as make_dataset_original
dataset_original = make_dataset_original(root, {'RGB': 0, 'GT': 1}, extensions='png')
dataset = make_dataset(root)
print('Original make_dataset:')
print(*dataset_original, sep='\n')
print('Our make_dataset:')
print(*dataset, sep='\n')
Original make_dataset:
('./data/GT/img1.png', 1)
('./data/GT/img2.png', 1)
...
('./data/RGB/img1.png', 0)
('./data/RGB/img2.png', 0)
...
Our make_dataset:
('./data/RGB/img1.png', './data/GT/img1.png')
('./data/RGB/img2.png', './data/GT/img2.png')
...
I think it works great! Now it's time to create our Dataset class. The most important part here is the __getitem__ method, because it imports the images, applies the transformations, and returns the tensors that can be used by dataloaders. We need to read a pair of images (rgb and gt) and return a tuple of two image tensors:
from torchvision.datasets.folder import default_loader
from torchvision.datasets.vision import VisionDataset


class CustomVisionDataset(VisionDataset):

    def __init__(self,
                 root,
                 loader=default_loader,
                 rgb_transform=None,
                 gt_transform=None):
        super().__init__(root,
                         transform=rgb_transform,
                         target_transform=gt_transform)

        # Prepare dataset
        samples = make_dataset(self.root)

        self.loader = loader
        self.samples = samples
        # list of RGB images (first element of each pair)
        self.rgb_samples = [s[0] for s in samples]
        # list of GT images (second element of each pair)
        self.gt_samples = [s[1] for s in samples]

    def __getitem__(self, index):
        """Returns a data sample from our dataset.
        """
        # getting our paths to images
        rgb_path, gt_path = self.samples[index]

        # import each image using loader (by default it's PIL)
        rgb_sample = self.loader(rgb_path)
        gt_sample = self.loader(gt_path)

        # here go the transforms, if needed;
        # maybe we need different transforms for each type of image
        if self.transform is not None:
            rgb_sample = self.transform(rgb_sample)
        if self.target_transform is not None:
            gt_sample = self.target_transform(gt_sample)

        # now we return the imported pair of images (tensors)
        return rgb_sample, gt_sample

    def __len__(self):
        return len(self.samples)
Let's test it:
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt

bs = 4  # batch size
transforms = ToTensor()  # we need this to convert PIL images to Tensor
shuffle = True

dataset = CustomVisionDataset('./data', rgb_transform=transforms, gt_transform=transforms)
dataloader = DataLoader(dataset, batch_size=bs, shuffle=shuffle)

for i, (rgb, gt) in enumerate(dataloader):
    print(f'batch {i+1}:')
    # some plots (note the inner loop variable j, so it doesn't shadow the outer i)
    for j in range(bs):
        plt.figure(figsize=(10, 5))
        plt.subplot(221)
        plt.imshow(rgb[j].squeeze().permute(1, 2, 0))
        plt.title(f'RGB img{j+1}')
        plt.subplot(222)
        plt.imshow(gt[j].squeeze().permute(1, 2, 0))
        plt.title(f'GT img{j+1}')
        plt.show()
Out:
batch 1:
...
Here you can find a notebook with the code and a simple dummy dataset.
I am using "Inotify" to logs event when a file or folder is created in a directory ( tmp here) . The example here does the job in as serial process. Meaning, All file creation are treated one after the other, in a sequential way.
import logging
import inotify.adapters

_DEFAULT_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
_LOGGER = logging.getLogger(__name__)


def _configure_logging():
    _LOGGER.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    formatter = logging.Formatter(_DEFAULT_LOG_FORMAT)
    ch.setFormatter(formatter)
    _LOGGER.addHandler(ch)


def _main():
    i = inotify.adapters.Inotify()
    i.add_watch(b'/tmp')
    try:
        for event in i.event_gen():
            if event is not None:
                (header, type_names, watch_path, filename) = event
                _LOGGER.info("WD=(%d) MASK=(%d) COOKIE=(%d) LEN=(%d) MASK->NAMES=%s "
                             "WATCH-PATH=[%s] FILENAME=[%s]",
                             header.wd, header.mask, header.cookie, header.len, type_names,
                             watch_path.decode('utf-8'), filename.decode('utf-8'))
    finally:
        i.remove_watch(b'/tmp')


if __name__ == '__main__':
    _configure_logging()
    _main()
I would like to parallelize the event notifications in case several files are uploaded. Should I import threading and start a thread inside the loop?
My second concern is that I am not sure where it would make sense to put the thread function.
The script below handles multiple events in case of multiple sessions, so in my case this is enough. I added the multiprocessing option instead of threading; I found multiprocessing faster than threading.
import logging
import inotify.adapters
import multiprocessing

_DEFAULT_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
_LOGGER = logging.getLogger(__name__)


def _configure_logging():
    _LOGGER.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    formatter = logging.Formatter(_DEFAULT_LOG_FORMAT)
    ch.setFormatter(formatter)
    _LOGGER.addHandler(ch)


def PopUpMessage(event):
    if event is not None:
        (header, type_names, watch_path, filename) = event
        _LOGGER.info("WD=(%d) MASK=(%d) COOKIE=(%d) LEN=(%d) MASK->NAMES=%s "
                     "WATCH-PATH=[%s] FILENAME=[%s]",
                     header.wd, header.mask, header.cookie, header.len, type_names,
                     watch_path.decode('utf-8'), filename.decode('utf-8'))


def My_main():
    i = inotify.adapters.Inotify()
    i.add_watch(b'/PARA')
    try:
        # event_gen() already blocks and yields forever, so no extra while True is needed
        for event in i.event_gen():
            # spawn one process per event
            m = multiprocessing.Process(target=PopUpMessage, args=(event,))
            m.start()
    finally:
        i.remove_watch(b'/PARA')


if __name__ == '__main__':
    _configure_logging()
    N = multiprocessing.Process(target=My_main)
    N.start()
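If threads turn out to be preferable after all (the handler is mostly I/O-bound logging), a thread pool avoids the cost of spawning one process per event. A minimal, untested sketch reusing PopUpMessage and _configure_logging from above:

import inotify.adapters
from concurrent.futures import ThreadPoolExecutor

def my_main():
    i = inotify.adapters.Inotify()
    i.add_watch(b'/PARA')
    # a small pool of reusable worker threads instead of one process per event
    with ThreadPoolExecutor(max_workers=4) as pool:
        try:
            for event in i.event_gen():
                if event is not None:
                    pool.submit(PopUpMessage, event)
        finally:
            i.remove_watch(b'/PARA')

if __name__ == '__main__':
    _configure_logging()
    my_main()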
Following this How-to:
https://github.com/carrierwaveuploader/carrierwave/wiki/How-to:-Add-more-files-and-remove-single-file-when-using-default-multiple-file-uploads-feature
class ImagesController < ApplicationController
  before_action :set_gallery

  def create
    add_more_images(images_params[:images])
    flash[:error] = "Failed uploading images" unless @gallery.save
    redirect_to :back
  end

  def destroy
    remove_image_at_index(params[:id].to_i)
    flash[:error] = "Failed deleting image" unless @gallery.save
    redirect_to :back
  end

  private

  def set_gallery
    @gallery = Gallery.find(params[:gallery_id])
  end

  def add_more_images(new_images)
    images = @gallery.images
    images += new_images
    @gallery.images = images
  end

  def remove_image_at_index(index)
    remain_images = @gallery.images                # copy the array
    deleted_image = remain_images.delete_at(index) # delete the target image
    deleted_image.try(:remove!)                    # delete image from S3
    @gallery.images = remain_images                # re-assign back
  end

  def images_params
    params.require(:gallery).permit({images: []}) # allow nested params as array
  end
end
I can't seem to correctly remove the very last file. It keeps showing in my printed file list, oddly enough with 0 KB.
Then, when I upload new files, it goes away.
I had the same problem, and I found that you have to call remove_images! if it was the last one. In the remove_image_at_index function, add:
@gallery.remove_images! if remain_images.empty?
Regards
I am using Angular and Bootstrap to serve my forms. If a user uploads an image, Angular sends it in the "data:" (base64) format, but Django expects a file. I have worked around this by overriding both perform_authentication (to convert the image to a file) and perform_create (to inject my user_id). Is there a better way to override?
I'd rather not override my view. I'd rather override the way Django validates ImageFields. What I want to do is check whether the passed value is a base64 string; if it is, convert it to a file, then validate the ImageField. The code below works as is, I just don't feel it's optimal.
Here is my view:
from rest_framework import generics  # import implied by the snippet


class UserCredentialList(generics.ListCreateAPIView):
    permission_classes = (IsCredentialOwnerOrAdmin,)
    serializer_class = CredentialSerializer
    """
    This view should return a list of all the purchases
    for the currently authenticated user.
    """

    def get_queryset(self):
        """
        This view should return a list of all models by
        the maker passed in the URL
        """
        user = self.request.user
        return Credential.objects.filter(member=user)

    def perform_create(self, serializer):
        serializer.save(member_id=self.request.user.id)

    def perform_authentication(self, request):
        if request.method == 'POST':
            data = request.data.pop('document_image', None)

            from django.core.files.base import ContentFile
            import base64
            import six
            import uuid

            # Check if this is a base64 string
            if isinstance(data, six.string_types):
                # Check if the base64 string is in the "data:" format
                if 'data:' in data and ';base64,' in data:
                    # Break out the header from the base64 content
                    header, data = data.split(';base64,')
                    # Try to decode the file. Return validation error if it fails.
                    try:
                        decoded_file = base64.b64decode(data)
                    except TypeError:
                        self.fail('invalid_image')
                    # Generate file name:
                    file_name = str(uuid.uuid4())[:12]  # 12 characters are more than enough.
                    # Get the file name extension:
                    import imghdr
                    file_extension = imghdr.what(file_name, decoded_file)
                    file_extension = "jpg" if file_extension == "jpeg" else file_extension
                    complete_file_name = "%s.%s" % (file_name, file_extension,)
                    data = ContentFile(decoded_file, name=complete_file_name)

            # put the (possibly converted) value back into the request data
            request.data['document_image'] = data
        request.user  # accessing user forces authentication, as this hook normally does
And here is my serializer:
class CredentialSerializer(serializers.ModelSerializer):
    class Meta:
        model = Credential
        fields = (
            'id',
            'credential_type',
            'credential_number',
            'date_received',
            'is_verified',
            'date_verified',
            'document_image',
        )
And here is my model:
class Credential(models.Model):
    """Used to store various credentials for member validation."""

    document_image = models.ImageField(
        upload_to=get_upload_path(instance="instance",
                                  filename="filename.ext",
                                  path='images/credentials/'))

    PASSENGER = 'P'
    OWNER = 'O'
    CAPTAIN = 'C'
    CREDENTIAL_CHOICES = (
        (PASSENGER, 'Passenger'),
        (OWNER, 'Owner'),
        (CAPTAIN, 'Captain'),
    )
    credential_type = models.CharField(max_length=1,
                                       choices=CREDENTIAL_CHOICES,
                                       default=PASSENGER)
    credential_number = models.CharField(max_length=255)
    date_received = models.DateTimeField(auto_now_add=True)
    is_verified = models.BooleanField(default=False)
    date_verified = models.DateTimeField(blank=True, null=True)
    member = models.ForeignKey(settings.AUTH_USER_MODEL,
                               related_name='credentials')
I used the link below to help me; now I just want to figure out how to override the proper method:
Django REST Framework upload image: "The submitted data was not a file"
Well, I've made one change since asking: I moved this function to my serializer and now override the is_valid method instead, which also works. At least it's not in my view anymore.
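For anyone who wants it out of is_valid as well: the same conversion can live in a custom serializer field, so nothing in the view or serializer body needs overriding. A hedged sketch of that pattern (Base64ImageField is my own naming, not a DRF built-in):

import base64
import imghdr
import uuid
from django.core.files.base import ContentFile
from rest_framework import serializers

class Base64ImageField(serializers.ImageField):
    """ImageField that also accepts a 'data:...;base64,...' string."""
    def to_internal_value(self, data):
        if isinstance(data, str) and 'data:' in data and ';base64,' in data:
            header, data = data.split(';base64,')
            try:
                decoded_file = base64.b64decode(data)
            except TypeError:
                self.fail('invalid_image')
            file_name = str(uuid.uuid4())[:12]
            extension = imghdr.what(file_name, decoded_file)
            extension = 'jpg' if extension == 'jpeg' else extension
            data = ContentFile(decoded_file, name='%s.%s' % (file_name, extension))
        # hand the (possibly converted) value to the normal ImageField validation
        return super().to_internal_value(data)

Then declare document_image = Base64ImageField() on CredentialSerializer and drop the perform_authentication override.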