diff --git a/python_apps/media-monitor2/media/monitor/handler.py b/python_apps/media-monitor2/media/monitor/handler.py
index b23043996..3afef8ffc 100644
--- a/python_apps/media-monitor2/media/monitor/handler.py
+++ b/python_apps/media-monitor2/media/monitor/handler.py
@@ -11,8 +11,6 @@ class Handles(object):
     @abc.abstractmethod
     def handle(self, sender, event, *args, **kwargs): pass
 
-# TODO : remove the code duplication between ReportHandler and
-# ProblemFileHandler. Namely the part where both initialize pydispatch
 # TODO : Investigate whether weak reffing in dispatcher.connect could possibly
 # cause a memory leak
 
@@ -22,26 +20,32 @@ class ReportHandler(Handles):
         self.signal = signal
         self.report_signal = "badfile"
         def dummy(sender, event): self.handle(sender,event)
-        dispatcher.connect(dummy, signal=signal, sender=dispatcher.Any, weak=False)
+        dispatcher.connect(dummy, signal=signal, sender=dispatcher.Any,
+                weak=False)
 
     def report_problem_file(self, event, exception=None):
-        dispatcher.send(signal=self.report_signal, sender=self, event=event, exception=exception)
+        dispatcher.send(signal=self.report_signal, sender=self, event=event,
+                exception=exception)
 
 class ProblemFileHandler(Handles, Loggable):
     def __init__(self, channel, **kwargs):
         self.channel = channel
         self.signal = self.channel.signal
         self.problem_dir = self.channel.path
-        def dummy(sender, event, exception): self.handle(sender, event, exception)
-        dispatcher.connect(dummy, signal=self.signal, sender=dispatcher.Any, weak=False)
+        def dummy(sender, event, exception):
+            self.handle(sender, event, exception)
+        dispatcher.connect(dummy, signal=self.signal, sender=dispatcher.Any,
+                weak=False)
         mmp.create_dir( self.problem_dir )
-        self.logger.info("Initialized problem file handler. Problem dir: '%s'" % self.problem_dir)
+        self.logger.info("Initialized problem file handler. Problem dir: '%s'" %
+                self.problem_dir)
 
     def handle(self, sender, event, exception=None):
         # TODO : use the exception parameter for something
-        self.logger.info("Received problem file: '%s'. Supposed to move it to problem dir", event.path)
-        import ipdb; ipdb.set_trace()
+        self.logger.info("Received problem file: '%s'. Supposed to move it "
+                "to problem dir", event.path)
         try: mmp.move_to_dir(dir_path=self.problem_dir, file_path=event.path)
         except Exception as e:
-            self.logger.info("Could not move file: '%s' to problem dir: '%s'" % (event.path, self.problem_dir))
+            self.logger.info("Could not move file: '%s' to problem dir: '%s'"
+                    % (event.path, self.problem_dir))
             self.logger.info("Exception: %s" % str(e))
diff --git a/python_apps/media-monitor2/media/monitor/listeners.py b/python_apps/media-monitor2/media/monitor/listeners.py
index 399bc6ff0..5c7f2d5f1 100644
--- a/python_apps/media-monitor2/media/monitor/listeners.py
+++ b/python_apps/media-monitor2/media/monitor/listeners.py
@@ -10,8 +10,8 @@ from media.monitor.events import OrganizeFile, NewFile, MoveFile, DeleteFile, \
 from media.monitor.log import Loggable, get_logger
 
 # We attempt to document a list of all special cases and hacks that the
-# following classes should be able to handle.
-# TODO : implement all of the following special cases
+# following classes should be able to handle. TODO : implement all of the
+# following special cases
 #
 # - Recursive directories being added to organized dirs are not handled
 #   properly as they only send a request for the dir and not for every file. Also
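Review note on the rewrapped connect/send calls above: both handlers register a locally defined closure with weak=False and fire signals with keyword payloads. For readers unfamiliar with PyDispatcher, a minimal self-contained sketch of that pattern follows; the signal name and payload are made up for illustration and are not taken from this patch.

    from pydispatch import dispatcher

    EXAMPLE_SIGNAL = "example_signal"   # hypothetical; the real code uses channel signals

    def receiver(sender, event):
        print("received %r from %r" % (event, sender))

    # weak=False makes the dispatcher hold a strong reference, so a closure
    # defined inside __init__ (like the dummy() helpers) is not garbage
    # collected the moment __init__ returns. The flip side is that the
    # receiver is kept alive indefinitely, which is what the "weak reffing"
    # TODO in handler.py worries about.
    dispatcher.connect(receiver, signal=EXAMPLE_SIGNAL, sender=dispatcher.Any,
                       weak=False)

    # Keyword arguments are forwarded to receivers that accept them, which is
    # how report_problem_file() delivers event= and exception=.
    dispatcher.send(signal=EXAMPLE_SIGNAL, sender="organize", event="/tmp/x.mp3")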
@@ -54,19 +54,21 @@ class FileMediator(object):
         # Poor man's default arguments
         if 'key' not in kwargs: kwargs['key'] = 'maskname'
         for skip in what_to_skip:
-            # standard nasty hack, too long to explain completely in comments but
-            # the gist of it is:
+            # standard nasty hack, too long to explain completely in comments
+            # but the gist of it is:
             # 1. python's scoping rules are sometimes strange.
             # 2. workaround is very similar to what you do in javascript when
             #    you write stuff like (function (x,y) { console.log(x+y); })(2,4)
             #    to be avoid clobbering peoples' namespace.
-            skip_check = (lambda skip: lambda v: getattr(v,kwargs['key']) == skip)(skip)
+            skip_check = (lambda skip:
+                    lambda v: getattr(v,kwargs['key']) == skip)(skip)
             FileMediator.skip_checks.add( skip_check )
 
 def mediate_ignored(fn):
     def wrapped(self, event, *args,**kwargs):
         event.pathname = unicode(event.pathname, "utf-8")
-        skip_events = [s_check for s_check in FileMediator.skip_checks if s_check(event)]
+        skip_events = [s_check for s_check in FileMediator.skip_checks
+                if s_check(event)]
         for s_check in skip_events:
             FileMediator.skip_checks.remove( s_check )
             # Only process skip_checks one at a time
@@ -93,20 +95,22 @@ class OrganizeListener(BaseListener, pyinotify.ProcessEvent, Loggable):
         handle does to every file"""
         flushed = 0
         for f in mmp.walk_supported(path, clean_empties=True):
-            self.logger.info("Bootstrapping: File in 'organize' directory: '%s'" % f)
-            dispatcher.send(signal=self.signal, sender=self, event=OrganizeFile(f))
+            self.logger.info("Bootstrapping: File in 'organize' directory: "
+                    "'%s'" % f)
+            dispatcher.send(signal=self.signal, sender=self,
+                    event=OrganizeFile(f))
             flushed += 1
         self.logger.info("Flushed organized directory with %d files" % flushed)
 
     @mediate_ignored
     @IncludeOnly(mmp.supported_extensions)
     def process_to_organize(self, event):
-        dispatcher.send(signal=self.signal, sender=self, event=OrganizeFile(event))
+        dispatcher.send(signal=self.signal, sender=self,
+                event=OrganizeFile(event))
 
 class StoreWatchListener(BaseListener, Loggable, pyinotify.ProcessEvent):
     # TODO : must intercept DeleteDirWatch events somehow
     def process_IN_CLOSE_WRITE(self, event):
-        import ipdb; ipdb.set_trace()
         self.process_create(event)
     def process_IN_MOVED_TO(self, event):
         if EventRegistry.registered(event):
@@ -166,9 +170,10 @@ class StoreWatchListener(BaseListener, Loggable, pyinotify.ProcessEvent):
 
     def flush_events(self, path):
         """
-        walk over path and send a NewFile event for every file in this directory.
-        Not to be confused with bootstrapping which is a more careful process that
-        involved figuring out what's in the database first.
+        Walk over path and send a NewFile event for every file in this
+        directory. Not to be confused with bootstrapping, which is a more
+        careful process that involves figuring out what's in the database
+        first.
         """
         # Songs is a dictionary where every key is the watched the directory
         # and the value is a set with all the files in that directory.
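The "standard nasty hack" comment above only gestures at the underlying problem, so a self-contained sketch (toy values, not project code) may help: Python closures capture variables, not values, and the immediately-applied outer lambda in FileMediator gives each check its own binding.

    # Each lambda closes over the *variable* skip, so after the loop every
    # check compares against the final value, "c".
    broken = [lambda v: v == skip for skip in ("a", "b", "c")]
    print([check("c") for check in broken])    # [True, True, True]

    # The workaround: apply an outer lambda immediately, the Python analogue
    # of javascript's (function (x,y) { ... })(2,4), so each inner lambda
    # gets a private copy of skip.
    fixed = [(lambda skip: lambda v: v == skip)(skip)
             for skip in ("a", "b", "c")]
    print([check("c") for check in fixed])     # [False, False, True]

A default argument (lambda v, skip=skip: v == skip) achieves the same effect with one less level of nesting and might be a simpler follow-up cleanup.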
diff --git a/python_apps/media-monitor2/media/monitor/watchersyncer.py b/python_apps/media-monitor2/media/monitor/watchersyncer.py
index 92dcb9760..4c46d382b 100644
--- a/python_apps/media-monitor2/media/monitor/watchersyncer.py
+++ b/python_apps/media-monitor2/media/monitor/watchersyncer.py
@@ -6,7 +6,6 @@ import traceback
 
 from media.monitor.handler import ReportHandler
 from media.monitor.log import Loggable
-#from media.monitor.listeners import FileMediator
 from media.monitor.exceptions import BadSongFile
 from media.monitor.pure import LazyProperty
 
@@ -25,7 +24,8 @@ class RequestSync(threading.Thread,Loggable):
         return ac.AirtimeApiClient.create_right_config()
 
     def run(self):
-        self.logger.info("Attempting request with %d items." % len(self.requests))
+        self.logger.info("Attempting request with %d items." %
+                len(self.requests))
         # Note that we must attach the appropriate mode to every response. Also
         # Not forget to attach the 'is_record' to any requests that are related
         # to recorded shows
@@ -43,7 +43,6 @@ class RequestSync(threading.Thread,Loggable):
             except Exception as e:
                 self.logger.info("An evil exception occured")
                 self.logger.error( traceback.format_exc() )
-                import ipdb; ipdb.set_trace()
         def make_req():
             self.apiclient.send_media_monitor_requests( packed_requests )
         # Is this retry shit even necessary? Consider getting rid of this.
@@ -54,15 +53,16 @@ class RequestSync(threading.Thread,Loggable):
                 self.logger.info("Api Controller is a piece of shit\n \
                         it's not returning json when it should\n \
                         ... will fix once I setup the damn debugger")
-                self.logger.info("Trying again after %f seconds" % self.request_wait)
+                self.logger.info("Trying again after %f seconds" %
+                        self.request_wait)
                 time.sleep( self.request_wait )
-            except Exception as e:
-                self.unexpected_exception(e)
+            except Exception as e: self.unexpected_exception(e)
             else:
-                self.logger.info("Request worked on the '%d' try" % (try_index + 1))
+                self.logger.info("Request worked on the '%d' try" %
+                        (try_index + 1))
                 break
-        else: self.logger.info("Failed to send request after '%d' tries..." % self.retries)
-        #self.logger.info("Now ignoring: %d files" % len(FileMediator.ignored_set))
+        else: self.logger.info("Failed to send request after '%d' tries..." %
+                self.retries)
         self.watcher.flag_done()
 
 class TimeoutWatcher(threading.Thread,Loggable):
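A note on control flow in the hunk above: RequestSync.run() relies on Python's for/else, where the else arm runs only when the loop completes without hitting break. A runnable sketch of the same shape; the function name and the ValueError stand-in for the "not returning json" case are illustrative, not from this codebase.

    import time

    def send_with_retries(attempt, retries=3, request_wait=0.5):
        for try_index in range(retries):
            try:
                attempt()
            except ValueError:      # stand-in for the bad-json error case
                print("Trying again after %f seconds" % request_wait)
                time.sleep(request_wait)
            else:
                # attempt() raised nothing, so report success and stop.
                print("Request worked on the '%d' try" % (try_index + 1))
                break
        else:
            # Runs only when no iteration hit break, i.e. every attempt
            # failed: the "Failed to send request after '%d' tries..." branch.
            print("Failed to send request after '%d' tries..." % retries)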
@@ -86,7 +86,8 @@ class TimeoutWatcher(threading.Thread,Loggable):
             self.watcher.request_do()
         # Same for events, this behaviour is mandatory however.
         if self.watcher.events_in_queue():
-            self.logger.info("We got %d events that are unflushed" % self.watcher.events_left_count())
+            self.logger.info("We got %d events that are unflushed" %
+                    self.watcher.events_left_count())
             self.watcher.flush_events()
 
 class WatchSyncer(ReportHandler,Loggable):
@@ -146,7 +147,8 @@ class WatchSyncer(ReportHandler,Loggable):
             self.request_do()
 
     def events_in_queue(self):
-        """returns true if there are events in the queue that haven't been processed yet"""
+        """Returns true if there are events in the queue that haven't been
+        processed yet."""
         return len(self.__queue) > 0
 
     def requests_in_queue(self):
diff --git a/python_apps/media-monitor2/media/update/replaygainupdater.py b/python_apps/media-monitor2/media/update/replaygainupdater.py
index 807559a15..05ff3d7bd 100644
--- a/python_apps/media-monitor2/media/update/replaygainupdater.py
+++ b/python_apps/media-monitor2/media/update/replaygainupdater.py
@@ -11,13 +11,15 @@ from media.update import replaygain
 
 class ReplayGainUpdater(Thread):
     """
-    The purpose of the class is to query the server for a list of files which do not have a ReplayGain
-    value calculated. This class will iterate over the list calculate the values, update the server and
-    repeat the process until the server reports there are no files left.
+    The purpose of the class is to query the server for a list of files which
+    do not have a ReplayGain value calculated. This class will iterate over
+    the list, calculate the values, update the server, and repeat the process
+    until the server reports there are no files left.
 
-    This class will see heavy activity right after a 2.1->2.2 upgrade since 2.2 introduces ReplayGain
-    normalization. A fresh install of Airtime 2.2 will see this class not used at all since a file
-    imported in 2.2 will automatically have its ReplayGain value calculated.
+    This class will see heavy activity right after a 2.1->2.2 upgrade since
+    2.2 introduces ReplayGain normalization. A fresh install of Airtime 2.2
+    will see this class not used at all since a file imported in 2.2 will
+    automatically have its ReplayGain value calculated.
     """
 
     def __init__(self, logger):
@@ -34,8 +36,9 @@ class ReplayGainUpdater(Thread):
         try:
             processed_data = []
 
-            #keep getting few rows at a time for current music_dir (stor or watched folder).
-            #When we get a response with 0 rows, then we will set 'finished' to True.
+            # keep getting a few rows at a time for the current music_dir
+            # (stor or watched folder). When we get a response with 0 rows,
+            # then we will set 'finished' to True.
             finished = False
 
             while not finished:
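The docstring and comment above describe a fetch-until-empty loop. As a sketch of that shape: fetch_batch, compute_gain, and update_server below are hypothetical stand-ins for the api-client and replaygain calls, not names from this codebase.

    def process_replaygain(fetch_batch, compute_gain, update_server):
        # Pull a few rows at a time for one music_dir and stop as soon as
        # the server hands back an empty batch (0 rows means finished).
        finished = False
        while not finished:
            files = fetch_batch()             # e.g. the next batch of unscanned rows
            if len(files) == 0:
                finished = True               # server reports no files left
            else:
                gains = [(f["id"], compute_gain(f["path"])) for f in files]
                update_server(gains)          # push results, then loop again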