cc-4105: fixed a ton of formatting

This commit is contained in:
Rudi Grinberg 2012-08-08 10:49:41 -04:00
parent 4f29301334
commit a8b9b300e5
4 changed files with 56 additions and 42 deletions

View File

@@ -11,8 +11,6 @@ class Handles(object):
     @abc.abstractmethod
     def handle(self, sender, event, *args, **kwargs): pass

-# TODO : remove the code duplication between ReportHandler and
-# ProblemFileHandler. Namely the part where both initialize pydispatch
 # TODO : Investigate whether weak reffing in dispatcher.connect could possibly
 # cause a memory leak
@@ -22,26 +20,32 @@ class ReportHandler(Handles):
         self.signal = signal
         self.report_signal = "badfile"
         def dummy(sender, event): self.handle(sender,event)
-        dispatcher.connect(dummy, signal=signal, sender=dispatcher.Any, weak=False)
+        dispatcher.connect(dummy, signal=signal, sender=dispatcher.Any,
+                weak=False)

     def report_problem_file(self, event, exception=None):
-        dispatcher.send(signal=self.report_signal, sender=self, event=event, exception=exception)
+        dispatcher.send(signal=self.report_signal, sender=self, event=event,
+                exception=exception)

 class ProblemFileHandler(Handles, Loggable):
     def __init__(self, channel, **kwargs):
         self.channel = channel
         self.signal = self.channel.signal
         self.problem_dir = self.channel.path
-        def dummy(sender, event, exception): self.handle(sender, event, exception)
-        dispatcher.connect(dummy, signal=self.signal, sender=dispatcher.Any, weak=False)
+        def dummy(sender, event, exception):
+            self.handle(sender, event, exception)
+        dispatcher.connect(dummy, signal=self.signal, sender=dispatcher.Any,
+                weak=False)
         mmp.create_dir( self.problem_dir )
-        self.logger.info("Initialized problem file handler. Problem dir: '%s'" % self.problem_dir)
+        self.logger.info("Initialized problem file handler. Problem dir: '%s'" %
+                self.problem_dir)

     def handle(self, sender, event, exception=None):
         # TODO : use the exception parameter for something
-        self.logger.info("Received problem file: '%s'. Supposed to move it to problem dir", event.path)
-        import ipdb; ipdb.set_trace()
+        self.logger.info("Received problem file: '%s'. Supposed to move it to \
+problem dir", event.path)
         try: mmp.move_to_dir(dir_path=self.problem_dir, file_path=event.path)
         except Exception as e:
-            self.logger.info("Could not move file: '%s' to problem dir: '%s'" % (event.path, self.problem_dir))
+            self.logger.info("Could not move file: '%s' to problem dir: '%s'" %
+                    (event.path, self.problem_dir))
             self.logger.info("Exception: %s" % str(e))

View File

@@ -10,8 +10,8 @@ from media.monitor.events import OrganizeFile, NewFile, MoveFile, DeleteFile, \
 from media.monitor.log import Loggable, get_logger

 # We attempt to document a list of all special cases and hacks that the
-# following classes should be able to handle.
-# TODO : implement all of the following special cases
+# following classes should be able to handle. TODO : implement all of the
+# following special cases
 #
 # - Recursive directories being added to organized dirs are not handled
 # properly as they only send a request for the dir and not for every file. Also
@@ -54,19 +54,21 @@ class FileMediator(object):
         # Poor man's default arguments
         if 'key' not in kwargs: kwargs['key'] = 'maskname'
         for skip in what_to_skip:
-            # standard nasty hack, too long to explain completely in comments but
-            # the gist of it is:
+            # standard nasty hack, too long to explain completely in comments
+            # but the gist of it is:
             # 1. python's scoping rules are sometimes strange.
             # 2. workaround is very similar to what you do in javascript when
             # you write stuff like (function (x,y) { console.log(x+y); })(2,4)
             # to be avoid clobbering peoples' namespace.
-            skip_check = (lambda skip: lambda v: getattr(v,kwargs['key']) == skip)(skip)
+            skip_check = (lambda skip:
+                    lambda v: getattr(v,kwargs['key']) == skip)(skip)
             FileMediator.skip_checks.add( skip_check )

 def mediate_ignored(fn):
     def wrapped(self, event, *args,**kwargs):
         event.pathname = unicode(event.pathname, "utf-8")
-        skip_events = [s_check for s_check in FileMediator.skip_checks if s_check(event)]
+        skip_events = [s_check for s_check in FileMediator.skip_checks
+                if s_check(event)]
         for s_check in skip_events:
             FileMediator.skip_checks.remove( s_check )
             # Only process skip_checks one at a time
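
The scoping hack in FileMediator deserves one concrete illustration (not part of the commit). Python closures capture variables, not values, so every predicate built naively inside the loop would test against the final value of `skip`; immediately applying an outer lambda freezes the current value, exactly like the `(function (x) { ... })(x)` idiom the comment cites:

```python
# Naive version: all three closures share the same `skip` variable.
naive = [lambda v: v == skip for skip in ("a", "b", "c")]
print([f("a") for f in naive])   # [False, False, False] -- all compare to "c"

# Immediately-applied outer lambda: each closure gets its own `skip`.
frozen = [(lambda skip: lambda v: v == skip)(skip)
          for skip in ("a", "b", "c")]
print([f("a") for f in frozen])  # [True, False, False]
```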
@@ -93,20 +95,22 @@ class OrganizeListener(BaseListener, pyinotify.ProcessEvent, Loggable):
        handle does to every file"""
         flushed = 0
         for f in mmp.walk_supported(path, clean_empties=True):
-            self.logger.info("Bootstrapping: File in 'organize' directory: '%s'" % f)
-            dispatcher.send(signal=self.signal, sender=self, event=OrganizeFile(f))
+            self.logger.info("Bootstrapping: File in 'organize' directory: \
+'%s'" % f)
+            dispatcher.send(signal=self.signal, sender=self,
+                    event=OrganizeFile(f))
             flushed += 1
         self.logger.info("Flushed organized directory with %d files" % flushed)

     @mediate_ignored
     @IncludeOnly(mmp.supported_extensions)
     def process_to_organize(self, event):
-        dispatcher.send(signal=self.signal, sender=self, event=OrganizeFile(event))
+        dispatcher.send(signal=self.signal, sender=self,
+                event=OrganizeFile(event))

 class StoreWatchListener(BaseListener, Loggable, pyinotify.ProcessEvent):
     # TODO : must intercept DeleteDirWatch events somehow
     def process_IN_CLOSE_WRITE(self, event):
-        import ipdb; ipdb.set_trace()
         self.process_create(event)
     def process_IN_MOVED_TO(self, event):
         if EventRegistry.registered(event):
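
`IncludeOnly`'s definition is not part of this diff, so the following is only a plausible sketch of what an extension-filtering method decorator like it might look like, assuming pyinotify-style events with a `pathname` attribute:

```python
import os

def include_only(extensions):
    """Hypothetical stand-in for IncludeOnly: drop events whose file
    extension is not in `extensions` before they reach the handler."""
    def decorator(fn):
        def wrapped(self, event, *args, **kwargs):
            ext = os.path.splitext(event.pathname)[1].lstrip(".").lower()
            if ext in extensions:
                return fn(self, event, *args, **kwargs)
            # Unsupported file types are ignored silently.
        return wrapped
    return decorator
```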
@@ -166,9 +170,10 @@ class StoreWatchListener(BaseListener, Loggable, pyinotify.ProcessEvent):
     def flush_events(self, path):
         """
-        walk over path and send a NewFile event for every file in this directory.
-        Not to be confused with bootstrapping which is a more careful process that
-        involved figuring out what's in the database first.
+        walk over path and send a NewFile event for every file in this
+        directory. Not to be confused with bootstrapping which is a more
+        careful process that involved figuring out what's in the database
+        first.
         """
         # Songs is a dictionary where every key is the watched the directory
         # and the value is a set with all the files in that directory.
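
The "dictionary of sets" shape that last comment describes is simple enough to sketch; `watch_dirs` and the helper name below are illustrative, not from the source:

```python
import os
from collections import defaultdict

def group_by_watch_dir(paths, watch_dirs):
    """Map each watched directory to the set of files under it."""
    songs = defaultdict(set)
    for p in paths:
        for d in watch_dirs:
            # os.path.join(d, "") forces a trailing separator so that
            # "/srv/music2" does not match files under "/srv/music".
            if p.startswith(os.path.join(d, "")):
                songs[d].add(p)
                break
    return songs
```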

View File

@@ -6,7 +6,6 @@ import traceback

 from media.monitor.handler import ReportHandler
 from media.monitor.log import Loggable
-#from media.monitor.listeners import FileMediator
 from media.monitor.exceptions import BadSongFile
 from media.monitor.pure import LazyProperty
@@ -25,7 +24,8 @@ class RequestSync(threading.Thread,Loggable):
         return ac.AirtimeApiClient.create_right_config()

     def run(self):
-        self.logger.info("Attempting request with %d items." % len(self.requests))
+        self.logger.info("Attempting request with %d items." %
+                len(self.requests))
         # Note that we must attach the appropriate mode to every response. Also
         # Not forget to attach the 'is_record' to any requests that are related
         # to recorded shows
@@ -43,7 +43,6 @@ class RequestSync(threading.Thread,Loggable):
         except Exception as e:
             self.logger.info("An evil exception occured")
             self.logger.error( traceback.format_exc() )
-        import ipdb; ipdb.set_trace()
         def make_req():
             self.apiclient.send_media_monitor_requests( packed_requests )
         # Is this retry shit even necessary? Consider getting rid of this.
@@ -54,15 +53,16 @@ class RequestSync(threading.Thread,Loggable):
                 self.logger.info("Api Controller is a piece of shit\n \
                         it's not returning json when it should\n \
                         ... will fix once I setup the damn debugger")
-                self.logger.info("Trying again after %f seconds" % self.request_wait)
+                self.logger.info("Trying again after %f seconds" %
+                        self.request_wait)
                 time.sleep( self.request_wait )
-            except Exception as e:
-                self.unexpected_exception(e)
+            except Exception as e: self.unexpected_exception(e)
             else:
-                self.logger.info("Request worked on the '%d' try" % (try_index + 1))
+                self.logger.info("Request worked on the '%d' try" %
+                        (try_index + 1))
                 break
-        else: self.logger.info("Failed to send request after '%d' tries..." % self.retries)
-        #self.logger.info("Now ignoring: %d files" % len(FileMediator.ignored_set))
+        else: self.logger.info("Failed to send request after '%d' tries..." %
+                self.retries)
         self.watcher.flag_done()

 class TimeoutWatcher(threading.Thread,Loggable):
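
The retry loop in RequestSync.run leans on Python's for/else, which is easy to misread: the else block fires only when the loop finishes without a break, i.e. when every attempt failed. A condensed sketch of that shape, with `make_req`, `retries`, and `request_wait` standing in for the attributes the real class uses, and ValueError standing in for whatever error the real code treats as a bad-JSON response:

```python
import time

def send_with_retries(make_req, retries, request_wait, logger):
    for try_index in range(retries):
        try:
            make_req()
        except ValueError:
            # Assumed stand-in for the "not returning json" failure above.
            logger.info("Trying again after %f seconds" % request_wait)
            time.sleep(request_wait)
        else:
            logger.info("Request worked on the '%d' try" % (try_index + 1))
            break
    else:
        # Reached only if no attempt ever hit `break`.
        logger.info("Failed to send request after '%d' tries..." % retries)
```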
@@ -86,7 +86,8 @@ class TimeoutWatcher(threading.Thread,Loggable):
             self.watcher.request_do()
         # Same for events, this behaviour is mandatory however.
         if self.watcher.events_in_queue():
-            self.logger.info("We got %d events that are unflushed" % self.watcher.events_left_count())
+            self.logger.info("We got %d events that are unflushed" %
+                    self.watcher.events_left_count())
             self.watcher.flush_events()

 class WatchSyncer(ReportHandler,Loggable):
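
For context on the TimeoutWatcher hunk above: the pattern is a daemon thread that wakes on a fixed interval and nudges its watcher. A stripped-down sketch, with the constructor signature assumed rather than taken from the source:

```python
import threading
import time

class PollingWatcher(threading.Thread):
    """Wake up every `timeout` seconds and flush pending work."""
    def __init__(self, watcher, timeout):
        threading.Thread.__init__(self)
        self.daemon = True          # don't keep the process alive on exit
        self.watcher = watcher
        self.timeout = timeout

    def run(self):
        while True:
            time.sleep(self.timeout)
            if self.watcher.requests_in_queue():
                self.watcher.request_do()
            if self.watcher.events_in_queue():
                self.watcher.flush_events()
```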
@@ -146,7 +147,8 @@ class WatchSyncer(ReportHandler,Loggable):
             self.request_do()

     def events_in_queue(self):
-        """returns true if there are events in the queue that haven't been processed yet"""
+        """returns true if there are events in the queue that haven't been
+        processed yet"""
         return len(self.__queue) > 0

     def requests_in_queue(self):

View File

@@ -11,13 +11,15 @@ from media.update import replaygain

 class ReplayGainUpdater(Thread):
     """
-    The purpose of the class is to query the server for a list of files which do not have a ReplayGain
-    value calculated. This class will iterate over the list calculate the values, update the server and
-    repeat the process until the server reports there are no files left.
+    The purpose of the class is to query the server for a list of files which
+    do not have a ReplayGain value calculated. This class will iterate over the
+    list calculate the values, update the server and repeat the process until
+    the server reports there are no files left.

-    This class will see heavy activity right after a 2.1->2.2 upgrade since 2.2 introduces ReplayGain
-    normalization. A fresh install of Airtime 2.2 will see this class not used at all since a file
-    imported in 2.2 will automatically have its ReplayGain value calculated.
+    This class will see heavy activity right after a 2.1->2.2 upgrade since 2.2
+    introduces ReplayGain normalization. A fresh install of Airtime 2.2 will
+    see this class not used at all since a file imported in 2.2 will
+    automatically have its ReplayGain value calculated.
     """

     def __init__(self, logger):
@@ -34,8 +36,9 @@ class ReplayGainUpdater(Thread):
             try:
                 processed_data = []

-                #keep getting few rows at a time for current music_dir (stor or watched folder).
-                #When we get a response with 0 rows, then we will set 'finished' to True.
+                #keep getting few rows at a time for current music_dir (stor or
+                #watched folder). #When we get a response with 0 rows, then we
+                #will set 'finished' to True.
                 finished = False
                 while not finished:
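
The batching loop that comment describes boils down to: ask the server for a few rows at a time and stop on the first empty batch. A sketch under stated assumptions: the `get_files_without_replay_gain` method name is hypothetical, and it assumes the server stops returning rows it has already handed out:

```python
def process_music_dir(api_client, dir_id):
    """Collect rows for one music_dir until the server runs dry."""
    processed_data = []
    finished = False
    while not finished:
        # Hypothetical API call; returns at most a few rows per request.
        rows = api_client.get_files_without_replay_gain(dir_id)
        if len(rows) == 0:
            finished = True   # a response with 0 rows ends the loop
        else:
            processed_data.extend(rows)
    return processed_data
```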