Some defensive code against multiprocessing-related deadlocks

* Reinitialize logging in child processes so we don't inherit the parent's
  logging locks. That might be what's causing the deadlock we're seeing on Pro right now.
Albert Santoni 2014-05-05 18:25:47 -04:00
parent d063700254
commit ed494ac587
2 changed files with 15 additions and 1 deletion
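
For context, a minimal standalone sketch of the pattern this commit applies (illustrative only; the log path, function names, and queue usage below are placeholders, not the project's code). On POSIX, fork() copies the logging module's internal locks into the child; if another thread held one of them at fork time, the child inherits it in a locked state and its first log call can deadlock (http://bugs.python.org/issue6721). Configuring logging from scratch at the very top of the child's entry point is the defensive move taken here.

import logging
import multiprocessing

def child_main(queue):
    # First thing in the child: configure a fresh, per-process log file
    # instead of relying on whatever the parent had set up before the fork.
    logging.basicConfig(filename="/tmp/child_process.log", level=logging.DEBUG)
    logging.info("child process started")
    queue.put("ok")

if __name__ == "__main__":
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=child_main, args=(q,))
    p.start()
    print(q.get())  # prints "ok"
    p.join()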


@@ -30,6 +30,10 @@ class AnalyzerPipeline:
temporary randomly generated name, which is why we want
to know what the original name was.
"""
# Initialize a separate log file here so that we don't inherit logging
# handlers/locks from the parent process; inheriting them can reportedly
# lead to Bad Things (deadlocks): http://bugs.python.org/issue6721
AnalyzerPipeline.setup_logging()
try:
    if not isinstance(queue, multiprocessing.queues.Queue):
        raise TypeError("queue must be a multiprocessing.Queue()")
@@ -59,4 +63,10 @@ class AnalyzerPipeline:
    logging.exception(e)
    raise e
@staticmethod
def setup_logging():
    _LOG_PATH = "/var/log/airtime/airtime_analyzer_pipeline.log"
    FORMAT = "%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
    logging.basicConfig(filename=_LOG_PATH, level=logging.DEBUG, format=FORMAT)
    # rootLogger = logging.getLogger()
    # rootLogger.setFormatter(logFormatter)
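
One caveat worth flagging (an observation, not part of the commit): logging.basicConfig() only installs its handler when the root logger has no handlers yet, so a forked child that inherited the parent's handlers may find the call above silently doing nothing. A hedged sketch of a more forceful variant, reusing the path and format from the diff but with reset logic that is purely illustrative:

import logging

def setup_logging():
    # Drop any handlers the child inherited from the parent so that
    # basicConfig() below is guaranteed to attach the new file handler.
    root = logging.getLogger()
    for handler in list(root.handlers):
        root.removeHandler(handler)
    log_path = "/var/log/airtime/airtime_analyzer_pipeline.log"
    fmt = "%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
    logging.basicConfig(filename=log_path, level=logging.DEBUG, format=fmt)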


@@ -212,6 +212,10 @@ class MessageListener:
    logging.info(results)
else:
    raise Exception("Analyzer process terminated unexpectedly.")
# Defensive code: drain the queue so leftover items from unexpected behaviour
# can't fill it up and block.
while not q.empty():
    q.get()
return results
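
For context on why draining helps (a minimal standalone sketch, not the project's code; the worker function and queue names are illustrative): a process that has put items on a multiprocessing.Queue will not fully terminate until its feeder thread has flushed the buffered data into the underlying pipe, so joining a child whose queued items are never consumed can hang. Reading everything off the queue before joining avoids that.

import multiprocessing

def worker(q):
    # The child puts a reasonably large result on the queue; it cannot fully
    # terminate until its feeder thread has flushed that data into the pipe.
    q.put("analysis result " * 10000)

if __name__ == "__main__":
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    results = q.get()        # consume the expected result first...
    while not q.empty():     # ...then drain anything unexpected, as the diff does
        q.get()
    p.join()                 # join only after the queue has been emptied
    print(len(results))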