refactor(analyzer): fix linting errors (#2029)

Jonas L 2022-08-09 20:49:04 +02:00 committed by GitHub
parent b465629977
commit 1b93b7645e
7 changed files with 51 additions and 74 deletions

View File

@@ -174,8 +174,8 @@ class MessageListener:
             )
             metadata = queue.get()
         except Exception as exception:
-            logger.error("Analyzer pipeline exception: %s" % str(exception))
-            metadata["import_status"] = PipelineStatus.failed
+            logger.exception(f"Analyzer pipeline exception: {exception}")
+            metadata["import_status"] = PipelineStatus.FAILED
 
             # Ensure our queue doesn't fill up and block due to unexpected behavior. Defensive code.
             while not queue.empty():
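Note: the swap from logger.error to logger.exception matters because loguru's exception() logs at ERROR level and appends the active traceback on its own, so the old manual str() formatting is redundant. A minimal sketch of the idiom:

from loguru import logger

def risky() -> None:
    raise ValueError("bad input")

try:
    risky()
except Exception as exception:
    # Inside an except block, logger.exception() captures the current
    # traceback automatically; the f-string only adds human context.
    logger.exception(f"Analyzer pipeline exception: {exception}")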

View File

@@ -30,16 +30,15 @@ def analyze_playability(filename: str, metadata: Dict[str, Any]):
     try:
         subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
-    except OSError as e:  # liquidsoap was not found
+    except OSError as exception:  # liquidsoap was not found
         logger.warning(
-            "Failed to run: %s - %s. %s"
-            % (command[0], e.strerror, "Do you have liquidsoap installed?")
+            f"Failed to run: {command[0]} - {exception}. Is liquidsoap installed?"
         )
     except (
         subprocess.CalledProcessError,
         Exception,
-    ) as e:  # liquidsoap returned an error code
-        logger.warning(e)
-        raise UnplayableFileError()
+    ) as exception:  # liquidsoap returned an error code
+        logger.warning(exception)
+        raise UnplayableFileError() from exception
     return metadata
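Note: the added "from exception" sets __cause__ on the new error, so the original liquidsoap failure stays visible in the traceback instead of being swallowed by a bare UnplayableFileError. A minimal, self-contained sketch (UnplayableFileError and check_decoder are redefined here purely for illustration):

class UnplayableFileError(Exception):
    pass

def check_decoder(returncode: int) -> None:
    try:
        if returncode != 0:
            raise RuntimeError(f"decoder exited with code {returncode}")
    except RuntimeError as exception:
        # "raise ... from exception" chains the errors: the traceback
        # prints the original failure as the direct cause of this one.
        raise UnplayableFileError() from exception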

View File

@@ -19,9 +19,9 @@ class Step(Protocol):
 class PipelineStatus(int, Enum):
-    succeed = 0
-    pending = 1
-    failed = 2
+    SUCCEED = 0
+    PENDING = 1
+    FAILED = 2
 
 
 class Pipeline:
@@ -89,13 +89,13 @@ class Pipeline:
                 metadata,
             )
-            metadata["import_status"] = PipelineStatus.succeed
+            metadata["import_status"] = PipelineStatus.SUCCEED
 
             # Pass all the file metadata back to the main analyzer process
             queue.put(metadata)
         except UnplayableFileError as exception:
             logger.exception(exception)
-            metadata["import_status"] = PipelineStatus.failed
+            metadata["import_status"] = PipelineStatus.FAILED
             metadata["reason"] = "The file could not be played."
             raise exception
         except Exception as exception:
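Note: because PipelineStatus mixes in int, the renamed UPPER_CASE members still compare equal to the plain integers stored in metadata, so the constant-naming fix is behavior-preserving. A quick sketch:

from enum import Enum

class PipelineStatus(int, Enum):
    SUCCEED = 0
    PENDING = 1
    FAILED = 2

# int subclassing means members interoperate with plain integers,
# e.g. in serialized metadata dicts:
assert PipelineStatus.FAILED == 2
assert {"import_status": PipelineStatus.SUCCEED}["import_status"] == 0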

View File

@@ -4,11 +4,11 @@ import pickle
 import queue
 import threading
 import time
-import traceback
 from urllib.parse import urlparse
 
 import requests
 from loguru import logger
+from requests.exceptions import HTTPError
 
 
 class PicklableHttpRequest:
@@ -48,17 +48,14 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
     try:
         with open(http_retry_queue_path, "rb") as pickle_file:
             retry_queue = pickle.load(pickle_file)
-    except OSError as e:
-        if e.errno == 2:
-            pass
-        else:
-            raise e
-    except Exception as e:
+    except OSError as exception:
+        if exception.errno != 2:
+            raise exception
+    except Exception:
         # If we fail to unpickle a saved queue of failed HTTP requests, then we'll just log an error
         # and continue because those HTTP requests are lost anyways. The pickled file will be
         # overwritten the next time the analyzer is shut down too.
-        logger.error("Failed to unpickle %s. Continuing..." % http_retry_queue_path)
-        pass
+        logger.error(f"Failed to unpickle {http_retry_queue_path}. Continuing...")
 
     while True:
         try:
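Note: the inverted errno check keeps the old behavior (a missing queue file on first run is fine, anything else propagates) while dropping the dead pass/else branches. A sketch of the same pattern with the magic number named (load_retry_queue is a hypothetical helper, not part of this diff):

import errno
import pickle
from collections import deque

def load_retry_queue(path: str) -> deque:
    try:
        with open(path, "rb") as pickle_file:
            return pickle.load(pickle_file)
    except OSError as exception:
        # errno 2 is ENOENT: the queue file simply doesn't exist yet.
        # Any other OSError (permissions, I/O) should propagate.
        if exception.errno != errno.ENOENT:
            raise
        return deque()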
@@ -70,11 +67,6 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
             ):  # Bit of a cheat
                 shutdown = True
                 break
-            if not isinstance(request, PicklableHttpRequest):
-                raise TypeError(
-                    "request must be a PicklableHttpRequest. Was of type "
-                    + type(request).__name__
-                )
         except queue.Empty:
             request = None
@@ -84,7 +76,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
                 send_http_request(request, retry_queue)
             else:
                 # Using a for loop instead of while so we only iterate over all the requests once!
-                for i in range(len(retry_queue)):
+                for _ in range(len(retry_queue)):
                     request = retry_queue.popleft()
                     send_http_request(request, retry_queue)
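Note: renaming the loop variable to "_" flags it as intentionally unused; the length snapshot is what bounds the pass, since failed requests can be re-appended to the deque while it is being drained. A sketch:

from collections import deque

retry_queue = deque(["req-1", "req-2", "req-3"])

# range(len(...)) is evaluated once, so requests re-appended during
# the pass are not retried again until the next pass.
for _ in range(len(retry_queue)):
    request = retry_queue.popleft()
    print("sending", request)  # stand-in for send_http_request(...)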
@@ -94,55 +86,49 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
                 with open(http_retry_queue_path, "wb") as pickle_file:
                     pickle.dump(retry_queue, pickle_file)
                 return
-        except Exception as e:  # Terrible top-level exception handler to prevent the thread from dying, just in case.
+        except Exception as exception:  # Terrible top-level exception handler to prevent the thread from dying, just in case.
             if shutdown:
                 return
-            logger.exception("Unhandled exception in StatusReporter")
-            logger.exception(e)
+            logger.exception(f"Unhandled exception in StatusReporter {exception}")
             logger.info("Restarting StatusReporter thread")
             time.sleep(2)  # Throttle it
 
 
-def send_http_request(picklable_request, retry_queue):
-    if not isinstance(picklable_request, PicklableHttpRequest):
-        raise TypeError(
-            "picklable_request must be a PicklableHttpRequest. Was of type "
-            + type(picklable_request).__name__
-        )
+def send_http_request(picklable_request: PicklableHttpRequest, retry_queue):
     try:
         bare_request = picklable_request.create_request()
-        s = requests.Session()
-        prepared_request = s.prepare_request(bare_request)
-        r = s.send(prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT)
-        r.raise_for_status()  # Raise an exception if there was an http error code returned
+        session = requests.Session()
+        prepared_request = session.prepare_request(bare_request)
+        resp = session.send(
+            prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT
+        )
+        resp.raise_for_status()  # Raise an exception if there was an http error code returned
         logger.info("HTTP request sent successfully.")
-    except requests.exceptions.HTTPError as e:
-        if e.response.status_code == 422:
+    except requests.exceptions.HTTPError as exception:
+        if exception.response.status_code == 422:
             # Do not retry the request if there was a metadata validation error
-            logger.error(
-                "HTTP request failed due to an HTTP exception. Exception was: %s"
-                % str(e)
+            logger.exception(
+                f"HTTP request failed due to an HTTP exception: {exception}"
             )
         else:
             # The request failed with an error 500 probably, so let's check if Airtime and/or
             # the web server are broken. If not, then our request was probably causing an
             # error 500 in the media API (ie. a bug), so there's no point in retrying it.
-            logger.error("HTTP request failed. Exception was: %s" % str(e))
-            parsed_url = urlparse(e.response.request.url)
+            logger.exception(f"HTTP request failed: {exception}")
+            parsed_url = urlparse(exception.response.request.url)
             if is_web_server_broken(parsed_url.scheme + "://" + parsed_url.netloc):
                 # If the web server is having problems, retry the request later:
                 retry_queue.append(picklable_request)
             # Otherwise, if the request was bad, the request is never retried.
             # You will have to find these bad requests in logs or you'll be
             # notified by sentry.
-    except requests.exceptions.ConnectionError as e:
-        logger.error(
-            "HTTP request failed due to a connection error. Retrying later. %s" % str(e)
+    except requests.exceptions.ConnectionError as exception:
+        logger.exception(
+            f"HTTP request failed due to a connection error. Retrying later. {exception}"
        )
         retry_queue.append(picklable_request)  # Retry it later
-    except Exception as e:
-        logger.error("HTTP request failed with unhandled exception. %s" % str(e))
-        logger.error(traceback.format_exc())
+    except Exception as exception:
+        logger.exception(f"HTTP request failed with unhandled exception. {exception}")
         # Don't put the request into the retry queue, just give up on this one.
         # I'm doing this to protect against us getting some pathological request
         # that breaks our code. I don't want us pickling data that potentially
@@ -158,11 +144,9 @@ def is_web_server_broken(url):
     try:
         test_req = requests.get(url)
         test_req.raise_for_status()
-    except Exception as e:
+    except HTTPError:
         return True
-    else:
-        # The request worked fine, so the web server and Airtime are still up.
-        return False
+    return False
 
 
 class StatusReporter:
@@ -176,7 +160,7 @@ class StatusReporter:
     _http_thread = None
 
     @classmethod
-    def start_thread(self, http_retry_queue_path):
+    def start_thread(cls, http_retry_queue_path):
         StatusReporter._http_thread = threading.Thread(
             target=process_http_requests,
             args=(StatusReporter._ipc_queue, http_retry_queue_path),
@@ -184,18 +168,18 @@
         StatusReporter._http_thread.start()
 
     @classmethod
-    def stop_thread(self):
+    def stop_thread(cls):
         logger.info("Terminating status_reporter process")
         StatusReporter._ipc_queue.put("shutdown")
         StatusReporter._http_thread.join()
 
     @classmethod
-    def _send_http_request(self, request):
+    def _send_http_request(cls, request):
         StatusReporter._ipc_queue.put(request)
 
     @classmethod
     def report_success(
-        self,
+        cls,
         callback_url: str,
         callback_api_key: str,
         metadata: dict,
@@ -215,18 +199,12 @@
     @classmethod
     def report_failure(
-        self,
+        cls,
         callback_url,
         callback_api_key,
-        import_status,
+        import_status: int,
         reason,
     ):
-        if not isinstance(import_status, int):
-            raise TypeError(
-                "import_status must be an integer. Was of type "
-                + type(import_status).__name__
-            )
         logger.debug("Reporting import failure to Airtime REST API...")
         audio_metadata = {}
         audio_metadata["import_status"] = import_status
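Note: a @classmethod receives the class itself as its first argument, so the conventional name is "cls"; naming it "self" works at runtime but trips linters and misleads readers. A stripped-down sketch (the list is a stand-in for the real queue.Queue):

class StatusReporter:
    _ipc_queue: list = []  # stand-in for the real queue.Queue

    @classmethod
    def _send_http_request(cls, request):
        # "cls" is StatusReporter here, so attribute access no longer
        # needs to repeat the class name.
        cls._ipc_queue.append(request)

StatusReporter._send_http_request("ping")
assert StatusReporter._ipc_queue == ["ping"]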

View File

@@ -15,7 +15,7 @@ from ..fixtures import FILES
     ),
 )
 def test_analyze_cuepoint(filepath, length, cuein, cueout):
-    metadata = analyze_cuepoint(filepath, dict())
+    metadata = analyze_cuepoint(filepath, {})
     assert metadata["length_seconds"] == pytest.approx(length, abs=0.1)
     assert float(metadata["cuein"]) == pytest.approx(float(cuein), abs=1)

View File

@@ -16,7 +16,7 @@ from ..fixtures import FILE_INVALID_DRM, FILES
     map(lambda i: str(i.path), FILES),
 )
 def test_analyze_playability(filepath):
-    analyze_playability(filepath, dict())
+    analyze_playability(filepath, {})
 
 
 def test_analyze_playability_missing_liquidsoap():
@@ -24,7 +24,7 @@ def test_analyze_playability_missing_liquidsoap():
         "libretime_analyzer.pipeline.analyze_playability.LIQUIDSOAP_EXECUTABLE",
         "foobar",
     ):
-        analyze_playability(str(FILES[0].path), dict())
+        analyze_playability(str(FILES[0].path), {})
 
 
 def test_analyze_playability_invalid_filepath():

View File

@@ -18,5 +18,5 @@ def test_analyze_replaygain(filepath, replaygain):
     if distro.codename() == "bionic" and str(filepath).endswith("+12.mp3"):
         tolerance = 5
 
-    metadata = analyze_replaygain(filepath, dict())
+    metadata = analyze_replaygain(filepath, {})
     assert metadata["replay_gain"] == pytest.approx(replaygain, abs=tolerance)