refactor: don't use f-string on logging statements
The strings are now only formatted if the logging level is enabled.
parent c414068c16
commit 861698987c
23 changed files with 94 additions and 84 deletions
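For context on the change itself: the stdlib logging module defers %-style interpolation until a record is actually going to be emitted, whereas an f-string is rendered before the logger method is even called. Below is a minimal sketch of that difference (not part of this commit; the Expensive class is a made-up stand-in for any value that is costly to stringify):

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

class Expensive:
    """Made-up stand-in for a value that is costly to render as a string."""

    def __str__(self) -> str:
        print("__str__ was called")
        return "rendered"

value = Expensive()

logger.debug("value is %s", value)    # DEBUG disabled: __str__ never runs
logger.debug(f"value is {value}")     # f-string stringifies value even though the record is dropped
logger.warning("value is %s", value)  # WARNING enabled: __str__ runs only now

The same holds for logger.exception(...): the arguments are only merged into the message when a handler actually processes the record.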
@@ -100,7 +100,7 @@ class MessageListener:
         Here we parse the message, spin up an analyzer process, and report the
         metadata back to the Airtime web application (or report an error).
         """
-        logger.info(f" - Received '{body}' on routing_key '{method_frame.routing_key}'")
+        logger.info("Received '%s' on routing_key '%s'", body, method_frame.routing_key)
 
         audio_file_path = ""
         # final_file_path = ""
@@ -180,7 +180,7 @@ class MessageListener:
             )
             metadata = queue.get()
         except Exception as exception:
-            logger.exception(f"Analyzer pipeline exception: {exception}")
+            logger.exception("Analyzer pipeline exception: %s", exception)
             metadata["import_status"] = PipelineStatus.FAILED
 
         # Ensure our queue doesn't fill up and block due to unexpected behavior. Defensive code.
@@ -16,7 +16,7 @@ def run_(*args, **kwargs) -> CompletedProcess:
 
     except OSError as exception:  # executable was not found
         cmd = args[0]
-        logger.warning(f"Failed to run: {cmd} - {exception}. Is {cmd} installed?")
+        logger.warning("Failed to run: %s - %s. Is %s installed?", cmd, exception, cmd)
         raise exception
 
     except CalledProcessError as exception:  # returned an error code
@@ -26,7 +26,7 @@ def analyze_metadata(filepath_: str, metadata: Dict[str, Any]):
    # Get audio file metadata
    extracted = mutagen.File(filepath, easy=True)
    if extracted is None:
-        logger.warning(f"no metadata were extracted for {filepath}")
+        logger.warning("no metadata were extracted for %s", filepath)
        return metadata
 
    metadata["mime"] = extracted.mime[0]
@@ -27,6 +27,6 @@ def analyze_playability(filename: str, metadata: Dict[str, Any]):
        raise UnplayableFileError() from exception
 
    except OSError as exception:  # liquidsoap was not found
-        logger.warning(f"Failed to run: {exception}. Is liquidsoap installed?")
+        logger.warning("Failed to run: %s. Is liquidsoap installed?", exception)
 
    return metadata
@@ -43,12 +43,12 @@ def organise_file(
            return metadata
 
        dest_path = dest_path.with_name(f"{dest_path.stem}_{uuid4()}{dest_path.suffix}")
-        logger.warning(f"found existing file, using new filepath {dest_path}")
+        logger.warning("found existing file, using new filepath %s", dest_path)
 
    # Import
    dest_path.parent.mkdir(parents=True, exist_ok=True)
 
-    logger.debug(f"moving {filepath} to {dest_path}")
+    logger.debug("moving %s to %s", filepath, dest_path)
    shutil.move(filepath, dest_path)
 
    metadata["full_path"] = str(dest_path)
@@ -57,7 +57,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
        # If we fail to unpickle a saved queue of failed HTTP requests, then we'll just log an error
        # and continue because those HTTP requests are lost anyways. The pickled file will be
        # overwritten the next time the analyzer is shut down too.
-        logger.error(f"Failed to unpickle {http_retry_queue_path}. Continuing...")
+        logger.error("Failed to unpickle %s. Continuing...", http_retry_queue_path)
 
    while True:
        try:
@@ -93,7 +93,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
        ) as exception:  # Terrible top-level exception handler to prevent the thread from dying, just in case.
            if shutdown:
                return
-            logger.exception(f"Unhandled exception in StatusReporter {exception}")
+            logger.exception("Unhandled exception in StatusReporter %s", exception)
            logger.info("Restarting StatusReporter thread")
            time.sleep(2)  # Throttle it
 
@@ -118,7 +118,7 @@ def send_http_request(picklable_request: PicklableHttpRequest, retry_queue):
        # The request failed with an error 500 probably, so let's check if Airtime and/or
        # the web server are broken. If not, then our request was probably causing an
        # error 500 in the media API (ie. a bug), so there's no point in retrying it.
-        logger.exception(f"HTTP request failed: {exception}")
+        logger.exception("HTTP request failed: %s", exception)
        parsed_url = urlparse(exception.response.request.url)
        if is_web_server_broken(parsed_url.scheme + "://" + parsed_url.netloc):
            # If the web server is having problems, retry the request later:
@@ -128,11 +128,12 @@ def send_http_request(picklable_request: PicklableHttpRequest, retry_queue):
        # notified by sentry.
    except requests.exceptions.ConnectionError as exception:
        logger.exception(
-            f"HTTP request failed due to a connection error. Retrying later. {exception}"
+            "HTTP request failed due to a connection error, retrying later: %s",
+            exception,
        )
        retry_queue.append(picklable_request)  # Retry it later
    except Exception as exception:
-        logger.exception(f"HTTP request failed with unhandled exception. {exception}")
+        logger.exception("HTTP request failed with unhandled exception. %s", exception)
        # Don't put the request into the retry queue, just give up on this one.
        # I'm doing this to protect against us getting some pathological request
        # that breaks our code. I don't want us pickling data that potentially
@@ -214,7 +215,7 @@ class StatusReporter:
        audio_metadata["import_status"] = import_status
        audio_metadata["comment"] = reason  # hack attack
        put_payload = json.dumps(audio_metadata)
-        # logger.debug("sending http put with payload: " + put_payload)
+        # logger.debug("sending http put with payload: %s", put_payload)
 
        StatusReporter._send_http_request(
            PicklableHttpRequest(
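A quick way to sanity-check the rewritten calls (a hedged sketch, not part of this commit; the logger name and the "ffmpeg"/"boom" values are made up) is Python's assertLogs, which captures the fully interpolated message:

import logging
import unittest

logger = logging.getLogger("analyzer")  # hypothetical logger name

class TestLazyLogging(unittest.TestCase):
    def test_warning_is_interpolated(self):
        # Mirrors the run_() warning above, with made-up argument values.
        with self.assertLogs(logger, level="WARNING") as captured:
            logger.warning(
                "Failed to run: %s - %s. Is %s installed?", "ffmpeg", "boom", "ffmpeg"
            )
        self.assertIn("Is ffmpeg installed?", captured.output[0])

if __name__ == "__main__":
    unittest.main()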