refactor: don't use f-string on logging statements
The strings are now only formatted if the logging level is enabled.
parent c414068c16
commit 861698987c
23 changed files with 94 additions and 84 deletions
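For reference, a minimal sketch of the pattern applied throughout this commit. The module-level logger and the payload value below are illustrative assumptions, not taken from the diff; the point is that passing values as arguments to the logging call defers interpolation to the logging module, so the string is only built when the record is actually emitted.

import logging

logger = logging.getLogger(__name__)  # assumed module-level logger, for illustration

payload = {"event_type": "update_schedule"}  # illustrative value

# Before: the message is formatted eagerly, even when DEBUG is disabled.
logger.debug(f"Received event: {payload}")
logger.debug("Received event: %s" % payload)

# After: format string and arguments are passed separately; logging
# performs "msg % args" lazily, only if a handler emits the record.
logger.debug("Received event: %s", payload)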
@@ -64,7 +64,7 @@ class PypoFetch(Thread):
 
     def handle_message(self, message):
         try:
-            logger.info("Received event from Pypo Message Handler: %s" % message)
+            logger.info("Received event from Pypo Message Handler: %s", message)
 
             try:
                 message = message.decode()
@@ -72,7 +72,7 @@ class PypoFetch(Thread):
                 pass
             m = json.loads(message)
             command = m["event_type"]
-            logger.info("Handling command: " + command)
+            logger.info("Handling command: %s", command)
 
             if command == "update_schedule":
                 self.schedule_data = m["schedule"]
@@ -102,7 +102,7 @@ class PypoFetch(Thread):
                     m["sourcename"]
                 )
             else:
-                logger.info("Unknown command: %s" % command)
+                logger.info("Unknown command: %s", command)
 
             # update timeout value
             if command == "update_schedule":
@@ -113,7 +113,7 @@ class PypoFetch(Thread):
                 )
                 if self.listener_timeout < 0:
                     self.listener_timeout = 0
-            logger.info("New timeout: %s" % self.listener_timeout)
+            logger.info("New timeout: %s", self.listener_timeout)
         except Exception as exception:
             logger.exception(exception)
 
@@ -128,11 +128,11 @@ class PypoFetch(Thread):
             state = StreamState(**self.api_client.get_stream_state().json())
 
         except RequestException as exception:
-            logger.exception(f"Unable to get stream settings: {exception}")
+            logger.exception("Unable to get stream settings: %s", exception)
 
-        logger.debug(f"info: {info}")
-        logger.debug(f"preferences: {preferences}")
-        logger.debug(f"state: {state}")
+        logger.debug("info: %s", info)
+        logger.debug("preferences: %s", preferences)
+        logger.debug("state: %s", state)
 
         try:
             self.pypo_liquidsoap.liq_client.settings_update(
@@ -293,31 +293,31 @@ class PypoFetch(Thread):
 
         expired_files = cached_file_set - scheduled_file_set
 
-        logger.debug("Files to remove " + str(expired_files))
+        logger.debug("Files to remove %s", str(expired_files))
         for f in expired_files:
             try:
                 path = os.path.join(self.cache_dir, f)
-                logger.debug("Removing %s" % path)
+                logger.debug("Removing %s", path)
 
                 # check if this file is opened (sometimes Liquidsoap is still
                 # playing the file due to our knowledge of the track length
                 # being incorrect!)
                 if not self.is_file_opened(path):
                     os.remove(path)
-                    logger.info("File '%s' removed" % path)
+                    logger.info("File '%s' removed", path)
                 else:
-                    logger.info("File '%s' not removed. Still busy!" % path)
+                    logger.info("File '%s' not removed. Still busy!", path)
             except Exception as exception:
-                logger.exception(f"Problem removing file '{f}': {exception}")
+                logger.exception("Problem removing file '%s': %s", f, exception)
 
     def manual_schedule_fetch(self):
         try:
            self.schedule_data = get_schedule(self.api_client)
-            logger.debug(f"Received event from API client: {self.schedule_data}")
+            logger.debug("Received event from API client: %s", self.schedule_data)
            self.process_schedule(self.schedule_data)
            return True
         except Exception as exception:
-            logger.exception(f"Unable to fetch schedule: {exception}")
+            logger.exception("Unable to fetch schedule: %s", exception)
            return False
 
     def persistent_manual_schedule_fetch(self, max_attempts=1):
@@ -358,7 +358,7 @@ class PypoFetch(Thread):
 
         loops = 1
         while True:
-            logger.info(f"Loop #{loops}")
+            logger.info("Loop #%s", loops)
             manual_fetch_needed = False
             try:
                 # our simple_queue.get() requires a timeout, in which case we
@@ -388,7 +388,7 @@ class PypoFetch(Thread):
                 if manual_fetch_needed:
                     self.persistent_manual_schedule_fetch(max_attempts=5)
             except Exception as exception:
-                logger.exception(f"Failed to manually fetch the schedule: {exception}")
+                logger.exception("Failed to manually fetch the schedule: %s", exception)
 
             loops += 1

@@ -50,14 +50,14 @@ class PypoFile(Thread):
             # become an issue here... This needs proper cache management.
             # https://github.com/libretime/libretime/issues/756#issuecomment-477853018
             # https://github.com/libretime/libretime/pull/845
-            logger.debug(f"found file {file_id} in cache {dst}, skipping copy...")
+            logger.debug("found file %s in cache %s, skipping copy...", file_id, dst)
         else:
             do_copy = True
 
         media_item["file_ready"] = not do_copy
 
         if do_copy:
-            logger.info(f"copying file {file_id} to cache {dst}")
+            logger.info("copying file %s to cache %s", file_id, dst)
             try:
                 with open(dst, "wb") as handle:
                     logger.info(media_item)
@@ -82,7 +82,12 @@ class PypoFile(Thread):
 
                 media_item["file_ready"] = True
             except Exception as exception:
-                logger.exception(f"could not copy file {file_id} to {dst}: {exception}")
+                logger.exception(
+                    "could not copy file %s to %s: %s",
+                    file_id,
+                    dst,
+                    exception,
+                )
 
     def report_file_size_and_md5_to_api(self, file_path, file_id):
         try:
@@ -99,7 +104,9 @@ class PypoFile(Thread):
         except OSError as exception:
             file_size = 0
             logger.exception(
-                f"Error getting file size and md5 hash for file id {file_id}: {exception}"
+                "Error getting file size and md5 hash for file id %s: %s",
+                file_id,
+                exception,
             )
 
         # Make PUT request to LibreTime to update the file size and hash
@@ -112,7 +119,7 @@ class PypoFile(Thread):
         except (ConnectionError, Timeout):
             logger.exception(error_msg)
         except Exception as exception:
-            logger.exception(f"{error_msg}: {exception}")
+            logger.exception("%s: %s", error_msg, exception)
 
         return file_size
 
@@ -132,7 +139,7 @@ class PypoFile(Thread):
         highest_priority = sorted_keys[0]
         media_item = schedule[highest_priority]
 
-        logger.debug("Highest priority item: %s" % highest_priority)
+        logger.debug("Highest priority item: %s", highest_priority)
 
         # Remove this media_item from the dictionary. On the next iteration
         # (from the main function) we won't consider it for prioritization

@@ -157,7 +157,7 @@ class PypoLiquidsoap:
 
                 if not correct:
                     # need to re-add
-                    logger.info("Track %s found to have new attr." % i)
+                    logger.info("Track %s found to have new attr.", i)
                     to_be_removed.add(i["row_id"])
                     to_be_added.add(i["row_id"])
 
@@ -165,7 +165,7 @@ class PypoLiquidsoap:
         to_be_added.update(schedule_ids - liq_queue_ids)
 
         if to_be_removed:
-            logger.info("Need to remove items from Liquidsoap: %s" % to_be_removed)
+            logger.info("Need to remove items from Liquidsoap: %s", to_be_removed)
 
             # remove files from Liquidsoap's queue
             for i in self.liq_queue_tracker:
@@ -174,7 +174,7 @@ class PypoLiquidsoap:
                     self.stop(i)
 
         if to_be_added:
-            logger.info("Need to add items to Liquidsoap *now*: %s" % to_be_added)
+            logger.info("Need to add items to Liquidsoap *now*: %s", to_be_added)
 
             for i in scheduled_now_files:
                 if i["row_id"] in to_be_added:
@@ -183,7 +183,7 @@ class PypoLiquidsoap:
 
         # handle webstreams
         current_stream_id = self.telnet_liquidsoap.get_current_stream_id()
-        logger.debug(f"scheduled now webstream: {scheduled_now_webstream}")
+        logger.debug("scheduled now webstream: %s", scheduled_now_webstream)
         if scheduled_now_webstream:
             if int(current_stream_id) != int(scheduled_now_webstream[0]["row_id"]):
                 self.play(scheduled_now_webstream[0])
@@ -192,7 +192,7 @@ class PypoLiquidsoap:
                 self.telnet_liquidsoap.stop_web_stream_buffer()
                 self.telnet_liquidsoap.stop_web_stream_output()
         except KeyError as exception:
-            logger.exception(f"Malformed event in schedule: {exception}")
+            logger.exception("Malformed event in schedule: %s", exception)
 
     def stop(self, queue):
         self.telnet_liquidsoap.queue_remove(queue)
@@ -211,7 +211,7 @@ class PypoLiquidsoap:
         lateness = seconds_between(link["start"], datetime.utcnow())
 
         if lateness > 0:
-            logger.debug(f"media item was supposed to start {lateness}s ago")
+            logger.debug("media item was supposed to start %ss ago", lateness)
             cue_in_orig = timedelta(seconds=float(link["cue_in"]))
             link["cue_in"] = cue_in_orig.total_seconds() + lateness
 
@@ -86,16 +86,16 @@ class PypoPush(Thread):
 
             # Ignore track that already ended
             if media_item["type"] == "file" and media_item["end"] < tnow:
-                logger.debug(f"ignoring ended media_item: {media_item}")
+                logger.debug("ignoring ended media_item: %s", media_item)
                 continue
 
             diff_sec = (tnow - media_item["start"]).total_seconds()
 
             if diff_sec >= 0:
-                logger.debug(f"adding media_item to present: {media_item}")
+                logger.debug("adding media_item to present: %s", media_item)
                 present.append(media_item)
             else:
-                logger.debug(f"adding media_item to future: {media_item}")
+                logger.debug("adding media_item to future: %s", media_item)
                 future[mkey] = media_item
 
         return present, future

@@ -36,7 +36,7 @@ class PypoLiqQueue(Thread):
                 media_schedule = self.queue.get(block=True)
             else:
                 logger.info(
-                    "waiting %ss until next scheduled item" % time_until_next_play
+                    "waiting %ss until next scheduled item", time_until_next_play
                 )
                 media_schedule = self.queue.get(
                     block=True, timeout=time_until_next_play