import hashlib
import logging
import os
import stat
import time
from queue import Empty, Queue
from threading import Thread
from typing import Optional
import requests
from libretime_api_client.v2 import ApiClient
from .events import FileEvent, FileEvents
logger = logging.getLogger(__name__)
class PypoFile(Thread):
    """Daemon thread that downloads scheduled media files into the local cache."""

    name = "file"
    daemon = True

    # Queue on which new schedules (batches of file events) arrive.
    file_events_queue: Queue[FileEvents]
    # The schedule currently being worked on.
    file_events: FileEvents

    def __init__(
        self,
        file_queue: Queue[FileEvents],
        api_client: ApiClient,
    ):
        super().__init__()
        self.file_events_queue = file_queue
        self.file_events = {}
        self.api_client = api_client
def copy_file(self, file_event: FileEvent):
"""
Copy file_event from local library directory to local cache directory.
"""
file_id = file_event["id"]
dst = file_event["dst"]
dst_exists = True
try:
dst_size = os.path.getsize(dst)
2020-01-30 14:47:36 +01:00
if dst_size == 0:
dst_exists = False
except Exception:
dst_exists = False
do_copy = False
if dst_exists:
# TODO: Check if the locally cached variant of the file is sane.
# This used to be a filesize check that didn't end up working.
# Once we have watched folders updated files from them might
# become an issue here... This needs proper cache management.
2022-03-29 13:07:38 +02:00
# https://github.com/libretime/libretime/issues/756#issuecomment-477853018
# https://github.com/libretime/libretime/pull/845
logger.debug("found file %s in cache %s, skipping copy...", file_id, dst)
else:
do_copy = True
file_event["file_ready"] = not do_copy
if do_copy:
logger.info("copying file %s to cache %s", file_id, dst)
try:
with open(dst, "wb") as handle:
logger.info(file_event)
try:
response = self.api_client.download_file(file_id, stream=True)
for chunk in response.iter_content(chunk_size=2048):
handle.write(chunk)
2023-03-01 17:13:02 +01:00
except requests.exceptions.HTTPError as exception:
2023-02-01 20:38:58 +01:00
raise RuntimeError(
f"could not download file {file_event['id']}"
) from exception
2021-05-27 16:23:02 +02:00
# make file world readable and owner writable
os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
if file_event["filesize"] == 0:
2021-09-01 15:25:38 +02:00
file_size = self.report_file_size_and_md5_to_api(
dst, file_event["id"]
2021-05-27 16:23:02 +02:00
)
file_event["filesize"] = file_size
file_event["file_ready"] = True
except Exception as exception:
logger.exception(
"could not copy file %s to %s: %s",
file_id,
dst,
exception,
)
def report_file_size_and_md5_to_api(self, file_path: str, file_id: int) -> int:
try:
file_size = os.path.getsize(file_path)
2023-03-01 17:13:02 +01:00
with open(file_path, "rb") as file_fd:
hasher = hashlib.md5(usedforsecurity=False)
while True:
2023-03-01 17:13:02 +01:00
data = file_fd.read(8192)
if not data:
break
hasher.update(data)
md5_hash = hasher.hexdigest()
except OSError as exception:
file_size = 0
logger.exception(
"Error getting file size and md5 hash for file id %s: %s",
file_id,
exception,
2021-05-27 16:23:02 +02:00
)
# Make PUT request to LibreTime to update the file size and hash
error_msg = f"Could not update media file {file_id} with file size and md5 hash"
2017-10-03 03:37:06 +02:00
try:
self.api_client.update_file(
file_id,
json={"filesize": file_size, "md5": md5_hash},
)
2023-03-01 17:13:02 +01:00
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
logger.exception(error_msg)
except Exception as exception:
logger.exception("%s: %s", error_msg, exception)
return file_size
def get_highest_priority_file_event(
self,
file_events: FileEvents,
) -> Optional[FileEvent]:
"""
Get highest priority file event in the queue. Currently the highest
priority is decided by how close the start time is to "now".
"""
if file_events is None or len(file_events) == 0:
return None
sorted_keys = sorted(file_events.keys())
if len(sorted_keys) == 0:
return None
highest_priority = sorted_keys[0]
file_event = file_events[highest_priority]
logger.debug("Highest priority item: %s", highest_priority)
# Remove this media_item from the dictionary. On the next iteration
# (from the main function) we won't consider it for prioritization
# anymore. If on the next iteration we have received a new schedule,
# it is very possible we will have to deal with the same media_items
# again. In this situation, the worst possible case is that we try to
# copy the file again and realize we already have it (thus aborting the copy).
del file_events[highest_priority]
return file_event
def main(self):
while True:
try:
if self.file_events is None or len(self.file_events) == 0:
# We have no schedule, so we have nothing else to do. Let's
# do a blocked wait on the queue
self.file_events = self.file_events_queue.get(block=True)
else:
# We have a schedule we need to process, but we also want
# to check if a newer schedule is available. In this case
# do a non-blocking queue.get and in either case (we get something
# or we don't), get back to work on preparing getting files.
try:
self.file_events = self.file_events_queue.get_nowait()
except Empty:
pass
file_event = self.get_highest_priority_file_event(self.file_events)
if file_event is not None:
self.copy_file(file_event)
except Exception as exception:
logger.exception(exception)
raise exception
def run(self):
"""
Entry point of the thread
"""
2021-05-27 16:23:02 +02:00
try:
self.main()
except Exception as exception:
logger.exception(exception)
time.sleep(5)
feat(playout): enhance playout logging (#1495) Some initial work on modernizing the playout app. This replace any custom logger or logging based logger with the logging tools from libretime_shared.logging and loguru. Removed all the thread/function assigned logger (self.logger = ...), as this makes it part of the logic (passing logger though function args) as it should not. Of a dedicated logger is required for a specific task, it should use the create_task_logger function. - refactor: remove dead code - refactor: remove py2 specific fix - feat: remove unused test command - feat: setup shared cli and logging tools - feat: replace logging with loguru - feat: setup shared cli and logging tools for notify - fix: warn method deos not exist - feat: make cli setup the entrypoint - fix: install shared modules globally in production use extra_requires to load local packages in dev environement - feat: configure log path in systemd service - feat: default behavior is to log to console only - feat: create log dir during install - chore: add comment - fix: don't create useless dir in install - fix: move notify logs to /var/log/libretime dir - fix: update setup_logger attrs - style: linting - fix: replace verbosity flag with log-level flag - feat: use shared logging tool in liquidsoap - fix: pass logger down to api client - feat: allow custom log_filepath in liquidsoap config - chore: add pylintrc to playout - refactor: fix pylint errors - feat: set liquidsoap log filepath in systemd service - fix: missing setup entrypoint update BREAKING CHANGE: for playout and liquidsoap the default log file path changed to None and will only log to the console when developing / testing. 
Unless you are running the app as a systemd service (production) the default logs filepaths changed: from "/var/log/airtime/pypo/pypo.log" to "/var/log/libretime/playout.log" and from "/var/log/airtime/pypo-liquidsoap/ls_script.log" to "/var/log/libretime/liquidsoap.log" BREAKING CHANGE: for playout-notify the default log file path changed from "/var/log/airtime/pypo/notify.log" to "/var/log/libretime/playout-notify.log"
2022-01-13 16:11:37 +01:00
logger.info("PypoFile thread exiting")