chore(playout): use comments instead of docstrings

This commit is contained in:
jo 2022-07-01 12:23:18 +02:00 committed by Kyle Robbertze
parent a38684212f
commit 2ad65bba8c
6 changed files with 44 additions and 70 deletions

View File

@ -170,12 +170,10 @@ def cli(log_level: str, log_filepath: Optional[Path], config_filepath: Optional[
pypo_liquidsoap = PypoLiquidsoap(telnet_lock, liquidsoap_host, liquidsoap_port)
"""
This queue is shared between pypo-fetch and pypo-file, where pypo-file
is the consumer. Pypo-fetch will send every schedule it gets to pypo-file
and pypo will parse this schedule to determine which file has the highest
priority, and retrieve it.
"""
# This queue is shared between pypo-fetch and pypo-file, where pypo-file
# is the consumer. Pypo-fetch will send every schedule it gets to pypo-file
# and pypo will parse this schedule to determine which file has the highest
# priority, and retrieve it.
media_q = Queue()
# Pass only the configuration sections needed; PypoMessageHandler only needs rabbitmq settings

View File

@ -64,10 +64,8 @@ class PypoFetch(Thread):
self.schedule_data = []
logger.info("PypoFetch: init complete")
"""
Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this.
"""
# Handle a message from RabbitMQ, put it into our yucky global var.
# Hopefully there is a better way to do this.
def handle_message(self, message):
try:
@ -141,9 +139,7 @@ class PypoFetch(Thread):
return command
"""
Initialize Liquidsoap environment
"""
# Initialize Liquidsoap environment
def set_bootstrap_variables(self):
logger.debug("Getting information needed on bootstrap from Airtime")
@ -207,9 +203,7 @@ class PypoFetch(Thread):
if self.telnet_lock.locked():
self.telnet_lock.release()
"""
NOTE: This function is quite short after it was refactored.
"""
# NOTE: This function is quite short after it was refactored.
def regenerate_liquidsoap_conf(self, setting):
self.restart_liquidsoap()
@ -331,14 +325,12 @@ class PypoFetch(Thread):
except Exception as e:
logger.exception(e)
"""
Process the schedule
- Reads the scheduled entries of a given range (actual time +/- "prepare_ahead" / "cache_for")
- Saves a serialized file of the schedule
- playlists are prepared. (brought to liquidsoap format) and, if not mounted via nfs, files are copied
to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
- runs the cleanup routine, to get rid of unused cached files
"""
# Process the schedule
# - Reads the scheduled entries of a given range (actual time +/- "prepare_ahead" / "cache_for")
# - Saves a serialized file of the schedule
# - playlists are prepared. (brought to liquidsoap format) and, if not mounted via nfs, files are copied
# to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
# - runs the cleanup routine, to get rid of unused cached files
def process_schedule(self, schedule_data):
self.last_update_schedule_timestamp = time.time()
@ -349,9 +341,7 @@ class PypoFetch(Thread):
# Download all the media and put playlists in liquidsoap "annotate" format
try:
"""
Make sure cache_dir exists
"""
# Make sure cache_dir exists
download_dir = self.cache_dir
try:
os.makedirs(download_dir)
@ -509,19 +499,17 @@ class PypoFetch(Thread):
logger.info(f"Loop #{loops}")
manual_fetch_needed = False
try:
"""
our simple_queue.get() requires a timeout, in which case we
fetch the Airtime schedule manually. It is important to fetch
the schedule periodically because if we didn't, we would only
get schedule updates via RabbitMq if the user was constantly
using the Airtime interface.
# our simple_queue.get() requires a timeout, in which case we
# fetch the Airtime schedule manually. It is important to fetch
# the schedule periodically because if we didn't, we would only
# get schedule updates via RabbitMq if the user was constantly
# using the Airtime interface.
If the user is not using the interface, RabbitMq messages are not
sent, and we will have very stale (or non-existent!) data about the
schedule.
# If the user is not using the interface, RabbitMq messages are not
# sent, and we will have very stale (or non-existent!) data about the
# schedule.
Currently we are checking every POLL_INTERVAL seconds
"""
# Currently we are checking every POLL_INTERVAL seconds
message = self.fetch_queue.get(
block=True, timeout=self.listener_timeout

View File

@ -137,14 +137,12 @@ class PypoFile(Thread):
logger.debug("Highest priority item: %s" % highest_priority)
"""
Remove this media_item from the dictionary. On the next iteration
(from the main function) we won't consider it for prioritization
anymore. If on the next iteration we have received a new schedule,
it is very possible we will have to deal with the same media_items
again. In this situation, the worst possible case is that we try to
copy the file again and realize we already have it (thus aborting the copy).
"""
# Remove this media_item from the dictionary. On the next iteration
# (from the main function) we won't consider it for prioritization
# anymore. If on the next iteration we have received a new schedule,
# it is very possible we will have to deal with the same media_items
# again. In this situation, the worst possible case is that we try to
# copy the file again and realize we already have it (thus aborting the copy).
del schedule[highest_priority]
return media_item
@ -153,18 +151,14 @@ class PypoFile(Thread):
while True:
try:
if self.media is None or len(self.media) == 0:
"""
We have no schedule, so we have nothing else to do. Let's
do a blocked wait on the queue
"""
# We have no schedule, so we have nothing else to do. Let's
# do a blocked wait on the queue
self.media = self.media_queue.get(block=True)
else:
"""
We have a schedule we need to process, but we also want
to check if a newer schedule is available. In this case
do a non-blocking queue.get and in either case (we get something
or we don't), get back to work on preparing getting files.
"""
# We have a schedule we need to process, but we also want
# to check if a newer schedule is available. In this case
# do a non-blocking queue.get and in either case (we get something
# or we don't), get back to work on preparing getting files.
try:
self.media = self.media_queue.get_nowait()
except Empty as e:

View File

@ -52,10 +52,8 @@ class PypoMessageHandler(Thread):
except Exception as e:
logger.error(e)
"""
Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this.
"""
# Handle a message from RabbitMQ, put it into our yucky global var.
# Hopefully there is a better way to do this.
def handle_message(self, message):
try:
@ -111,11 +109,9 @@ class PypoMessageHandler(Thread):
logger.error("Error connecting to RabbitMQ Server. Trying again in few seconds")
time.sleep(5)
"""
Main loop of the thread:
Wait for schedule updates from RabbitMQ, but in case there aren't any,
poll the server to get the upcoming schedule.
"""
# Main loop of the thread:
# Wait for schedule updates from RabbitMQ, but in case there aren't any,
# poll the server to get the upcoming schedule.
def run(self):
while True:

View File

@ -350,9 +350,7 @@ class Recorder(Thread):
while True:
if self.loops * PUSH_INTERVAL > 3600:
self.loops = 0
"""
Fetch recorder schedule
"""
# Fetch recorder schedule
try:
temp = self.api_client.get_shows_to_record()
if temp is not None:

View File

@ -22,8 +22,8 @@ def __timeout(func, timeout_duration, default, args, kwargs):
it.join(timeout_duration)
if it.is_alive():
"""Restart Liquidsoap and try the command one more time. If it
fails again then there is something critically wrong..."""
# Restart Liquidsoap and try the command one more time. If it
# fails again then there is something critically wrong...
if first_attempt:
# restart liquidsoap
pypofetch.PypoFetch.ref.restart_liquidsoap()