chore(playout): use comments instead of docstrings

jo authored on 2022-07-01 12:23:18 +02:00; committed by Kyle Robbertze
parent a38684212f
commit 2ad65bba8c
6 changed files with 44 additions and 70 deletions


@@ -170,12 +170,10 @@ def cli(log_level: str, log_filepath: Optional[Path], config_filepath: Optional[
     pypo_liquidsoap = PypoLiquidsoap(telnet_lock, liquidsoap_host, liquidsoap_port)

-    """
-    This queue is shared between pypo-fetch and pypo-file, where pypo-file
-    is the consumer. Pypo-fetch will send every schedule it gets to pypo-file
-    and pypo will parse this schedule to determine which file has the highest
-    priority, and retrieve it.
-    """
+    # This queue is shared between pypo-fetch and pypo-file, where pypo-file
+    # is the consumer. Pypo-fetch will send every schedule it gets to pypo-file
+    # and pypo will parse this schedule to determine which file has the highest
+    # priority, and retrieve it.
     media_q = Queue()

     # Pass only the configuration sections needed; PypoMessageHandler only needs rabbitmq settings
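The comment above describes a classic producer/consumer hand-off between the fetch thread and the file thread. Purely as an illustration (not part of this commit; names and data below are made up), the pattern looks roughly like this:

```python
from queue import Queue
from threading import Thread

def fetcher(q: Queue) -> None:
    # Producer side: push every schedule we receive onto the shared queue.
    schedule = {"1": {"priority": 1}, "2": {"priority": 0}}  # placeholder data
    q.put(schedule)

def filer(q: Queue) -> None:
    # Consumer side: block until a schedule arrives, then pick the file
    # with the highest priority to retrieve first.
    schedule = q.get(block=True)
    first = min(schedule, key=lambda key: schedule[key]["priority"])
    print(f"would retrieve item {first} first")

media_q: Queue = Queue()
Thread(target=fetcher, args=(media_q,)).start()
Thread(target=filer, args=(media_q,)).start()
```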


@@ -64,10 +64,8 @@ class PypoFetch(Thread):
         self.schedule_data = []
         logger.info("PypoFetch: init complete")

-    """
-    Handle a message from RabbitMQ, put it into our yucky global var.
-    Hopefully there is a better way to do this.
-    """
+    # Handle a message from RabbitMQ, put it into our yucky global var.
+    # Hopefully there is a better way to do this.
     def handle_message(self, message):
         try:
@@ -141,9 +139,7 @@ class PypoFetch(Thread):
         return command

-    """
-    Initialize Liquidsoap environment
-    """
+    # Initialize Liquidsoap environment
     def set_bootstrap_variables(self):
         logger.debug("Getting information needed on bootstrap from Airtime")
@@ -207,9 +203,7 @@ class PypoFetch(Thread):
         if self.telnet_lock.locked():
             self.telnet_lock.release()

-    """
-    NOTE: This function is quite short after it was refactored.
-    """
+    # NOTE: This function is quite short after it was refactored.
     def regenerate_liquidsoap_conf(self, setting):
         self.restart_liquidsoap()
@@ -331,14 +325,12 @@ class PypoFetch(Thread):
         except Exception as e:
             logger.exception(e)

-    """
-    Process the schedule
-     - Reads the scheduled entries of a given range (actual time +/- "prepare_ahead" / "cache_for")
-     - Saves a serialized file of the schedule
-     - playlists are prepared. (brought to liquidsoap format) and, if not mounted via nsf, files are copied
-       to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
-     - runs the cleanup routine, to get rid of unused cached files
-    """
+    # Process the schedule
+    #  - Reads the scheduled entries of a given range (actual time +/- "prepare_ahead" / "cache_for")
+    #  - Saves a serialized file of the schedule
+    #  - playlists are prepared. (brought to liquidsoap format) and, if not mounted via nsf, files are copied
+    #    to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
+    #  - runs the cleanup routine, to get rid of unused cached files
     def process_schedule(self, schedule_data):
         self.last_update_schedule_timestamp = time.time()
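The comment above lists what process_schedule does. Purely as an illustration of that flow (the signature and helpers below are hypothetical, not the module's real API):

```python
import pickle
from pathlib import Path
from queue import Queue

def process_schedule(schedule: dict, cache_dir: Path, media_q: Queue) -> None:
    # 1. Make sure the cache directory exists, then save a serialized copy
    #    of the schedule we just fetched.
    cache_dir.mkdir(parents=True, exist_ok=True)
    (cache_dir / "schedule.pickle").write_bytes(pickle.dumps(schedule))

    # 2. Hand the whole schedule to the file thread, which prepares the
    #    liquidsoap entries and copies files into the cache directory.
    media_q.put(schedule)

    # 3. A cleanup pass over stale cached files would run here (omitted).

process_schedule({"1": {"id": 1}}, Path("/tmp/playout-cache"), Queue())
```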
@@ -349,9 +341,7 @@ class PypoFetch(Thread):
         # Download all the media and put playlists in liquidsoap "annotate" format
         try:
-            """
-            Make sure cache_dir exists
-            """
+            # Make sure cache_dir exists
             download_dir = self.cache_dir
             try:
                 os.makedirs(download_dir)
@@ -509,19 +499,17 @@ class PypoFetch(Thread):
             logger.info(f"Loop #{loops}")
             manual_fetch_needed = False
             try:
-                """
-                our simple_queue.get() requires a timeout, in which case we
-                fetch the Airtime schedule manually. It is important to fetch
-                the schedule periodically because if we didn't, we would only
-                get schedule updates via RabbitMq if the user was constantly
-                using the Airtime interface.
-
-                If the user is not using the interface, RabbitMq messages are not
-                sent, and we will have very stale (or non-existent!) data about the
-                schedule.
-
-                Currently we are checking every POLL_INTERVAL seconds
-                """
+                # our simple_queue.get() requires a timeout, in which case we
+                # fetch the Airtime schedule manually. It is important to fetch
+                # the schedule periodically because if we didn't, we would only
+                # get schedule updates via RabbitMq if the user was constantly
+                # using the Airtime interface.
+
+                # If the user is not using the interface, RabbitMq messages are not
+                # sent, and we will have very stale (or non-existent!) data about the
+                # schedule.
+
+                # Currently we are checking every POLL_INTERVAL seconds
                 message = self.fetch_queue.get(
                     block=True, timeout=self.listener_timeout
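The comment in this hunk describes the fallback behaviour: block on the queue with a timeout, and fetch the schedule manually when the timeout expires. A minimal sketch of that loop, with illustrative names:

```python
from queue import Empty, Queue

def fetch_loop(fetch_queue: Queue, listener_timeout: float = 5.0) -> None:
    while True:
        try:
            # Normally a schedule update arrives through the message queue.
            message = fetch_queue.get(block=True, timeout=listener_timeout)
            print("got schedule update:", message)
        except Empty:
            # Nothing arrived within the timeout: poll for the schedule
            # ourselves so the data never goes stale.
            print("timeout expired, fetching the schedule manually")
        break  # break only so this sketch terminates after one pass

fetch_loop(Queue(), listener_timeout=0.1)
```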


@@ -137,14 +137,12 @@ class PypoFile(Thread):
         logger.debug("Highest priority item: %s" % highest_priority)

-        """
-        Remove this media_item from the dictionary. On the next iteration
-        (from the main function) we won't consider it for prioritization
-        anymore. If on the next iteration we have received a new schedule,
-        it is very possible we will have to deal with the same media_items
-        again. In this situation, the worst possible case is that we try to
-        copy the file again and realize we already have it (thus aborting the copy).
-        """
+        # Remove this media_item from the dictionary. On the next iteration
+        # (from the main function) we won't consider it for prioritization
+        # anymore. If on the next iteration we have received a new schedule,
+        # it is very possible we will have to deal with the same media_items
+        # again. In this situation, the worst possible case is that we try to
+        # copy the file again and realize we already have it (thus aborting the copy).
         del schedule[highest_priority]

         return media_item
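The removed docstring explains why the chosen item is deleted from the schedule dict. A small self-contained illustration of that pick-and-remove step (names are made up):

```python
def pop_highest_priority(schedule: dict) -> dict:
    # Lowest numeric value means highest priority in this sketch.
    highest = min(schedule, key=lambda key: schedule[key]["priority"])
    media_item = schedule[highest]
    # Drop it from the dict so the next pass won't consider it again;
    # a freshly received schedule may legitimately re-introduce it.
    del schedule[highest]
    return media_item

schedule = {"a": {"priority": 2}, "b": {"priority": 0}}
print(pop_highest_priority(schedule))  # {'priority': 0}
print(schedule)                        # {'a': {'priority': 2}}
```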
@@ -153,18 +151,14 @@ class PypoFile(Thread):
         while True:
             try:
                 if self.media is None or len(self.media) == 0:
-                    """
-                    We have no schedule, so we have nothing else to do. Let's
-                    do a blocked wait on the queue
-                    """
+                    # We have no schedule, so we have nothing else to do. Let's
+                    # do a blocked wait on the queue
                     self.media = self.media_queue.get(block=True)
                 else:
-                    """
-                    We have a schedule we need to process, but we also want
-                    to check if a newer schedule is available. In this case
-                    do a non-blocking queue.get and in either case (we get something
-                    or we don't), get back to work on preparing getting files.
-                    """
+                    # We have a schedule we need to process, but we also want
+                    # to check if a newer schedule is available. In this case
+                    # do a non-blocking queue.get and in either case (we get something
+                    # or we don't), get back to work on preparing getting files.
                     try:
                         self.media = self.media_queue.get_nowait()
                     except Empty as e:
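The two comments above contrast the blocking and non-blocking queue reads. A compact, illustrative version of that consumer loop:

```python
from queue import Empty, Queue

def file_loop(media_queue: Queue) -> None:
    media = None
    while True:
        if not media:
            # No schedule at all: sleep until one arrives.
            media = media_queue.get(block=True)
        else:
            # Already busy: only peek for a newer schedule, then keep working.
            try:
                media = media_queue.get_nowait()
            except Empty:
                pass
        # ... copy the next highest-priority file here ...
        break  # break only so this sketch terminates

q: Queue = Queue()
q.put({"1": {"priority": 0}})
file_loop(q)
```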


@@ -52,10 +52,8 @@ class PypoMessageHandler(Thread):
         except Exception as e:
             logger.error(e)

-    """
-    Handle a message from RabbitMQ, put it into our yucky global var.
-    Hopefully there is a better way to do this.
-    """
+    # Handle a message from RabbitMQ, put it into our yucky global var.
+    # Hopefully there is a better way to do this.
     def handle_message(self, message):
         try:
@@ -111,11 +109,9 @@ class PypoMessageHandler(Thread):
                 logger.error("Error connecting to RabbitMQ Server. Trying again in few seconds")
                 time.sleep(5)

-    """
-    Main loop of the thread:
-    Wait for schedule updates from RabbitMQ, but in case there aren't any,
-    poll the server to get the upcoming schedule.
-    """
+    # Main loop of the thread:
+    # Wait for schedule updates from RabbitMQ, but in case there aren't any,
+    # poll the server to get the upcoming schedule.
     def run(self):
         while True:


@@ -350,9 +350,7 @@ class Recorder(Thread):
         while True:
             if self.loops * PUSH_INTERVAL > 3600:
                 self.loops = 0
-            """
-            Fetch recorder schedule
-            """
+            # Fetch recorder schedule
             try:
                 temp = self.api_client.get_shows_to_record()
                 if temp is not None:


@@ -22,8 +22,8 @@ def __timeout(func, timeout_duration, default, args, kwargs):
     it.join(timeout_duration)

     if it.is_alive():
-        """Restart Liquidsoap and try the command one more time. If it
-        fails again then there is something critically wrong..."""
+        # Restart Liquidsoap and try the command one more time. If it
+        # fails again then there is something critically wrong...
         if first_attempt:
             # restart liquidsoap
             pypofetch.PypoFetch.ref.restart_liquidsoap()
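For context, __timeout runs a Liquidsoap command in a worker thread and treats a thread that is still alive after the join as a timeout. A generic sketch of that pattern (not the project's actual helper):

```python
import threading

def run_with_timeout(func, timeout_duration, default, *args, **kwargs):
    result = [default]

    def target():
        result[0] = func(*args, **kwargs)

    worker = threading.Thread(target=target, daemon=True)
    worker.start()
    worker.join(timeout_duration)
    if worker.is_alive():
        # The command is still hanging after the deadline: a real caller
        # could restart the backend and retry once before giving up.
        return default
    return result[0]

print(run_with_timeout(lambda x: x * 2, 1.0, None, 21))  # 42
```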