Format code using black
parent efe4fa027e
commit c27f020d73
85 changed files with 3238 additions and 2243 deletions
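Every hunk below is a mechanical rewrite by black: double quotes instead of single quotes, normalized comment spacing, and calls that overflow the 88-column limit exploded across lines. As a quick orientation sketch (not part of the commit; the config path value is assumed), black leaves a call alone if it fits and otherwise splits it after the opening parenthesis:

import argparse

DEFAULT_RMQ_CONFIG_PATH = "/etc/airtime/airtime.conf"

parser = argparse.ArgumentParser()

# Fits within black's 88-column limit, so it stays on one line:
parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true")

# Too long for one line: black splits after the opening parenthesis, one
# argument per line with a trailing comma, re-quoting strings to double quotes.
parser.add_argument(
    "--rmq-config-file",
    help="specify a configuration file with RabbitMQ settings (default is %s)"
    % DEFAULT_RMQ_CONFIG_PATH,
)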
@@ -10,24 +10,25 @@ from . import config_file
 from functools import partial
 from .metadata_analyzer import MetadataAnalyzer
 from .replaygain_analyzer import ReplayGainAnalyzer
-from .status_reporter import StatusReporter
+from .status_reporter import StatusReporter
 from .message_listener import MessageListener


 class AirtimeAnalyzerServer:
-    """A server for importing uploads to Airtime as background jobs.
-    """
+    """A server for importing uploads to Airtime as background jobs."""

-    # Constants
+    # Constants
     _LOG_PATH = "/var/log/airtime/airtime_analyzer.log"

     # Variables
     _log_level = logging.INFO

     def __init__(self, rmq_config_path, http_retry_queue_path, debug=False):

         # Dump a stacktrace with 'kill -SIGUSR2 <PID>'
-        signal.signal(signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace())
+        signal.signal(
+            signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace()
+        )

         # Configure logging
         self.setup_logging(debug)
@@ -43,11 +44,10 @@ class AirtimeAnalyzerServer:
         self._msg_listener = MessageListener(rmq_config)

         StatusReporter.stop_thread()

     def setup_logging(self, debug):
         """Set up nicely formatted logging and log rotation.
-

         Keyword arguments:
             debug -- a boolean indicating whether to enable super verbose logging
                      to the screen and disk.

@@ -55,27 +55,30 @@ class AirtimeAnalyzerServer:
         if debug:
             self._log_level = logging.DEBUG
         else:
-            #Disable most pika/rabbitmq logging:
-            pika_logger = logging.getLogger('pika')
+            # Disable most pika/rabbitmq logging:
+            pika_logger = logging.getLogger("pika")
             pika_logger.setLevel(logging.CRITICAL)

         # Set up logging
-        logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s")
+        logFormatter = logging.Formatter(
+            "%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
+        )
         rootLogger = logging.getLogger()
         rootLogger.setLevel(self._log_level)

-        fileHandler = logging.handlers.RotatingFileHandler(filename=self._LOG_PATH, maxBytes=1024*1024*30,
-                                                           backupCount=8)
+        fileHandler = logging.handlers.RotatingFileHandler(
+            filename=self._LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8
+        )
         fileHandler.setFormatter(logFormatter)
         rootLogger.addHandler(fileHandler)

         consoleHandler = logging.StreamHandler()
         consoleHandler.setFormatter(logFormatter)
         rootLogger.addHandler(consoleHandler)

     @classmethod
     def dump_stacktrace(stack):
-        ''' Dump a stacktrace for all threads '''
+        """Dump a stacktrace for all threads"""
         code = []
         for threadId, stack in list(sys._current_frames().items()):
             code.append("\n# ThreadID: %s" % threadId)

@@ -83,4 +86,4 @@ class AirtimeAnalyzerServer:
                 code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
                 if line:
                     code.append("  %s" % (line.strip()))
-        logging.info('\n'.join(code))
+        logging.info("\n".join(code))
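The `dump_stacktrace` handler reformatted above is a useful pattern in its own right: a SIGUSR2 handler that walks `sys._current_frames()` lets you inspect a live daemon with `kill -SIGUSR2 <PID>`. A minimal standalone sketch of the same idea (not part of the commit; POSIX only):

import signal
import sys
import traceback


def dump_all_stacks(signum, frame):
    # sys._current_frames() maps thread IDs to their current stack frames.
    for thread_id, stack in sys._current_frames().items():
        print("\n# ThreadID: %s" % thread_id)
        traceback.print_stack(stack)


signal.signal(signal.SIGUSR2, dump_all_stacks)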
@@ -3,8 +3,7 @@


 class Analyzer:
-    """ Abstract base class for all "analyzers".
-    """
+    """Abstract base class for all "analyzers"."""

     @staticmethod
     def analyze(filename, metadata):


@@ -12,20 +12,28 @@ from .cuepoint_analyzer import CuePointAnalyzer
 from .replaygain_analyzer import ReplayGainAnalyzer
 from .playability_analyzer import *

-class AnalyzerPipeline:
-    """ Analyzes and imports an audio file into the Airtime library.
-
-    This currently performs metadata extraction (eg. gets the ID3 tags from an MP3),
-    then moves the file to the Airtime music library (stor/imported), and returns
-    the results back to the parent process. This class is used in an isolated process
-    so that if it crashes, it does not kill the entire airtime_analyzer daemon and
-    the failure to import can be reported back to the web application.
+
+class AnalyzerPipeline:
+    """Analyzes and imports an audio file into the Airtime library.
+
+    This currently performs metadata extraction (eg. gets the ID3 tags from an MP3),
+    then moves the file to the Airtime music library (stor/imported), and returns
+    the results back to the parent process. This class is used in an isolated process
+    so that if it crashes, it does not kill the entire airtime_analyzer daemon and
+    the failure to import can be reported back to the web application.
     """

     IMPORT_STATUS_FAILED = 2

     @staticmethod
-    def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix):
+    def run_analysis(
+        queue,
+        audio_file_path,
+        import_directory,
+        original_filename,
+        storage_backend,
+        file_prefix,
+    ):
         """Analyze and import an audio file, and put all extracted metadata into queue.

         Keyword arguments:

@@ -50,14 +58,29 @@ class AnalyzerPipeline:
         if not isinstance(queue, Queue):
             raise TypeError("queue must be a Queue.Queue()")
         if not isinstance(audio_file_path, str):
-            raise TypeError("audio_file_path must be unicode. Was of type " + type(audio_file_path).__name__ + " instead.")
+            raise TypeError(
+                "audio_file_path must be unicode. Was of type "
+                + type(audio_file_path).__name__
+                + " instead."
+            )
         if not isinstance(import_directory, str):
-            raise TypeError("import_directory must be unicode. Was of type " + type(import_directory).__name__ + " instead.")
+            raise TypeError(
+                "import_directory must be unicode. Was of type "
+                + type(import_directory).__name__
+                + " instead."
+            )
         if not isinstance(original_filename, str):
-            raise TypeError("original_filename must be unicode. Was of type " + type(original_filename).__name__ + " instead.")
+            raise TypeError(
+                "original_filename must be unicode. Was of type "
+                + type(original_filename).__name__
+                + " instead."
+            )
         if not isinstance(file_prefix, str):
-            raise TypeError("file_prefix must be unicode. Was of type " + type(file_prefix).__name__ + " instead.")
-
+            raise TypeError(
+                "file_prefix must be unicode. Was of type "
+                + type(file_prefix).__name__
+                + " instead."
+            )

         # Analyze the audio file we were told to analyze:
         # First, we extract the ID3 tags and other metadata:

@@ -69,9 +92,11 @@ class AnalyzerPipeline:
         metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata)
         metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata)

-        metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata)
+        metadata = FileMoverAnalyzer.move(
+            audio_file_path, import_directory, original_filename, metadata
+        )

-        metadata["import_status"] = 0 # Successfully imported
+        metadata["import_status"] = 0  # Successfully imported

         # Note that the queue we're putting the results into is our interprocess communication
         # back to the main process.

@@ -93,9 +118,8 @@ class AnalyzerPipeline:
     def python_logger_deadlock_workaround():
         # Workaround for: http://bugs.python.org/issue6721#msg140215
         logger_names = list(logging.Logger.manager.loggerDict.keys())
-        logger_names.append(None) # Root logger
+        logger_names.append(None)  # Root logger
         for name in logger_names:
             for handler in logging.getLogger(name).handlers:
                 handler.createLock()
         logging._lock = threading.RLock()
-


@@ -9,21 +9,32 @@ import os
 import airtime_analyzer.airtime_analyzer as aa

 VERSION = "1.0"
-LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime')
-DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf')
-DEFAULT_HTTP_RETRY_PATH = '/tmp/airtime_analyzer_http_retries'
+LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime")
+DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, "airtime.conf")
+DEFAULT_HTTP_RETRY_PATH = "/tmp/airtime_analyzer_http_retries"

+
 def main():
-    '''Entry-point for this application'''
+    """Entry-point for this application"""
     print("LibreTime Analyzer {}".format(VERSION))
     parser = argparse.ArgumentParser()
     parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true")
-    parser.add_argument("--debug", help="log full debugging output", action="store_true")
-    parser.add_argument("--rmq-config-file", help="specify a configuration file with RabbitMQ settings (default is %s)" % DEFAULT_RMQ_CONFIG_PATH)
-    parser.add_argument("--http-retry-queue-file", help="specify where incompleted HTTP requests will be serialized (default is %s)" % DEFAULT_HTTP_RETRY_PATH)
+    parser.add_argument(
+        "--debug", help="log full debugging output", action="store_true"
+    )
+    parser.add_argument(
+        "--rmq-config-file",
+        help="specify a configuration file with RabbitMQ settings (default is %s)"
+        % DEFAULT_RMQ_CONFIG_PATH,
+    )
+    parser.add_argument(
+        "--http-retry-queue-file",
+        help="specify where incompleted HTTP requests will be serialized (default is %s)"
+        % DEFAULT_HTTP_RETRY_PATH,
+    )
     args = parser.parse_args()

-    #Default config file path
+    # Default config file path
     rmq_config_path = DEFAULT_RMQ_CONFIG_PATH
     http_retry_queue_path = DEFAULT_HTTP_RETRY_PATH
     if args.rmq_config_file:

@@ -33,14 +44,19 @@ def main():

     if args.daemon:
         with daemon.DaemonContext():
-            aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path,
-                                     http_retry_queue_path=http_retry_queue_path,
-                                     debug=args.debug)
+            aa.AirtimeAnalyzerServer(
+                rmq_config_path=rmq_config_path,
+                http_retry_queue_path=http_retry_queue_path,
+                debug=args.debug,
+            )
     else:
         # Run without daemonizing
-        aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path,
-                                 http_retry_queue_path=http_retry_queue_path,
-                                 debug=args.debug)
+        aa.AirtimeAnalyzerServer(
+            rmq_config_path=rmq_config_path,
+            http_retry_queue_path=http_retry_queue_path,
+            debug=args.debug,
+        )
+

 if __name__ == "__main__":
     main()


@@ -2,6 +2,7 @@

 import configparser

+
 def read_config_file(config_path):
     """Parse the application's config file located at config_path."""
     config = configparser.SafeConfigParser()
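One thing black leaves untouched in the hunk above: `configparser.SafeConfigParser` has been a deprecated alias of `ConfigParser` since Python 3.2 and is removed in 3.12. A sketch of the modern equivalent (the file-reading details are assumed, not shown in the hunk):

import configparser


def read_config_file(config_path):
    """Parse the application's config file located at config_path."""
    config = configparser.ConfigParser()  # modern replacement for SafeConfigParser
    config.read(config_path)
    return config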
@@ -8,26 +8,38 @@ from .analyzer import Analyzer


 class CuePointAnalyzer(Analyzer):
-    ''' This class extracts the cue-in time, cue-out time, and length of a track using silan. '''
+    """This class extracts the cue-in time, cue-out time, and length of a track using silan."""

-    SILAN_EXECUTABLE = 'silan'
+    SILAN_EXECUTABLE = "silan"

     @staticmethod
     def analyze(filename, metadata):
-        ''' Extracts the cue-in and cue-out times along and sets the file duration based on that.
+        """Extracts the cue-in and cue-out times along and sets the file duration based on that.
         The cue points are there to skip the silence at the start and end of a track, and are determined
         using "silan", which analyzes the loudness in a track.
         :param filename: The full path to the file to analyzer
         :param metadata: A metadata dictionary where the results will be put
         :return: The metadata dictionary
-        '''
-        ''' The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting,
+        """
+        """ The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting,
         the unit test on the short m4a file fails. With the new setting, it gets the correct cue-in time and
         all the unit tests pass.
-        '''
-        command = [CuePointAnalyzer.SILAN_EXECUTABLE, '-b', '-F', '0.99', '-f', 'JSON', '-t', '1.0', filename]
+        """
+        command = [
+            CuePointAnalyzer.SILAN_EXECUTABLE,
+            "-b",
+            "-F",
+            "0.99",
+            "-f",
+            "JSON",
+            "-t",
+            "1.0",
+            filename,
+        ]
         try:
-            results_json = subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
+            results_json = subprocess.check_output(
+                command, stderr=subprocess.STDOUT, close_fds=True
+            )
             try:
                 results_json = results_json.decode()
             except (UnicodeDecodeError, AttributeError):

@@ -35,40 +47,51 @@ class CuePointAnalyzer(Analyzer):
             silan_results = json.loads(results_json)

             # Defensive coding against Silan wildly miscalculating the cue in and out times:
-            silan_length_seconds = float(silan_results['file duration'])
-            silan_cuein = format(silan_results['sound'][0][0], 'f')
-            silan_cueout = format(silan_results['sound'][0][1], 'f')
+            silan_length_seconds = float(silan_results["file duration"])
+            silan_cuein = format(silan_results["sound"][0][0], "f")
+            silan_cueout = format(silan_results["sound"][0][1], "f")

             # Sanity check the results against any existing metadata passed to us (presumably extracted by Mutagen):
-            if 'length_seconds' in metadata:
+            if "length_seconds" in metadata:
                 # Silan has a rare bug where it can massively overestimate the length or cue out time sometimes.
-                if (silan_length_seconds - metadata['length_seconds'] > 3) or (float(silan_cueout) - metadata['length_seconds'] > 2):
+                if (silan_length_seconds - metadata["length_seconds"] > 3) or (
+                    float(silan_cueout) - metadata["length_seconds"] > 2
+                ):
                     # Don't trust anything silan says then...
-                    raise Exception("Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values."
-                                    .format(silan_cueout, silan_length_seconds, metadata['length_seconds']))
+                    raise Exception(
+                        "Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values.".format(
+                            silan_cueout,
+                            silan_length_seconds,
+                            metadata["length_seconds"],
+                        )
+                    )
                 # Don't allow silan to trim more than the greater of 3 seconds or 5% off the start of a track
-                if float(silan_cuein) > max(silan_length_seconds*0.05, 3):
-                    raise Exception("Silan cue in time {0} too big, ignoring.".format(silan_cuein))
+                if float(silan_cuein) > max(silan_length_seconds * 0.05, 3):
+                    raise Exception(
+                        "Silan cue in time {0} too big, ignoring.".format(silan_cuein)
+                    )
             else:
                 # Only use the Silan track length in the worst case, where Mutagen didn't give us one for some reason.
                 # (This is mostly to make the unit tests still pass.)
                 # Convert the length into a formatted time string.
-                metadata['length_seconds'] = silan_length_seconds #
-                track_length = datetime.timedelta(seconds=metadata['length_seconds'])
+                metadata["length_seconds"] = silan_length_seconds  #
+                track_length = datetime.timedelta(seconds=metadata["length_seconds"])
                 metadata["length"] = str(track_length)

-            ''' XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan
+            """ XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan
             as of Mutagen version 1.31. We are always going to use Mutagen's length now because Silan's
             length can be off by a few seconds reasonably often.
-            '''
+            """

-            metadata['cuein'] = silan_cuein
-            metadata['cueout'] = silan_cueout
+            metadata["cuein"] = silan_cuein
+            metadata["cueout"] = silan_cueout

-        except OSError as e: # silan was not found
-            logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have silan installed?"))
-        except subprocess.CalledProcessError as e: # silan returned an error code
+        except OSError as e:  # silan was not found
+            logging.warn(
+                "Failed to run: %s - %s. %s"
+                % (command[0], e.strerror, "Do you have silan installed?")
+            )
+        except subprocess.CalledProcessError as e:  # silan returned an error code
             logging.warn("%s %s %s", e.cmd, e.output, e.returncode)
         except Exception as e:
             logging.warn(e)
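For context on the keys used above: in JSON mode silan reports a `file duration` in seconds and a `sound` list of [cue-in, cue-out] second pairs, one per detected sound segment. A minimal sketch of the same invocation and parse (assumes silan is installed; the filename is hypothetical):

import json
import subprocess

command = ["silan", "-b", "-F", "0.99", "-f", "JSON", "-t", "1.0", "track.mp3"]
output = subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
silan_results = json.loads(output.decode())

length_seconds = float(silan_results["file duration"])
cue_in, cue_out = silan_results["sound"][0]  # first detected sound segment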
@@ -9,10 +9,12 @@ import uuid

 from .analyzer import Analyzer


 class FileMoverAnalyzer(Analyzer):
     """This analyzer copies a file over from a temporary directory (stor/organize)
-       into the Airtime library (stor/imported).
+    into the Airtime library (stor/imported).
     """

     @staticmethod
     def analyze(audio_file_path, metadata):
         """Dummy method because we need more info than analyze gets passed to it"""

@@ -21,27 +23,38 @@ class FileMoverAnalyzer(Analyzer):
     @staticmethod
     def move(audio_file_path, import_directory, original_filename, metadata):
         """Move the file at audio_file_path over into the import_directory/import,
-           renaming it to original_filename.
+        renaming it to original_filename.

-           Keyword arguments:
-               audio_file_path: Path to the file to be imported.
-               import_directory: Path to the "import" directory inside the Airtime stor directory.
-                                 (eg. /srv/airtime/stor/import)
-               original_filename: The filename of the file when it was uploaded to Airtime.
-               metadata: A dictionary where the "full_path" of where the file is moved to will be added.
+        Keyword arguments:
+            audio_file_path: Path to the file to be imported.
+            import_directory: Path to the "import" directory inside the Airtime stor directory.
+                              (eg. /srv/airtime/stor/import)
+            original_filename: The filename of the file when it was uploaded to Airtime.
+            metadata: A dictionary where the "full_path" of where the file is moved to will be added.
         """
         if not isinstance(audio_file_path, str):
-            raise TypeError("audio_file_path must be string. Was of type " + type(audio_file_path).__name__)
+            raise TypeError(
+                "audio_file_path must be string. Was of type "
+                + type(audio_file_path).__name__
+            )
         if not isinstance(import_directory, str):
-            raise TypeError("import_directory must be string. Was of type " + type(import_directory).__name__)
+            raise TypeError(
+                "import_directory must be string. Was of type "
+                + type(import_directory).__name__
+            )
         if not isinstance(original_filename, str):
-            raise TypeError("original_filename must be string. Was of type " + type(original_filename).__name__)
+            raise TypeError(
+                "original_filename must be string. Was of type "
+                + type(original_filename).__name__
+            )
         if not isinstance(metadata, dict):
-            raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__)
+            raise TypeError(
+                "metadata must be a dict. Was of type " + type(metadata).__name__
+            )
         if not os.path.exists(audio_file_path):
             raise FileNotFoundError("audio file not found: {}".format(audio_file_path))

-        #Import the file over to it's final location.
+        # Import the file over to it's final location.
         # TODO: Also, handle the case where the move fails and write some code
         # to possibly move the file to problem_files.

@@ -50,52 +63,65 @@ class FileMoverAnalyzer(Analyzer):
         final_file_path = import_directory
         orig_file_basename, orig_file_extension = os.path.splitext(original_filename)
         if "artist_name" in metadata:
-            final_file_path += "/" + metadata["artist_name"][0:max_dir_len] # truncating with array slicing
+            final_file_path += (
+                "/" + metadata["artist_name"][0:max_dir_len]
+            )  # truncating with array slicing
         if "album_title" in metadata:
             final_file_path += "/" + metadata["album_title"][0:max_dir_len]
         # Note that orig_file_extension includes the "." already
-        final_file_path += "/" + orig_file_basename[0:max_file_len] + orig_file_extension
+        final_file_path += (
+            "/" + orig_file_basename[0:max_file_len] + orig_file_extension
+        )

-        #Ensure any redundant slashes are stripped
+        # Ensure any redundant slashes are stripped
         final_file_path = os.path.normpath(final_file_path)

-        #If a file with the same name already exists in the "import" directory, then
-        #we add a unique string to the end of this one. We never overwrite a file on import
-        #because if we did that, it would mean Airtime's database would have
-        #the wrong information for the file we just overwrote (eg. the song length would be wrong!)
-        #If the final file path is the same as the file we've been told to import (which
-        #you often do when you're debugging), then don't move the file at all.
+        # If a file with the same name already exists in the "import" directory, then
+        # we add a unique string to the end of this one. We never overwrite a file on import
+        # because if we did that, it would mean Airtime's database would have
+        # the wrong information for the file we just overwrote (eg. the song length would be wrong!)
+        # If the final file path is the same as the file we've been told to import (which
+        # you often do when you're debugging), then don't move the file at all.

         if os.path.exists(final_file_path):
             if os.path.samefile(audio_file_path, final_file_path):
                 metadata["full_path"] = final_file_path
                 return metadata
             base_file_path, file_extension = os.path.splitext(final_file_path)
-            final_file_path = "%s_%s%s" % (base_file_path, time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()), file_extension)
+            final_file_path = "%s_%s%s" % (
+                base_file_path,
+                time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()),
+                file_extension,
+            )

-            #If THAT path exists, append a UUID instead:
+            # If THAT path exists, append a UUID instead:
             while os.path.exists(final_file_path):
                 base_file_path, file_extension = os.path.splitext(final_file_path)
-                final_file_path = "%s_%s%s" % (base_file_path, str(uuid.uuid4()), file_extension)
+                final_file_path = "%s_%s%s" % (
+                    base_file_path,
+                    str(uuid.uuid4()),
+                    file_extension,
+                )

-        #Ensure the full path to the file exists
+        # Ensure the full path to the file exists
         mkdir_p(os.path.dirname(final_file_path))

-        #Move the file into its final destination directory
+        # Move the file into its final destination directory
         logging.debug("Moving %s to %s" % (audio_file_path, final_file_path))
         shutil.move(audio_file_path, final_file_path)

         metadata["full_path"] = final_file_path
         return metadata

+
 def mkdir_p(path):
-    """ Make all directories in a tree (like mkdir -p)"""
+    """Make all directories in a tree (like mkdir -p)"""
     if path == "":
         return
     try:
         os.makedirs(path)
-    except OSError as exc: # Python >2.5
+    except OSError as exc:  # Python >2.5
         if exc.errno == errno.EEXIST and os.path.isdir(path):
             pass
-        else: raise
+        else:
+            raise
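The `mkdir_p` helper reformatted above predates a simpler standard-library option: since Python 3.2, `os.makedirs(path, exist_ok=True)` covers the same EEXIST dance. A sketch of the equivalent (not what the commit does, which only reformats):

import os


def mkdir_p(path):
    """Make all directories in a tree (like mkdir -p)."""
    if path == "":
        return
    os.makedirs(path, exist_ok=True)  # no EEXIST handling needed on Python >= 3.2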
@@ -5,8 +5,8 @@ import json
 import time
 import select
 import signal
-import logging
-import multiprocessing
+import logging
+import multiprocessing
 import queue
 from .analyzer_pipeline import AnalyzerPipeline
 from .status_reporter import StatusReporter

@@ -54,29 +54,30 @@ QUEUE = "airtime-uploads"
 So that is a quick overview of the design constraints for this application, and
 why airtime_analyzer is written this way.
 """
-class MessageListener:
+
+
+class MessageListener:
     def __init__(self, rmq_config):
-        ''' Start listening for file upload notification messages
-            from RabbitMQ
-
-            Keyword arguments:
-                rmq_config: A ConfigParser object containing the [rabbitmq] configuration.
-        '''
-
+        """Start listening for file upload notification messages
+        from RabbitMQ
+
+        Keyword arguments:
+            rmq_config: A ConfigParser object containing the [rabbitmq] configuration.
+        """
+
         self._shutdown = False

         # Read the RabbitMQ connection settings from the rmq_config file
-        # The exceptions throw here by default give good error messages.
+        # The exceptions throw here by default give good error messages.
         RMQ_CONFIG_SECTION = "rabbitmq"
-        self._host = rmq_config.get(RMQ_CONFIG_SECTION, 'host')
-        self._port = rmq_config.getint(RMQ_CONFIG_SECTION, 'port')
-        self._username = rmq_config.get(RMQ_CONFIG_SECTION, 'user')
-        self._password = rmq_config.get(RMQ_CONFIG_SECTION, 'password')
-        self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, 'vhost')
+        self._host = rmq_config.get(RMQ_CONFIG_SECTION, "host")
+        self._port = rmq_config.getint(RMQ_CONFIG_SECTION, "port")
+        self._username = rmq_config.get(RMQ_CONFIG_SECTION, "user")
+        self._password = rmq_config.get(RMQ_CONFIG_SECTION, "password")
+        self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, "vhost")

         # Set up a signal handler so we can shutdown gracefully
-        # For some reason, this signal handler must be set up here. I'd rather
+        # For some reason, this signal handler must be set up here. I'd rather
         # put it in AirtimeAnalyzerServer, but it doesn't work there (something to do
         # with pika's SIGTERM handler interfering with it, I think...)
         signal.signal(signal.SIGTERM, self.graceful_shutdown)

@@ -86,9 +87,9 @@ class MessageListener:
                 self.connect_to_messaging_server()
                 self.wait_for_messages()
             except (KeyboardInterrupt, SystemExit):
-                break # Break out of the while loop and exit the application
+                break  # Break out of the while loop and exit the application
             except select.error:
-                pass
+                pass
             except pika.exceptions.AMQPError as e:
                 if self._shutdown:
                     break

@@ -100,27 +101,37 @@ class MessageListener:
         self.disconnect_from_messaging_server()
         logging.info("Exiting cleanly.")

     def connect_to_messaging_server(self):
-        '''Connect to the RabbitMQ server and start listening for messages.'''
-        self._connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._host,
-            port=self._port, virtual_host=self._vhost,
-            credentials=pika.credentials.PlainCredentials(self._username, self._password)))
+        """Connect to the RabbitMQ server and start listening for messages."""
+        self._connection = pika.BlockingConnection(
+            pika.ConnectionParameters(
+                host=self._host,
+                port=self._port,
+                virtual_host=self._vhost,
+                credentials=pika.credentials.PlainCredentials(
+                    self._username, self._password
+                ),
+            )
+        )
         self._channel = self._connection.channel()
-        self._channel.exchange_declare(exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True)
+        self._channel.exchange_declare(
+            exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True
+        )
         result = self._channel.queue_declare(queue=QUEUE, durable=True)

-        self._channel.queue_bind(exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY)
+        self._channel.queue_bind(
+            exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY
+        )

         logging.info(" Listening for messages...")
         self._channel.basic_consume(QUEUE, self.msg_received_callback, auto_ack=False)

     def wait_for_messages(self):
-        '''Wait until we've received a RabbitMQ message.'''
+        """Wait until we've received a RabbitMQ message."""
         self._channel.start_consuming()

     def disconnect_from_messaging_server(self):
-        '''Stop consuming RabbitMQ messages and disconnect'''
+        """Stop consuming RabbitMQ messages and disconnect"""
         # If you try to close a connection that's already closed, you're going to have a bad time.
         # We're breaking EAFP because this can be called multiple times depending on exception
         # handling flow here.

@@ -128,43 +139,45 @@ class MessageListener:
         self._channel.stop_consuming()
         if not self._connection.is_closed and not self._connection.is_closing:
             self._connection.close()

     def graceful_shutdown(self, signum, frame):
-        '''Disconnect and break out of the message listening loop'''
+        """Disconnect and break out of the message listening loop"""
         self._shutdown = True
         self.disconnect_from_messaging_server()

     def msg_received_callback(self, channel, method_frame, header_frame, body):
-        ''' A callback method that runs when a RabbitMQ message is received.
-
-            Here we parse the message, spin up an analyzer process, and report the
-            metadata back to the Airtime web application (or report an error).
-        '''
-        logging.info(" - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key))
-
-        #Declare all variables here so they exist in the exception handlers below, no matter what.
+        """A callback method that runs when a RabbitMQ message is received.
+
+        Here we parse the message, spin up an analyzer process, and report the
+        metadata back to the Airtime web application (or report an error).
+        """
+        logging.info(
+            " - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key)
+        )
+
+        # Declare all variables here so they exist in the exception handlers below, no matter what.
         audio_file_path = ""
-        #final_file_path = ""
+        # final_file_path = ""
         import_directory = ""
         original_filename = ""
-        callback_url = ""
-        api_key = ""
+        callback_url = ""
+        api_key = ""
         file_prefix = ""

-        ''' Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
+        """ Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
         to pass objects between the processes so that if the analyzer process crashes, it does not
         take down the rest of the daemon and we NACK that message so that it doesn't get
         propagated to other airtime_analyzer daemons (eg. running on other servers).
         We avoid cascading failure this way.
-        '''
+        """
         try:
             try:
                 body = body.decode()
             except (UnicodeDecodeError, AttributeError):
                 pass
             msg_dict = json.loads(body)
-            api_key = msg_dict["api_key"]
-            callback_url = msg_dict["callback_url"]
+            api_key = msg_dict["api_key"]
+            callback_url = msg_dict["callback_url"]

             audio_file_path = msg_dict["tmp_file_path"]
             import_directory = msg_dict["import_directory"]

@@ -172,48 +185,71 @@ class MessageListener:
             file_prefix = msg_dict["file_prefix"]
             storage_backend = msg_dict["storage_backend"]

-            audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix)
-            StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata)
+            audio_metadata = MessageListener.spawn_analyzer_process(
+                audio_file_path,
+                import_directory,
+                original_filename,
+                storage_backend,
+                file_prefix,
+            )
+            StatusReporter.report_success_to_callback_url(
+                callback_url, api_key, audio_metadata
+            )

         except KeyError as e:
             # A field in msg_dict that we needed was missing (eg. audio_file_path)
-            logging.exception("A mandatory airtime_analyzer message field was missing from the message.")
+            logging.exception(
+                "A mandatory airtime_analyzer message field was missing from the message."
+            )
             # See the huge comment about NACK below.
-            channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False,
-                               requeue=False) #Important that it doesn't requeue the message
-
+            channel.basic_nack(
+                delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
+            )  # Important that it doesn't requeue the message
+
         except Exception as e:
             logging.exception(e)
-            ''' If ANY exception happens while processing a file, we're going to NACK to the
+            """ If ANY exception happens while processing a file, we're going to NACK to the
             messaging server and tell it to remove the message from the queue.
             (NACK is a negative acknowledgement. We could use ACK instead, but this might come
             in handy in the future.)
             Exceptions in this context are unexpected, unhandled errors. We try to recover
             from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves
             here from any catastrophic or genuinely unexpected errors:
-            '''
-            channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False,
-                               requeue=False) #Important that it doesn't requeue the message
+            """
+            channel.basic_nack(
+                delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
+            )  # Important that it doesn't requeue the message

-            #
-            # TODO: If the JSON was invalid or the web server is down,
+            # TODO: If the JSON was invalid or the web server is down,
             # then don't report that failure to the REST API
-            #TODO: Catch exceptions from this HTTP request too:
-            if callback_url: # If we got an invalid message, there might be no callback_url in the JSON
+            # TODO: Catch exceptions from this HTTP request too:
+            if (
+                callback_url
+            ):  # If we got an invalid message, there might be no callback_url in the JSON
                 # Report this as a failed upload to the File Upload REST API.
-                StatusReporter.report_failure_to_callback_url(callback_url, api_key, import_status=2,
-                                                              reason='An error occurred while importing this file')
-
+                StatusReporter.report_failure_to_callback_url(
+                    callback_url,
+                    api_key,
+                    import_status=2,
+                    reason="An error occurred while importing this file",
+                )
+
         else:
             # ACK at the very end, after the message has been successfully processed.
             # If we don't ack, then RabbitMQ will redeliver the message in the future.
             channel.basic_ack(delivery_tag=method_frame.delivery_tag)

     @staticmethod
-    def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix):
-        ''' Spawn a child process to analyze and import a new audio file. '''
-        '''
+    def spawn_analyzer_process(
+        audio_file_path,
+        import_directory,
+        original_filename,
+        storage_backend,
+        file_prefix,
+    ):
+        """Spawn a child process to analyze and import a new audio file."""
+        """
         q = multiprocessing.Queue()
         p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis,
             args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix))

@@ -225,12 +261,19 @@ class MessageListener:
             logging.info(results)
         else:
             raise Exception("Analyzer process terminated unexpectedly.")
-        '''
+        """
         metadata = {}

         q = queue.Queue()
         try:
-            AnalyzerPipeline.run_analysis(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix)
+            AnalyzerPipeline.run_analysis(
+                q,
+                audio_file_path,
+                import_directory,
+                original_filename,
+                storage_backend,
+                file_prefix,
+            )
             metadata = q.get()
         except Exception as e:
             logging.error("Analyzer pipeline exception: %s" % str(e))

@@ -241,4 +284,3 @@ class MessageListener:
             q.get()

         return metadata
-
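The ack/nack flow black re-wraps above is the core of the listener's crash-safety story: `basic_nack(..., requeue=False)` drops a poisoned message instead of bouncing it to other daemons, while `basic_ack` only fires after a successful import. A minimal sketch of that consume/ack pattern with pika (the `process` helper is hypothetical, not part of the commit):

def on_message(channel, method_frame, header_frame, body):
    try:
        process(body)  # hypothetical import work
    except Exception:
        # Drop the message rather than requeueing it to other consumers.
        channel.basic_nack(
            delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
        )
    else:
        # Only acknowledge once the work completed successfully.
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)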
@@ -9,32 +9,36 @@ import os
 import hashlib
 from .analyzer import Analyzer

-class MetadataAnalyzer(Analyzer):
+
+class MetadataAnalyzer(Analyzer):
     @staticmethod
     def analyze(filename, metadata):
-        ''' Extract audio metadata from tags embedded in the file (eg. ID3 tags)
+        """Extract audio metadata from tags embedded in the file (eg. ID3 tags)

-            Keyword arguments:
-                filename: The path to the audio file to extract metadata from.
-                metadata: A dictionary that the extracted metadata will be added to.
-        '''
+        Keyword arguments:
+            filename: The path to the audio file to extract metadata from.
+            metadata: A dictionary that the extracted metadata will be added to.
+        """
         if not isinstance(filename, str):
-            raise TypeError("filename must be string. Was of type " + type(filename).__name__)
+            raise TypeError(
+                "filename must be string. Was of type " + type(filename).__name__
+            )
         if not isinstance(metadata, dict):
-            raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__)
+            raise TypeError(
+                "metadata must be a dict. Was of type " + type(metadata).__name__
+            )
         if not os.path.exists(filename):
             raise FileNotFoundError("audio file not found: {}".format(filename))

-        #Airtime <= 2.5.x nonsense:
+        # Airtime <= 2.5.x nonsense:
         metadata["ftype"] = "audioclip"
-        #Other fields we'll want to set for Airtime:
+        # Other fields we'll want to set for Airtime:
         metadata["hidden"] = False

         # Get file size and md5 hash of the file
         metadata["filesize"] = os.path.getsize(filename)

-        with open(filename, 'rb') as fh:
+        with open(filename, "rb") as fh:
             m = hashlib.md5()
             while True:
                 data = fh.read(8192)

@@ -46,37 +50,41 @@ class MetadataAnalyzer(Analyzer):
             # Mutagen doesn't handle WAVE files so we use a different package
             ms = magic.open(magic.MIME_TYPE)
             ms.load()
-            with open(filename, 'rb') as fh:
+            with open(filename, "rb") as fh:
                 mime_check = ms.buffer(fh.read(2014))
             metadata["mime"] = mime_check
-            if mime_check == 'audio/x-wav':
+            if mime_check == "audio/x-wav":
                 return MetadataAnalyzer._analyze_wave(filename, metadata)

-        #Extract metadata from an audio file using mutagen
+        # Extract metadata from an audio file using mutagen
         audio_file = mutagen.File(filename, easy=True)

-        #Bail if the file couldn't be parsed. The title should stay as the filename
-        #inside Airtime.
-        if audio_file == None: # Don't use "if not" here. It is wrong due to mutagen's design.
+        # Bail if the file couldn't be parsed. The title should stay as the filename
+        # inside Airtime.
+        if (
+            audio_file == None
+        ):  # Don't use "if not" here. It is wrong due to mutagen's design.
             return metadata
         # Note that audio_file can equal {} if the file is valid but there's no metadata tags.
         # We can still try to grab the info variables below.

-        #Grab other file information that isn't encoded in a tag, but instead usually
-        #in the file header. Mutagen breaks that out into a separate "info" object:
+        # Grab other file information that isn't encoded in a tag, but instead usually
+        # in the file header. Mutagen breaks that out into a separate "info" object:
         info = audio_file.info
-        if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent
+        if hasattr(info, "sample_rate"):  # Mutagen is annoying and inconsistent
             metadata["sample_rate"] = info.sample_rate
         if hasattr(info, "length"):
             metadata["length_seconds"] = info.length
-            #Converting the length in seconds (float) to a formatted time string
+            # Converting the length in seconds (float) to a formatted time string
             track_length = datetime.timedelta(seconds=info.length)
-            metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length)
+            metadata["length"] = str(
+                track_length
+            )  # time.strftime("%H:%M:%S.%f", track_length)
             # Other fields for Airtime
             metadata["cueout"] = metadata["length"]

             # Set a default cue in time in seconds
-            metadata["cuein"] = 0.0;
+            metadata["cuein"] = 0.0

         if hasattr(info, "bitrate"):
             metadata["bit_rate"] = info.bitrate

@@ -86,11 +94,11 @@ class MetadataAnalyzer(Analyzer):
         if audio_file.mime:
             metadata["mime"] = audio_file.mime[0]

-        #Try to get the number of channels if mutagen can...
+        # Try to get the number of channels if mutagen can...
         try:
-            #Special handling for getting the # of channels from MP3s. It's in the "mode" field
-            #which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec...
-            if metadata["mime"] in ["audio/mpeg", 'audio/mp3']:
+            # Special handling for getting the # of channels from MP3s. It's in the "mode" field
+            # which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec...
+            if metadata["mime"] in ["audio/mpeg", "audio/mp3"]:
                 if info.mode == 3:
                     metadata["channels"] = 1
                 else:

@@ -98,54 +106,54 @@ class MetadataAnalyzer(Analyzer):
             else:
                 metadata["channels"] = info.channels
         except (AttributeError, KeyError):
-            #If mutagen can't figure out the number of channels, we'll just leave it out...
+            # If mutagen can't figure out the number of channels, we'll just leave it out...
             pass

-        #Try to extract the number of tracks on the album if we can (the "track total")
+        # Try to extract the number of tracks on the album if we can (the "track total")
         try:
             track_number = audio_file["tracknumber"]
-            if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh
+            if isinstance(track_number, list):  # Sometimes tracknumber is a list, ugh
                 track_number = track_number[0]
             track_number_tokens = track_number
-            if '/' in track_number:
-                track_number_tokens = track_number.split('/')
+            if "/" in track_number:
+                track_number_tokens = track_number.split("/")
                 track_number = track_number_tokens[0]
-            elif '-' in track_number:
-                track_number_tokens = track_number.split('-')
+            elif "-" in track_number:
+                track_number_tokens = track_number.split("-")
                 track_number = track_number_tokens[0]
             metadata["track_number"] = track_number
             track_total = track_number_tokens[1]
             metadata["track_total"] = track_total
         except (AttributeError, KeyError, IndexError):
-            #If we couldn't figure out the track_number or track_total, just ignore it...
+            # If we couldn't figure out the track_number or track_total, just ignore it...
             pass

-        #We normalize the mutagen tags slightly here, so in case mutagen changes,
-        #we find the
+        # We normalize the mutagen tags slightly here, so in case mutagen changes,
+        # we find the
         mutagen_to_airtime_mapping = {
-            'title': 'track_title',
-            'artist': 'artist_name',
-            'album': 'album_title',
-            'bpm': 'bpm',
-            'composer': 'composer',
-            'conductor': 'conductor',
-            'copyright': 'copyright',
-            'comment': 'comment',
-            'encoded_by': 'encoder',
-            'genre': 'genre',
-            'isrc': 'isrc',
-            'label': 'label',
-            'organization': 'label',
+            "title": "track_title",
+            "artist": "artist_name",
+            "album": "album_title",
+            "bpm": "bpm",
+            "composer": "composer",
+            "conductor": "conductor",
+            "copyright": "copyright",
+            "comment": "comment",
+            "encoded_by": "encoder",
+            "genre": "genre",
+            "isrc": "isrc",
+            "label": "label",
+            "organization": "label",
             #'length': 'length',
-            'language': 'language',
-            'last_modified':'last_modified',
-            'mood': 'mood',
-            'bit_rate': 'bit_rate',
-            'replay_gain': 'replaygain',
+            "language": "language",
+            "last_modified": "last_modified",
+            "mood": "mood",
+            "bit_rate": "bit_rate",
+            "replay_gain": "replaygain",
             #'tracknumber': 'track_number',
             #'track_total': 'track_total',
-            'website': 'website',
-            'date': 'year',
+            "website": "website",
+            "date": "year",
             #'mime_type': 'mime',
         }

@@ -158,7 +166,7 @@ class MetadataAnalyzer(Analyzer):
                 if isinstance(metadata[airtime_tag], list):
                     if metadata[airtime_tag]:
                         metadata[airtime_tag] = metadata[airtime_tag][0]
-                    else: # Handle empty lists
+                    else:  # Handle empty lists
                         metadata[airtime_tag] = ""

             except KeyError:

@@ -169,13 +177,15 @@ class MetadataAnalyzer(Analyzer):
     @staticmethod
     def _analyze_wave(filename, metadata):
         try:
-            reader = wave.open(filename, 'rb')
+            reader = wave.open(filename, "rb")
             metadata["channels"] = reader.getnchannels()
             metadata["sample_rate"] = reader.getframerate()
             length_seconds = float(reader.getnframes()) / float(metadata["sample_rate"])
-            #Converting the length in seconds (float) to a formatted time string
+            # Converting the length in seconds (float) to a formatted time string
             track_length = datetime.timedelta(seconds=length_seconds)
-            metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length)
+            metadata["length"] = str(
+                track_length
+            )  # time.strftime("%H:%M:%S.%f", track_length)
             metadata["length_seconds"] = length_seconds
             metadata["cueout"] = metadata["length"]
         except wave.Error as ex:
@@ -1,32 +1,47 @@
 # -*- coding: utf-8 -*-
-__author__ = 'asantoni'
+__author__ = "asantoni"

 import subprocess
 import logging
 from .analyzer import Analyzer


 class UnplayableFileError(Exception):
     pass

-class PlayabilityAnalyzer(Analyzer):
-    ''' This class checks if a file can actually be played with Liquidsoap. '''

-    LIQUIDSOAP_EXECUTABLE = 'liquidsoap'
+
+class PlayabilityAnalyzer(Analyzer):
+    """This class checks if a file can actually be played with Liquidsoap."""
+
+    LIQUIDSOAP_EXECUTABLE = "liquidsoap"

     @staticmethod
     def analyze(filename, metadata):
-        ''' Checks if a file can be played by Liquidsoap.
+        """Checks if a file can be played by Liquidsoap.
         :param filename: The full path to the file to analyzer
         :param metadata: A metadata dictionary where the results will be put
         :return: The metadata dictionary
-        '''
-        command = [PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE, '-v', '-c', "output.dummy(audio_to_stereo(single(argv(1))))", '--', filename]
+        """
+        command = [
+            PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE,
+            "-v",
+            "-c",
+            "output.dummy(audio_to_stereo(single(argv(1))))",
+            "--",
+            filename,
+        ]
         try:
             subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)

-        except OSError as e: # liquidsoap was not found
-            logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have liquidsoap installed?"))
-        except (subprocess.CalledProcessError, Exception) as e: # liquidsoap returned an error code
+        except OSError as e:  # liquidsoap was not found
+            logging.warn(
+                "Failed to run: %s - %s. %s"
+                % (command[0], e.strerror, "Do you have liquidsoap installed?")
+            )
+        except (
+            subprocess.CalledProcessError,
+            Exception,
+        ) as e:  # liquidsoap returned an error code
             logging.warn(e)
             raise UnplayableFileError()
@@ -6,30 +6,39 @@ import re


 class ReplayGainAnalyzer(Analyzer):
-    ''' This class extracts the ReplayGain using a tool from the python-rgain package. '''
+    """This class extracts the ReplayGain using a tool from the python-rgain package."""

-    REPLAYGAIN_EXECUTABLE = 'replaygain' # From the rgain3 python package
+    REPLAYGAIN_EXECUTABLE = "replaygain"  # From the rgain3 python package

     @staticmethod
     def analyze(filename, metadata):
-        ''' Extracts the Replaygain loudness normalization factor of a track.
+        """Extracts the Replaygain loudness normalization factor of a track.
         :param filename: The full path to the file to analyzer
         :param metadata: A metadata dictionary where the results will be put
         :return: The metadata dictionary
-        '''
-        ''' The -d flag means do a dry-run, ie. don't modify the file directly.
-        '''
-        command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, '-d', filename]
+        """
+        """ The -d flag means do a dry-run, ie. don't modify the file directly.
+        """
+        command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, "-d", filename]
         try:
-            results = subprocess.check_output(command, stderr=subprocess.STDOUT,
-                                              close_fds=True, universal_newlines=True)
-            gain_match = r'Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB'
+            results = subprocess.check_output(
+                command,
+                stderr=subprocess.STDOUT,
+                close_fds=True,
+                universal_newlines=True,
+            )
+            gain_match = (
+                r"Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB"
+            )
             replaygain = re.search(gain_match, results).group(1)
-            metadata['replay_gain'] = float(replaygain)
+            metadata["replay_gain"] = float(replaygain)

-        except OSError as e: # replaygain was not found
-            logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have python-rgain installed?"))
-        except subprocess.CalledProcessError as e: # replaygain returned an error code
+        except OSError as e:  # replaygain was not found
+            logging.warn(
+                "Failed to run: %s - %s. %s"
+                % (command[0], e.strerror, "Do you have python-rgain installed?")
+            )
+        except subprocess.CalledProcessError as e:  # replaygain returned an error code
             logging.warn("%s %s %s", e.cmd, e.output, e.returncode)
         except Exception as e:
             logging.warn(e)
@ -7,14 +7,15 @@ import queue
|
|||
import time
|
||||
import traceback
|
||||
import pickle
|
||||
import threading
|
||||
import threading
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Disable urllib3 warnings because these can cause a rare deadlock due to Python 2's crappy internal non-reentrant locking
|
||||
# around POSIX stuff. See SAAS-714. The hasattr() is for compatibility with older versions of requests.
|
||||
if hasattr(requests, 'packages'):
|
||||
if hasattr(requests, "packages"):
|
||||
requests.packages.urllib3.disable_warnings()
|
||||
|
||||
|
||||
class PicklableHttpRequest:
|
||||
def __init__(self, method, url, data, api_key):
|
||||
self.method = method
|
||||
|
@ -23,18 +24,23 @@ class PicklableHttpRequest:
|
|||
self.api_key = api_key
|
||||
|
||||
def create_request(self):
|
||||
return requests.Request(method=self.method, url=self.url, data=self.data,
|
||||
auth=requests.auth.HTTPBasicAuth(self.api_key, ''))
|
||||
return requests.Request(
|
||||
method=self.method,
|
||||
url=self.url,
|
||||
data=self.data,
|
||||
auth=requests.auth.HTTPBasicAuth(self.api_key, ""),
|
||||
)
|
||||
|
||||
|
||||
def process_http_requests(ipc_queue, http_retry_queue_path):
|
||||
''' Runs in a separate thread and performs all the HTTP requests where we're
|
||||
reporting extracted audio file metadata or errors back to the Airtime web application.
|
||||
"""Runs in a separate thread and performs all the HTTP requests where we're
|
||||
reporting extracted audio file metadata or errors back to the Airtime web application.
|
||||
|
||||
This process also checks every 5 seconds if there's failed HTTP requests that we
|
||||
need to retry. We retry failed HTTP requests so that we don't lose uploads if the
|
||||
web server is temporarily down.
|
||||
This process also checks every 5 seconds if there's failed HTTP requests that we
|
||||
need to retry. We retry failed HTTP requests so that we don't lose uploads if the
|
||||
web server is temporarily down.
|
||||
|
||||
'''
|
||||
"""
|
||||
|
||||
# Store any failed requests (eg. due to web server errors or downtime) to be
|
||||
# retried later:
|
||||
|
@ -45,7 +51,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
|
|||
# if airtime_analyzer is shut down while the web server is down or unreachable,
|
||||
# and there were failed HTTP requests pending, waiting to be retried.
|
||||
try:
|
||||
with open(http_retry_queue_path, 'rb') as pickle_file:
|
||||
with open(http_retry_queue_path, "rb") as pickle_file:
|
||||
retry_queue = pickle.load(pickle_file)
|
||||
except IOError as e:
|
||||
if e.errno == 2:
|
||||
|
@ -64,11 +70,16 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
|
|||
while not shutdown:
|
||||
try:
|
||||
request = ipc_queue.get(block=True, timeout=5)
|
||||
if isinstance(request, str) and request == "shutdown": # Bit of a cheat
|
||||
if (
|
||||
isinstance(request, str) and request == "shutdown"
|
||||
): # Bit of a cheat
|
||||
shutdown = True
|
||||
break
|
||||
if not isinstance(request, PicklableHttpRequest):
|
||||
raise TypeError("request must be a PicklableHttpRequest. Was of type " + type(request).__name__)
|
||||
raise TypeError(
|
||||
"request must be a PicklableHttpRequest. Was of type "
|
||||
+ type(request).__name__
|
||||
)
|
||||
except queue.Empty:
|
||||
request = None
|
||||
|
||||
|
@ -85,32 +96,40 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
|
|||
logging.info("Shutting down status_reporter")
|
||||
# Pickle retry_queue to disk so that we don't lose uploads if we're shut down while
|
||||
# while the web server is down or unreachable.
|
||||
with open(http_retry_queue_path, 'wb') as pickle_file:
|
||||
with open(http_retry_queue_path, "wb") as pickle_file:
|
||||
pickle.dump(retry_queue, pickle_file)
|
||||
return
|
||||
except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case.
|
||||
except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case.
|
||||
if shutdown:
|
||||
return
|
||||
logging.exception("Unhandled exception in StatusReporter")
|
||||
logging.exception(e)
|
||||
logging.info("Restarting StatusReporter thread")
|
||||
time.sleep(2) # Throttle it
|
||||
time.sleep(2) # Throttle it
|
||||
|
||||
|
||||
def send_http_request(picklable_request, retry_queue):
if not isinstance(picklable_request, PicklableHttpRequest):
raise TypeError("picklable_request must be a PicklableHttpRequest. Was of type " + type(picklable_request).__name__)
try:
raise TypeError(
"picklable_request must be a PicklableHttpRequest. Was of type "
+ type(picklable_request).__name__
)
try:
bare_request = picklable_request.create_request()
s = requests.Session()
prepared_request = s.prepare_request(bare_request)
r = s.send(prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False) # SNI is a pain in the ass
r.raise_for_status() # Raise an exception if there was an http error code returned
r = s.send(
prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False
)  # SNI is a pain in the ass
r.raise_for_status()  # Raise an exception if there was an http error code returned
logging.info("HTTP request sent successfully.")
except requests.exceptions.HTTPError as e:
if e.response.status_code == 422:
# Do not retry the request if there was a metadata validation error
logging.error("HTTP request failed due to an HTTP exception. Exception was: %s" % str(e))
logging.error(
"HTTP request failed due to an HTTP exception. Exception was: %s"
% str(e)
)
else:
# The request failed with an error 500 probably, so let's check if Airtime and/or
# the web server are broken. If not, then our request was probably causing an
@@ -124,8 +143,10 @@ def send_http_request(picklable_request, retry_queue):
# You will have to find these bad requests in logs or you'll be
# notified by sentry.
except requests.exceptions.ConnectionError as e:
logging.error("HTTP request failed due to a connection error. Retrying later. %s" % str(e))
retry_queue.append(picklable_request) # Retry it later
logging.error(
"HTTP request failed due to a connection error. Retrying later. %s" % str(e)
)
retry_queue.append(picklable_request)  # Retry it later
except Exception as e:
logging.error("HTTP request failed with unhandled exception. %s" % str(e))
logging.error(traceback.format_exc())
@@ -134,12 +155,13 @@ def send_http_request(picklable_request, retry_queue):
# that breaks our code. I don't want us pickling data that potentially
# breaks airtime_analyzer.


def is_web_server_broken(url):
''' Do a naive test to check if the web server we're trying to access is down.
We use this to try to differentiate between error 500s that are coming
from (for example) a bug in the Airtime Media REST API and error 500s
caused by Airtime or the webserver itself being broken temporarily.
'''
"""Do a naive test to check if the web server we're trying to access is down.
We use this to try to differentiate between error 500s that are coming
from (for example) a bug in the Airtime Media REST API and error 500s
caused by Airtime or the webserver itself being broken temporarily.
"""
try:
test_req = requests.get(url, verify=False)
test_req.raise_for_status()
@@ -147,35 +169,38 @@ def is_web_server_broken(url):
return True
else:
# The request worked fine, so the web server and Airtime are still up.
return False
return False
return False


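The health check above is deliberately naive: if a plain GET against the server succeeds, a 500 elsewhere is likely our own request's fault; if it fails, the server itself is in trouble. A standalone sketch of the same idea:

import requests

def server_seems_down(url):
    # Returns True when the base URL itself errors out, suggesting the
    # web server (not our specific API request) is the problem.
    try:
        resp = requests.get(url, verify=False)
        resp.raise_for_status()
    except requests.exceptions.RequestException:
        return True
    return False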
class StatusReporter():
''' Reports the extracted audio file metadata and job status back to the
Airtime web application.
'''
class StatusReporter:
"""Reports the extracted audio file metadata and job status back to the
Airtime web application.
"""

_HTTP_REQUEST_TIMEOUT = 30

''' We use multiprocessing.Process again here because we need a thread for this stuff

""" We use multiprocessing.Process again here because we need a thread for this stuff
anyways, and Python gives us process isolation for free (crash safety).
'''
"""
_ipc_queue = queue.Queue()
#_http_thread = multiprocessing.Process(target=process_http_requests,
# _http_thread = multiprocessing.Process(target=process_http_requests,
# args=(_ipc_queue,))
_http_thread = None

@classmethod
def start_thread(self, http_retry_queue_path):
StatusReporter._http_thread = threading.Thread(target=process_http_requests,
args=(StatusReporter._ipc_queue,http_retry_queue_path))
StatusReporter._http_thread = threading.Thread(
target=process_http_requests,
args=(StatusReporter._ipc_queue, http_retry_queue_path),
)
StatusReporter._http_thread.start()

@classmethod
def stop_thread(self):
logging.info("Terminating status_reporter process")
#StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process
StatusReporter._ipc_queue.put("shutdown") # Special trigger
# StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process
StatusReporter._ipc_queue.put("shutdown")  # Special trigger
StatusReporter._http_thread.join()

@classmethod
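StatusReporter is used as a process-wide singleton: class-level state, started once at boot and stopped once at shutdown. A sketch of the intended lifecycle, assuming the class above is imported (the queue path is illustrative):

StatusReporter.start_thread("/tmp/http_retry_queue.pickle")  # path is hypothetical
try:
    pass  # ... run the message listener, import files, report statuses ...
finally:
    StatusReporter.stop_thread()  # enqueues the "shutdown" sentinel and joins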
@@ -184,30 +209,33 @@ class StatusReporter():

@classmethod
def report_success_to_callback_url(self, callback_url, api_key, audio_metadata):
''' Report the extracted metadata and status of the successfully imported file
to the callback URL (which should be the Airtime File Upload API)
'''
"""Report the extracted metadata and status of the successfully imported file
to the callback URL (which should be the Airtime File Upload API)
"""
put_payload = json.dumps(audio_metadata)
#r = requests.Request(method='PUT', url=callback_url, data=put_payload,
# r = requests.Request(method='PUT', url=callback_url, data=put_payload,
# auth=requests.auth.HTTPBasicAuth(api_key, ''))
'''
"""
r = requests.Request(method='PUT', url=callback_url, data=put_payload,
auth=requests.auth.HTTPBasicAuth(api_key, ''))

StatusReporter._send_http_request(r)
'''
"""

StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url,
data=put_payload, api_key=api_key))
StatusReporter._send_http_request(
PicklableHttpRequest(
method="PUT", url=callback_url, data=put_payload, api_key=api_key
)
)

'''
"""
try:
r.raise_for_status() # Raise an exception if there was an http error code returned
except requests.exceptions.RequestException:
StatusReporter._ipc_queue.put(r.prepare())
'''
"""

'''
"""
# Encode the audio metadata as json and post it back to the callback_url
put_payload = json.dumps(audio_metadata)
logging.debug("sending http put with payload: " + put_payload)
@@ -219,31 +247,38 @@ class StatusReporter():

#TODO: queue up failed requests and try them again later.
r.raise_for_status() # Raise an exception if there was an http error code returned
'''
"""

@classmethod
def report_failure_to_callback_url(self, callback_url, api_key, import_status, reason):
if not isinstance(import_status, int ):
raise TypeError("import_status must be an integer. Was of type " + type(import_status).__name__)
def report_failure_to_callback_url(
self, callback_url, api_key, import_status, reason
):
if not isinstance(import_status, int):
raise TypeError(
"import_status must be an integer. Was of type "
+ type(import_status).__name__
)

logging.debug("Reporting import failure to Airtime REST API...")
audio_metadata = dict()
audio_metadata["import_status"] = import_status
audio_metadata["comment"] = reason # hack attack
put_payload = json.dumps(audio_metadata)
#logging.debug("sending http put with payload: " + put_payload)
'''
# logging.debug("sending http put with payload: " + put_payload)
"""
r = requests.put(callback_url, data=put_payload,
auth=requests.auth.HTTPBasicAuth(api_key, ''),
timeout=StatusReporter._HTTP_REQUEST_TIMEOUT)
'''
StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url,
data=put_payload, api_key=api_key))
'''
"""
StatusReporter._send_http_request(
PicklableHttpRequest(
method="PUT", url=callback_url, data=put_payload, api_key=api_key
)
)
"""
logging.debug("HTTP request returned status: " + str(r.status_code))
logging.debug(r.text) # log the response body

#TODO: queue up failed requests and try them again later.
r.raise_for_status() # raise an exception if there was an http error code returned
'''

"""

@@ -2,12 +2,14 @@
from nose.tools import *
import airtime_analyzer


def setup():
pass


def teardown():
pass


def test_basic():
pass

@@ -8,48 +8,58 @@ import datetime
from airtime_analyzer.analyzer_pipeline import AnalyzerPipeline
from airtime_analyzer import config_file

DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3'
DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3'
DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3"
DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3"


def setup():
pass


def teardown():
#Move the file back
# Move the file back
shutil.move(DEFAULT_IMPORT_DEST, DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE)


def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
q = Queue()
file_prefix = u''
file_prefix = u""
storage_backend = "file"
#This actually imports the file into the "./Test Artist" directory.
AnalyzerPipeline.run_analysis(q, DEFAULT_AUDIO_FILE, u'.', filename, storage_backend, file_prefix)
# This actually imports the file into the "./Test Artist" directory.
AnalyzerPipeline.run_analysis(
q, DEFAULT_AUDIO_FILE, u".", filename, storage_backend, file_prefix
)
metadata = q.get()
assert metadata['track_title'] == u'Test Title'
assert metadata['artist_name'] == u'Test Artist'
assert metadata['album_title'] == u'Test Album'
assert metadata['year'] == u'1999'
assert metadata['genre'] == u'Test Genre'
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't.
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"]))
assert metadata["track_title"] == u"Test Title"
assert metadata["artist_name"] == u"Test Artist"
assert metadata["album_title"] == u"Test Album"
assert metadata["year"] == u"1999"
assert metadata["genre"] == u"Test Genre"
assert metadata["mime"] == "audio/mp3"  # Not unicode because MIMEs aren't.
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["length"] == str(
datetime.timedelta(seconds=metadata["length_seconds"])
)
assert os.path.exists(DEFAULT_IMPORT_DEST)


@raises(TypeError)
def test_wrong_type_queue_param():
AnalyzerPipeline.run_analysis(Queue(), u'', u'', u'')
AnalyzerPipeline.run_analysis(Queue(), u"", u"", u"")


@raises(TypeError)
def test_wrong_type_string_param2():
AnalyzerPipeline.run_analysis(Queue(), '', u'', u'')
AnalyzerPipeline.run_analysis(Queue(), "", u"", u"")


@raises(TypeError)
def test_wrong_type_string_param3():
AnalyzerPipeline.run_analysis(Queue(), u'', '', u'')
AnalyzerPipeline.run_analysis(Queue(), u"", "", u"")


@raises(TypeError)
def test_wrong_type_string_param4():
AnalyzerPipeline.run_analysis(Queue(), u'', u'', '')

AnalyzerPipeline.run_analysis(Queue(), u"", u"", "")

@@ -2,13 +2,16 @@
from nose.tools import *
from airtime_analyzer.analyzer import Analyzer


def setup():
pass


def teardown():
pass


@raises(NotImplementedError)
def test_analyze():
abstract_analyzer = Analyzer()
abstract_analyzer.analyze(u'foo', dict())
abstract_analyzer.analyze(u"foo", dict())

@@ -2,63 +2,97 @@
from nose.tools import *
from airtime_analyzer.cuepoint_analyzer import CuePointAnalyzer


def check_default_metadata(metadata):
''' Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
"""Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
:param metadata: a metadata dictionary
:return: Nothing
'''
"""
# We give silan some leeway here by specifying a tolerance
tolerance_seconds = 0.1
length_seconds = 3.9
assert abs(metadata['length_seconds'] - length_seconds) < tolerance_seconds
assert abs(float(metadata['cuein'])) < tolerance_seconds
assert abs(float(metadata['cueout']) - length_seconds) < tolerance_seconds
assert abs(metadata["length_seconds"] - length_seconds) < tolerance_seconds
assert abs(float(metadata["cuein"])) < tolerance_seconds
assert abs(float(metadata["cueout"]) - length_seconds) < tolerance_seconds


def test_missing_silan():
old_silan = CuePointAnalyzer.SILAN_EXECUTABLE
CuePointAnalyzer.SILAN_EXECUTABLE = 'foosdaf'
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back
CuePointAnalyzer.SILAN_EXECUTABLE = "foosdaf"
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
CuePointAnalyzer.SILAN_EXECUTABLE = old_silan  # Need to put this back


def test_invalid_filepath():
metadata = CuePointAnalyzer.analyze(u'non-existent-file', dict())
metadata = CuePointAnalyzer.analyze(u"non-existent-file", dict())


def test_mp3_utf8():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_dualmono():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_jointstereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_simplestereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_mono():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)


def test_ogg_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)


def test_invalid_wma():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)


def test_m4a_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)


def test_wav_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
check_default_metadata(metadata)

@@ -8,109 +8,125 @@ import mock
from pprint import pprint
from airtime_analyzer.filemover_analyzer import FileMoverAnalyzer

DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3'
DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3'
DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3"
DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3"


def setup():
pass


def teardown():
pass


@raises(Exception)
def test_dont_use_analyze():
FileMoverAnalyzer.analyze(u'foo', dict())
FileMoverAnalyzer.analyze(u"foo", dict())


@raises(TypeError)
def test_move_wrong_string_param1():
FileMoverAnalyzer.move(42, '', '', dict())
FileMoverAnalyzer.move(42, "", "", dict())


@raises(TypeError)
def test_move_wrong_string_param2():
FileMoverAnalyzer.move(u'', 23, u'', dict())
FileMoverAnalyzer.move(u"", 23, u"", dict())


@raises(TypeError)
def test_move_wrong_string_param3():
FileMoverAnalyzer.move('', '', 5, dict())
FileMoverAnalyzer.move("", "", 5, dict())


@raises(TypeError)
def test_move_wrong_dict_param():
FileMoverAnalyzer.move('', '', '', 12345)
FileMoverAnalyzer.move("", "", "", 12345)


@raises(FileNotFoundError)
def test_move_wrong_string_param3():
FileMoverAnalyzer.move('', '', '', dict())
FileMoverAnalyzer.move("", "", "", dict())


def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict())
#Move the file back
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
# Move the file back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE)


def test_basic_samefile():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'tests/test_data', filename, dict())
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u"tests/test_data", filename, dict())
assert os.path.exists(DEFAULT_AUDIO_FILE)


def test_duplicate_file():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
#Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict())
#Copy it back to the original location
# Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
# Copy it back to the original location
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Import it again. It shouldn't overwrite the old file and instead create a new
# Import it again. It shouldn't overwrite the old file and instead create a new
metadata = dict()
metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, metadata)
#Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, metadata)
# Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
#Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
# Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
os.remove(metadata["full_path"])
assert os.path.exists(DEFAULT_AUDIO_FILE)

''' If you import three copies of the same file, the behaviour is:

""" If you import three copies of the same file, the behaviour is:
- The filename of the first file is preserved.
- The filename of the second file has the timestamp attached to it.
- The filename of the third file has a UUID placed after the timestamp, but ONLY IF
it's imported within 1 second of the second file (ie. if the timestamp is the same).
'''
"""


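As a rough illustration of that collision-handling scheme (timestamp suffix first, then a UUID when even the timestamp collides), here is a standalone sketch; this is not the FileMoverAnalyzer code itself, and the helper name is hypothetical:

import os
import time
import uuid

def unique_destination(dest_dir, filename):
    # First copy: keep the name. Second: append a timestamp.
    # Third within the same second: append a UUID after the timestamp.
    base, ext = os.path.splitext(filename)
    candidate = os.path.join(dest_dir, filename)
    if not os.path.exists(candidate):
        return candidate
    stamp = time.strftime("%m-%d-%Y-%H-%M", time.localtime())
    candidate = os.path.join(dest_dir, "%s_%s%s" % (base, stamp, ext))
    if not os.path.exists(candidate):
        return candidate
    return os.path.join(dest_dir, "%s_%s_%s%s" % (base, stamp, uuid.uuid4(), ext))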
def test_double_duplicate_files():
# Here we use mock to patch out the time.localtime() function so that it
# always returns the same value. This allows us to consistently simulate these test cases
# where the last two of the three files are imported at the same time as the timestamp.
with mock.patch('airtime_analyzer.filemover_analyzer.time') as mock_time:
mock_time.localtime.return_value = time.localtime()#date(2010, 10, 8)
with mock.patch("airtime_analyzer.filemover_analyzer.time") as mock_time:
mock_time.localtime.return_value = time.localtime()  # date(2010, 10, 8)
mock_time.side_effect = lambda *args, **kw: time(*args, **kw)

filename = os.path.basename(DEFAULT_AUDIO_FILE)
#Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict())
#Copy it back to the original location
# Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
# Copy it back to the original location
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Import it again. It shouldn't overwrite the old file and instead create a new
# Import it again. It shouldn't overwrite the old file and instead create a new
first_dup_metadata = dict()
first_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename,
first_dup_metadata)
#Copy it back again!
first_dup_metadata = FileMoverAnalyzer.move(
DEFAULT_AUDIO_FILE, u".", filename, first_dup_metadata
)
# Copy it back again!
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Reimport for the third time, which should have the same timestamp as the second one
#thanks to us mocking out time.localtime()
# Reimport for the third time, which should have the same timestamp as the second one
# thanks to us mocking out time.localtime()
second_dup_metadata = dict()
second_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename,
second_dup_metadata)
#Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
second_dup_metadata = FileMoverAnalyzer.move(
DEFAULT_AUDIO_FILE, u".", filename, second_dup_metadata
)
# Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
#Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
# Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
os.remove(first_dup_metadata["full_path"])
os.remove(second_dup_metadata["full_path"])
assert os.path.exists(DEFAULT_AUDIO_FILE)


@raises(OSError)
def test_bad_permissions_destination_dir():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
dest_dir = u'/sys/foobar' # /sys is using sysfs on Linux, which is unwritable
dest_dir = u"/sys/foobar"  # /sys is using sysfs on Linux, which is unwritable
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, dest_dir, filename, dict())
#Move the file back
# Move the file back
shutil.move(os.path.join(dest_dir, filename), DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE)

@@ -6,78 +6,101 @@ import mock
from nose.tools import *
from airtime_analyzer.metadata_analyzer import MetadataAnalyzer


def setup():
pass


def teardown():
pass


def check_default_metadata(metadata):
assert metadata['track_title'] == 'Test Title'
assert metadata['artist_name'] == 'Test Artist'
assert metadata['album_title'] == 'Test Album'
assert metadata['year'] == '1999'
assert metadata['genre'] == 'Test Genre'
assert metadata['track_number'] == '1'
assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"]))
assert metadata["track_title"] == "Test Title"
assert metadata["artist_name"] == "Test Artist"
assert metadata["album_title"] == "Test Album"
assert metadata["year"] == "1999"
assert metadata["genre"] == "Test Genre"
assert metadata["track_number"] == "1"
assert metadata["length"] == str(
datetime.timedelta(seconds=metadata["length_seconds"])
)


def test_mp3_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 1
assert metadata['bit_rate'] == 63998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't.
assert metadata['track_total'] == '10' # MP3s can have a track_total
#Mutagen doesn't extract comments from mp3s it seems
assert metadata["channels"] == 1
assert metadata["bit_rate"] == 63998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"  # Not unicode because MIMEs aren't.
assert metadata["track_total"] == "10"  # MP3s can have a track_total
# Mutagen doesn't extract comments from mp3s it seems


def test_mp3_jointstereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 127998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10"  # MP3s can have a track_total


def test_mp3_simplestereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 127998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10"  # MP3s can have a track_total


def test_mp3_dualmono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 127998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10"  # MP3s can have a track_total


def test_ogg_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.ogg', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-mono.ogg", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 1
assert metadata['bit_rate'] == 80000
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/vorbis'
assert metadata['comment'] == 'Test Comment'
assert metadata["channels"] == 1
assert metadata["bit_rate"] == 80000
assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata["mime"] == "audio/vorbis"
assert metadata["comment"] == "Test Comment"


def test_ogg_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 112000
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/vorbis'
assert metadata['comment'] == 'Test Comment'
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 112000
assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata["mime"] == "audio/vorbis"
assert metadata["comment"] == "Test Comment"

''' faac and avconv can't seem to create a proper mono AAC file... ugh

""" faac and avconv can't seem to create a proper mono AAC file... ugh
def test_aac_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.m4a')
print("Mono AAC metadata:")
@@ -88,78 +111,93 @@ def test_aac_mono():
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/mp4'
assert metadata['comment'] == 'Test Comment'
'''
"""


def test_aac_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 102619
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/mp4'
assert metadata['comment'] == 'Test Comment'
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 102619
assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata["mime"] == "audio/mp4"
assert metadata["comment"] == "Test Comment"


def test_mp3_utf8():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
# Using a bunch of different UTF-8 codepages here. Test data is from:
# http://winrus.com/utf8-jap.htm
assert metadata['track_title'] == 'アイウエオカキクケコサシスセソタチツテ'
assert metadata['artist_name'] == 'てすと'
assert metadata['album_title'] == 'Ä ä Ü ü ß'
assert metadata['year'] == '1999'
assert metadata['genre'] == 'Я Б Г Д Ж Й'
assert metadata['track_number'] == '1'
assert metadata['channels'] == 2
assert metadata['bit_rate'] < 130000
assert metadata['bit_rate'] > 127000
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["track_title"] == "アイウエオカキクケコサシスセソタチツテ"
assert metadata["artist_name"] == "てすと"
assert metadata["album_title"] == "Ä ä Ü ü ß"
assert metadata["year"] == "1999"
assert metadata["genre"] == "Я Б Г Д Ж Й"
assert metadata["track_number"] == "1"
assert metadata["channels"] == 2
assert metadata["bit_rate"] < 130000
assert metadata["bit_rate"] > 127000
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10"  # MP3s can have a track_total


def test_invalid_wma():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
assert metadata['mime'] == 'audio/x-ms-wma'
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
assert metadata["mime"] == "audio/x-ms-wma"


def test_wav_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.wav', dict())
assert metadata['mime'] == 'audio/x-wav'
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['channels'] == 2
assert metadata['sample_rate'] == 44100
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
assert metadata["mime"] == "audio/x-wav"
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["channels"] == 2
assert metadata["sample_rate"] == 44100


# Make sure the parameter checking works
@raises(FileNotFoundError)
def test_move_wrong_string_param1():
not_unicode = 'asdfasdf'
not_unicode = "asdfasdf"
MetadataAnalyzer.analyze(not_unicode, dict())


@raises(TypeError)
def test_move_wrong_metadata_dict():
not_a_dict = list()
MetadataAnalyzer.analyze('asdfasdf', not_a_dict)
MetadataAnalyzer.analyze("asdfasdf", not_a_dict)


# Test an mp3 file where the number of channels is invalid or missing:
def test_mp3_bad_channels():
filename = 'tests/test_data/44100Hz-16bit-mono.mp3'
'''
filename = "tests/test_data/44100Hz-16bit-mono.mp3"
"""
It'd be a pain in the ass to construct a real MP3 with an invalid number
of channels by hand because that value is stored in every MP3 frame in the file
'''
"""
audio_file = mutagen.File(filename, easy=True)
audio_file.info.mode = 1777
with mock.patch('airtime_analyzer.metadata_analyzer.mutagen') as mock_mutagen:
with mock.patch("airtime_analyzer.metadata_analyzer.mutagen") as mock_mutagen:
mock_mutagen.File.return_value = audio_file
#mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw)
# mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw)

metadata = MetadataAnalyzer.analyze(filename, dict())
check_default_metadata(metadata)
assert metadata['channels'] == 1
assert metadata['bit_rate'] == 63998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't.
assert metadata['track_total'] == '10' # MP3s can have a track_total
#Mutagen doesn't extract comments from mp3s it seems
assert metadata["channels"] == 1
assert metadata["bit_rate"] == 63998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"  # Not unicode because MIMEs aren't.
assert metadata["track_total"] == "10"  # MP3s can have a track_total
# Mutagen doesn't extract comments from mp3s it seems


def test_unparsable_file():
MetadataAnalyzer.analyze('tests/test_data/unparsable.txt', dict())
MetadataAnalyzer.analyze("tests/test_data/unparsable.txt", dict())

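The bad-channels test above leans on a common pattern: patch the module-level mutagen reference inside metadata_analyzer so that analyze() sees a doctored file object instead of parsing a real MP3. A minimal runnable demonstration of the same pattern against a real target (the patched function here is just an example):

import mock  # Python 2; on Python 3, unittest.mock works the same way.
import os.path

# Patch a module-level dependency so the code under test sees our stub.
with mock.patch("os.path.exists") as mock_exists:
    mock_exists.return_value = True
    assert os.path.exists("/no/such/file")  # Stubbed: returns True.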
@@ -2,61 +2,97 @@
from nose.tools import *
from airtime_analyzer.playability_analyzer import *


def check_default_metadata(metadata):
''' Stub function for now in case we need it later.'''
"""Stub function for now in case we need it later."""
pass


def test_missing_liquidsoap():
old_ls = PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = 'foosdaf'
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = "foosdaf"
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls  # Need to put this back


@raises(UnplayableFileError)
def test_invalid_filepath():
metadata = PlayabilityAnalyzer.analyze(u'non-existent-file', dict())
metadata = PlayabilityAnalyzer.analyze(u"non-existent-file", dict())


def test_mp3_utf8():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_dualmono():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_jointstereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_simplestereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata)


def test_mp3_mono():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)


def test_ogg_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)


@raises(UnplayableFileError)
def test_invalid_wma():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)


def test_m4a_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)


def test_wav_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
check_default_metadata(metadata)


@raises(UnplayableFileError)
def test_unknown():
metadata = PlayabilityAnalyzer.analyze(u'http://www.google.com', dict())
check_default_metadata(metadata)
metadata = PlayabilityAnalyzer.analyze(u"http://www.google.com", dict())
check_default_metadata(metadata)

@@ -5,80 +5,134 @@ from airtime_analyzer.replaygain_analyzer import ReplayGainAnalyzer


def check_default_metadata(metadata):
''' Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
"""Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
:param metadata: a metadata dictionary
:return: Nothing
'''
'''
"""
"""
# We give python-rgain some leeway here by specifying a tolerance. It's not perfectly consistent across codecs...
assert abs(metadata['cuein']) < tolerance_seconds
assert abs(metadata['cueout'] - length_seconds) < tolerance_seconds
'''
"""
tolerance = 0.60
expected_replaygain = 5.2
print(metadata['replay_gain'])
assert abs(metadata['replay_gain'] - expected_replaygain) < tolerance
print(metadata["replay_gain"])
assert abs(metadata["replay_gain"] - expected_replaygain) < tolerance


def test_missing_replaygain():
old_rg = ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = 'foosdaf'
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = "foosdaf"
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg  # Need to put this back


def test_invalid_filepath():
metadata = ReplayGainAnalyzer.analyze(u'non-existent-file', dict())
metadata = ReplayGainAnalyzer.analyze(u"non-existent-file", dict())


def test_mp3_utf8():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata)


test_mp3_utf8.rgain = True


def test_mp3_dualmono():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)


test_mp3_dualmono.rgain = True


def test_mp3_jointstereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)


test_mp3_jointstereo.rgain = True


def test_mp3_simplestereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)


test_mp3_simplestereo.rgain = True


def test_mp3_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata)


test_mp3_stereo.rgain = True


def test_mp3_mono():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)


test_mp3_mono.rgain = True


def test_ogg_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)


test_ogg_stereo.rgain = True


def test_invalid_wma():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)


test_invalid_wma.rgain = True


def test_mp3_missing_id3_header():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3", dict()
)


test_mp3_missing_id3_header.rgain = True


def test_m4a_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)


test_m4a_stereo.rgain = True

''' WAVE is not supported by python-rgain yet
""" WAVE is not supported by python-rgain yet
def test_wav_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
check_default_metadata(metadata)
test_wav_stereo.rgain = True
'''
"""
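The `.rgain = True` assignments tag each test function with a nose attribute so the ReplayGain-dependent cases can be selected or skipped as a group; this is presumably why the original bare `test_ogg_stereo = True` (fixed above) was a bug: it replaced the function with a boolean instead of tagging it. A hedged sketch of the same tagging via nose's attrib plugin decorator, which should be equivalent to setting the attribute by hand:

from nose.plugins.attrib import attr

@attr("rgain")  # Same effect as: test_example.rgain = True
def test_example():
    pass

# Selected with:  nosetests -a rgain
# Excluded with:  nosetests -a '!rgain'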
@@ -6,23 +6,28 @@ import socket
import requests
from requests.auth import AuthBase


def get_protocol(config):
positive_values = ['Yes', 'yes', 'True', 'true', True]
port = config['general'].get('base_port', 80)
force_ssl = config['general'].get('force_ssl', False)
positive_values = ["Yes", "yes", "True", "true", True]
port = config["general"].get("base_port", 80)
force_ssl = config["general"].get("force_ssl", False)
if force_ssl in positive_values:
protocol = 'https'
protocol = "https"
else:
protocol = config['general'].get('protocol')
protocol = config["general"].get("protocol")
if not protocol:
protocol = str(("http", "https")[int(port) == 443])
return protocol


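For reference, the selection logic above is: force_ssl wins, then an explicit protocol setting, then port 443 implies https, otherwise http. A quick worked example, assuming get_protocol above is importable (the config dicts are illustrative):

config = {"general": {"base_port": 443, "force_ssl": "no"}}
# force_ssl "no" is not a positive value and no explicit "protocol" key is set,
# so the port fallback applies: ("http", "https")[int(443) == 443] -> "https".
assert get_protocol(config) == "https"

config = {"general": {"base_port": 8080, "force_ssl": True}}
assert get_protocol(config) == "https"  # force_ssl short-circuits everything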
class UrlParamDict(dict):
def __missing__(self, key):
return '{' + key + '}'
return "{" + key + "}"
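UrlParamDict is the trick that lets a URL template be filled in several passes: str.format_map calls __missing__ for absent keys, and returning '{key}' puts the placeholder back verbatim. A small standalone demonstration (the class name here is illustrative):

class PartialDict(dict):
    def __missing__(self, key):
        return "{" + key + "}"  # Leave unknown placeholders intact.

template = "{protocol}://{host}/{action}"
step1 = template.format_map(PartialDict(protocol="https", host="example.com"))
assert step1 == "https://example.com/{action}"  # {action} survives for later
step2 = step1.format_map(PartialDict(action="version"))
assert step2 == "https://example.com/version"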


class UrlException(Exception):
pass

class UrlException(Exception): pass

class IncompleteUrl(UrlException):
def __init__(self, url):
@@ -31,6 +36,7 @@ class IncompleteUrl(UrlException):
def __str__(self):
return "Incomplete url: '{}'".format(self.url)


class UrlBadParam(UrlException):
def __init__(self, url, param):
self.url = url
@@ -39,17 +45,20 @@ class UrlBadParam(UrlException):
def __str__(self):
return "Bad param '{}' passed into url: '{}'".format(self.param, self.url)


class KeyAuth(AuthBase):
def __init__(self, key):
self.key = key

def __call__(self, r):
r.headers['Authorization'] = "Api-Key {}".format(self.key)
r.headers["Authorization"] = "Api-Key {}".format(self.key)
return r


class ApcUrl:
""" A safe and testable abstraction for filling in parameters in
"""A safe and testable abstraction for filling in parameters in
api_client.cfg"""

def __init__(self, base_url):
self.base_url = base_url

@@ -63,17 +72,18 @@ class ApcUrl:
return ApcUrl(temp_url)

def url(self):
if '{' in self.base_url:
if "{" in self.base_url:
raise IncompleteUrl(self.base_url)
else:
return self.base_url


class ApiRequest:
API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
API_HTTP_REQUEST_TIMEOUT = 30  # 30 second HTTP request timeout

def __init__(self, name, url, logger=None, api_key=None):
self.name = name
self.url = url
self.url = url
self.__req = None
if logger is None:
self.logger = logging
@@ -86,36 +96,45 @@ class ApiRequest:
self.logger.debug(final_url)
try:
if _post_data:
response = requests.post(final_url,
data=_post_data, auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT)
response = requests.post(
final_url,
data=_post_data,
auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT,
)
else:
response = requests.get(final_url, params=params, auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT)
if 'application/json' in response.headers['content-type']:
response = requests.get(
final_url,
params=params,
auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT,
)
if "application/json" in response.headers["content-type"]:
return response.json()
return response
except requests.exceptions.Timeout:
self.logger.error('HTTP request to %s timed out', final_url)
self.logger.error("HTTP request to %s timed out", final_url)
raise

def req(self, *args, **kwargs):
self.__req = lambda : self(*args, **kwargs)
self.__req = lambda: self(*args, **kwargs)
return self

def retry(self, n, delay=5):
"""Try to send the request n times. If it still fails after n times,
we finally raise the exception"""
for i in range(0,n-1):
for i in range(0, n - 1):
try:
return self.__req()
except Exception:
time.sleep(delay)
return self.__req()


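The req()/retry() pair above separates building a request from sending it: req() freezes the call into a thunk, and retry() replays that thunk up to n times with a delay. A standalone sketch of the same shape (no Airtime classes involved; the class name is illustrative):

import time

class Retryable:
    def __init__(self, fn):
        self._fn = fn
        self._thunk = None

    def req(self, *args, **kwargs):
        self._thunk = lambda: self._fn(*args, **kwargs)  # Freeze the call.
        return self

    def retry(self, n, delay=5):
        for _ in range(n - 1):
            try:
                return self._thunk()
            except Exception:
                time.sleep(delay)
        return self._thunk()  # Last attempt propagates its exception.

flaky = Retryable(lambda x: x * 2)
assert flaky.req(21).retry(3, delay=0) == 42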
class RequestProvider:
""" Creates the available ApiRequest instance that can be read from
a config file """
"""Creates the available ApiRequest instance that can be read from
a config file"""

def __init__(self, cfg, endpoints):
self.config = cfg
self.requests = {}
@@ -123,27 +142,29 @@ class RequestProvider:
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]

protocol = get_protocol(self.config)
base_port = self.config['general']['base_port']
base_url = self.config['general']['base_url']
base_dir = self.config['general']['base_dir']
api_base = self.config['api_base']
base_port = self.config["general"]["base_port"]
base_url = self.config["general"]["base_url"]
base_dir = self.config["general"]["base_dir"]
api_base = self.config["api_base"]
api_url = "{protocol}://{base_url}:{base_port}/{base_dir}{api_base}/{action}".format_map(
UrlParamDict(protocol=protocol,
base_url=base_url,
base_port=base_port,
base_dir=base_dir,
api_base=api_base
))
UrlParamDict(
protocol=protocol,
base_url=base_url,
base_port=base_port,
base_dir=base_dir,
api_base=api_base,
)
)
self.url = ApcUrl(api_url)

# Now we must discover the possible actions
for action_name, action_value in endpoints.items():
new_url = self.url.params(action=action_value)
if '{api_key}' in action_value:
new_url = new_url.params(api_key=self.config["general"]['api_key'])
self.requests[action_name] = ApiRequest(action_name,
new_url,
api_key=self.config['general']['api_key'])
if "{api_key}" in action_value:
new_url = new_url.params(api_key=self.config["general"]["api_key"])
self.requests[action_name] = ApiRequest(
action_name, new_url, api_key=self.config["general"]["api_key"]
)

def available_requests(self):
return list(self.requests.keys())
@@ -157,15 +178,20 @@ class RequestProvider:
else:
return super(RequestProvider, self).__getattribute__(attr)


def time_in_seconds(time):
return time.hour * 60 * 60 + \
time.minute * 60 + \
time.second + \
time.microsecond / 1000000.0
return (
time.hour * 60 * 60
+ time.minute * 60
+ time.second
+ time.microsecond / 1000000.0
)


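A quick sanity check of the conversion above for 01:02:03.500000: 1*3600 + 2*60 + 3 + 500000/1000000 = 3723.5 seconds. Assuming the function above is importable:

import datetime

t = datetime.time(hour=1, minute=2, second=3, microsecond=500000)
assert time_in_seconds(t) == 3723.5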
def time_in_milliseconds(time):
return time_in_seconds(time) * 1000


def fromisoformat(time_string):
"""
This is required for Python 3.6 support. datetime.time.fromisoformat was

@ -26,58 +26,112 @@ api_config = {}
api_endpoints = {}

# URL to get the version number of the server API
api_endpoints['version_url'] = 'version/api_key/{api_key}'
#URL to register a components IP Address with the central web server
api_endpoints['register_component'] = 'register-component/format/json/api_key/{api_key}/component/{component}'
api_endpoints["version_url"] = "version/api_key/{api_key}"
# URL to register a components IP Address with the central web server
api_endpoints[
    "register_component"
] = "register-component/format/json/api_key/{api_key}/component/{component}"

#media-monitor
api_endpoints['media_setup_url'] = 'media-monitor-setup/format/json/api_key/{api_key}'
api_endpoints['upload_recorded'] = 'upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}'
api_endpoints['update_media_url'] = 'reload-metadata/format/json/api_key/{api_key}/mode/{mode}'
api_endpoints['list_all_db_files'] = 'list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}'
api_endpoints['list_all_watched_dirs'] = 'list-all-watched-dirs/format/json/api_key/{api_key}'
api_endpoints['add_watched_dir'] = 'add-watched-dir/format/json/api_key/{api_key}/path/{path}'
api_endpoints['remove_watched_dir'] = 'remove-watched-dir/format/json/api_key/{api_key}/path/{path}'
api_endpoints['set_storage_dir'] = 'set-storage-dir/format/json/api_key/{api_key}/path/{path}'
api_endpoints['update_fs_mount'] = 'update-file-system-mount/format/json/api_key/{api_key}'
api_endpoints['reload_metadata_group'] = 'reload-metadata-group/format/json/api_key/{api_key}'
api_endpoints['handle_watched_dir_missing'] = 'handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}'
#show-recorder
api_endpoints['show_schedule_url'] = 'recorded-shows/format/json/api_key/{api_key}'
api_endpoints['upload_file_url'] = 'rest/media'
api_endpoints['upload_retries'] = '3'
api_endpoints['upload_wait'] = '60'
#pypo
api_endpoints['export_url'] = 'schedule/api_key/{api_key}'
api_endpoints['get_media_url'] = 'get-media/file/{file}/api_key/{api_key}'
api_endpoints['update_item_url'] = 'notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}'
api_endpoints['update_start_playing_url'] = 'notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/'
api_endpoints['get_stream_setting'] = 'get-stream-setting/format/json/api_key/{api_key}/'
api_endpoints['update_liquidsoap_status'] = 'update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}'
api_endpoints['update_source_status'] = 'update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}'
api_endpoints['check_live_stream_auth'] = 'check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}'
api_endpoints['get_bootstrap_info'] = 'get-bootstrap-info/format/json/api_key/{api_key}'
api_endpoints['get_files_without_replay_gain'] = 'get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}'
api_endpoints['update_replay_gain_value'] = 'update-replay-gain-value/format/json/api_key/{api_key}'
api_endpoints['notify_webstream_data'] = 'notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json'
api_endpoints['notify_liquidsoap_started'] = 'rabbitmq-do-push/api_key/{api_key}/format/json'
api_endpoints['get_stream_parameters'] = 'get-stream-parameters/api_key/{api_key}/format/json'
api_endpoints['push_stream_stats'] = 'push-stream-stats/api_key/{api_key}/format/json'
api_endpoints['update_stream_setting_table'] = 'update-stream-setting-table/api_key/{api_key}/format/json'
api_endpoints['get_files_without_silan_value'] = 'get-files-without-silan-value/api_key/{api_key}'
api_endpoints['update_cue_values_by_silan'] = 'update-cue-values-by-silan/api_key/{api_key}'
api_endpoints['update_metadata_on_tunein'] = 'update-metadata-on-tunein/api_key/{api_key}'
api_config['api_base'] = 'api'
api_config['bin_dir'] = '/usr/lib/airtime/api_clients/'
# media-monitor
api_endpoints["media_setup_url"] = "media-monitor-setup/format/json/api_key/{api_key}"
api_endpoints[
    "upload_recorded"
] = "upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}"
api_endpoints[
    "update_media_url"
] = "reload-metadata/format/json/api_key/{api_key}/mode/{mode}"
api_endpoints[
    "list_all_db_files"
] = "list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}"
api_endpoints[
    "list_all_watched_dirs"
] = "list-all-watched-dirs/format/json/api_key/{api_key}"
api_endpoints[
    "add_watched_dir"
] = "add-watched-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints[
    "remove_watched_dir"
] = "remove-watched-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints[
    "set_storage_dir"
] = "set-storage-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints[
    "update_fs_mount"
] = "update-file-system-mount/format/json/api_key/{api_key}"
api_endpoints[
    "reload_metadata_group"
] = "reload-metadata-group/format/json/api_key/{api_key}"
api_endpoints[
    "handle_watched_dir_missing"
] = "handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}"
# show-recorder
api_endpoints["show_schedule_url"] = "recorded-shows/format/json/api_key/{api_key}"
api_endpoints["upload_file_url"] = "rest/media"
api_endpoints["upload_retries"] = "3"
api_endpoints["upload_wait"] = "60"
# pypo
api_endpoints["export_url"] = "schedule/api_key/{api_key}"
api_endpoints["get_media_url"] = "get-media/file/{file}/api_key/{api_key}"
api_endpoints[
    "update_item_url"
] = "notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}"
api_endpoints[
    "update_start_playing_url"
] = "notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/"
api_endpoints[
    "get_stream_setting"
] = "get-stream-setting/format/json/api_key/{api_key}/"
api_endpoints[
    "update_liquidsoap_status"
] = "update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}"
api_endpoints[
    "update_source_status"
] = "update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}"
api_endpoints[
    "check_live_stream_auth"
] = "check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}"
api_endpoints["get_bootstrap_info"] = "get-bootstrap-info/format/json/api_key/{api_key}"
api_endpoints[
    "get_files_without_replay_gain"
] = "get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}"
api_endpoints[
    "update_replay_gain_value"
] = "update-replay-gain-value/format/json/api_key/{api_key}"
api_endpoints[
    "notify_webstream_data"
] = "notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json"
api_endpoints[
    "notify_liquidsoap_started"
] = "rabbitmq-do-push/api_key/{api_key}/format/json"
api_endpoints[
    "get_stream_parameters"
] = "get-stream-parameters/api_key/{api_key}/format/json"
api_endpoints["push_stream_stats"] = "push-stream-stats/api_key/{api_key}/format/json"
api_endpoints[
    "update_stream_setting_table"
] = "update-stream-setting-table/api_key/{api_key}/format/json"
api_endpoints[
    "get_files_without_silan_value"
] = "get-files-without-silan-value/api_key/{api_key}"
api_endpoints[
    "update_cue_values_by_silan"
] = "update-cue-values-by-silan/api_key/{api_key}"
api_endpoints[
    "update_metadata_on_tunein"
] = "update-metadata-on-tunein/api_key/{api_key}"
api_config["api_base"] = "api"
api_config["bin_dir"] = "/usr/lib/airtime/api_clients/"


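These endpoint templates are ordinary str.format-style placeholder strings. An illustration of the substitution (not part of the commit; the api_clients code does this through its ApcUrl/ApiRequest helpers, but the effect is equivalent):

import urllib.parse

template = "version/api_key/{api_key}"
url = "http://localhost/api/" + template.format(api_key="TEST_KEY")
print(url)  # http://localhost/api/version/api_key/TEST_KEY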
################################################################################
# Airtime API Version 1 Client
################################################################################
class AirtimeApiClient(object):
    def __init__(self, logger=None,config_path='/etc/airtime/airtime.conf'):
        if logger is None: self.logger = logging
        else: self.logger = logger
    def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"):
        if logger is None:
            self.logger = logging
        else:
            self.logger = logger

        # loading config file
        try:
@ -85,16 +139,18 @@ class AirtimeApiClient(object):
            self.config.update(api_config)
            self.services = RequestProvider(self.config, api_endpoints)
        except Exception as e:
            self.logger.exception('Error loading config file: %s', config_path)
            self.logger.exception("Error loading config file: %s", config_path)
            sys.exit(1)

    def __get_airtime_version(self):
        try: return self.services.version_url()['airtime_version']
        except Exception: return -1
        try:
            return self.services.version_url()["airtime_version"]
        except Exception:
            return -1

    def __get_api_version(self):
        try:
            return self.services.version_url()['api_version']
            return self.services.version_url()["api_version"]
        except Exception as e:
            self.logger.exception(e)
            return -1

@ -105,25 +161,30 @@ class AirtimeApiClient(object):
        # logger.info('Airtime version found: ' + str(version))
        if api_version == -1:
            if verbose:
                logger.info('Unable to get Airtime API version number.\n')
                logger.info("Unable to get Airtime API version number.\n")
            return False
        elif api_version[0:3] != AIRTIME_API_VERSION[0:3]:
            if verbose:
                logger.info('Airtime API version found: ' + str(api_version))
                logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION)
                logger.info("Airtime API version found: " + str(api_version))
                logger.info(
                    "pypo is only compatible with API version: " + AIRTIME_API_VERSION
                )
            return False
        else:
            if verbose:
                logger.info('Airtime API version found: ' + str(api_version))
                logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION)
                logger.info("Airtime API version found: " + str(api_version))
                logger.info(
                    "pypo is only compatible with API version: " + AIRTIME_API_VERSION
                )
            return True


    def get_schedule(self):
        # TODO : properly refactor this routine
        # For now the return type is a little messed up for compatibility reasons
        try: return (True, self.services.export_url())
        except: return (False, None)
        try:
            return (True, self.services.export_url())
        except:
            return (False, None)

    def notify_liquidsoap_started(self):
        try:
@ -132,9 +193,9 @@ class AirtimeApiClient(object):
            self.logger.exception(e)

    def notify_media_item_start_playing(self, media_id):
        """ This is a callback from liquidsoap, we use this to notify
        """This is a callback from liquidsoap, we use this to notify
        about the currently playing *song*. We get passed a JSON string
        which we handed to liquidsoap in get_liquidsoap_data(). """
        which we handed to liquidsoap in get_liquidsoap_data()."""
        try:
            return self.services.update_start_playing_url(media_id=media_id)
        except Exception as e:
@ -150,7 +211,7 @@ class AirtimeApiClient(object):

    def upload_recorded_show(self, files, show_id):
        logger = self.logger
        response = ''
        response = ""

        retries = int(self.config["upload_retries"])
        retries_wait = int(self.config["upload_wait"])
@ -165,7 +226,9 @@ class AirtimeApiClient(object):
            logger.debug(ApiRequest.API_HTTP_REQUEST_TIMEOUT)

            try:
                request = requests.post(url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT))
                request = requests.post(
                    url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT)
                )
                response = request.json()
                logger.debug(response)

@ -199,7 +262,7 @@ class AirtimeApiClient(object):
            except Exception as e:
                self.logger.exception(e)

            #wait some time before next retry
            # wait some time before next retry
            time.sleep(retries_wait)

        return response
@ -207,42 +270,49 @@ class AirtimeApiClient(object):
    def check_live_stream_auth(self, username, password, dj_type):
        try:
            return self.services.check_live_stream_auth(
                username=username, password=password, djtype=dj_type)
                username=username, password=password, djtype=dj_type
            )
        except Exception as e:
            self.logger.exception(e)
            return {}

    def construct_url(self,config_action_key):
    def construct_url(self, config_action_key):
        """Constructs the base url for every request"""
        # TODO : Make other methods in this class use this method.
        if self.config["general"]["base_dir"].startswith("/"):
            self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
        protocol = get_protocol(self.config)
        url = "%s://%s:%s/%s%s/%s" % \
            (protocol,
            self.config["general"]["base_url"], str(self.config["general"]["base_port"]),
            self.config["general"]["base_dir"], self.config["api_base"],
            self.config[config_action_key])
        url = "%s://%s:%s/%s%s/%s" % (
            protocol,
            self.config["general"]["base_url"],
            str(self.config["general"]["base_port"]),
            self.config["general"]["base_dir"],
            self.config["api_base"],
            self.config[config_action_key],
        )
        url = url.replace("%%api_key%%", self.config["general"]["api_key"])
        return url

    def construct_rest_url(self,config_action_key):
    def construct_rest_url(self, config_action_key):
        """Constructs the base url for RESTful requests"""
        if self.config["general"]["base_dir"].startswith("/"):
            self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
        protocol = get_protocol(self.config)
        url = "%s://%s:@%s:%s/%s/%s" % \
            (protocol, self.config["general"]["api_key"],
            self.config["general"]["base_url"], str(self.config["general"]["base_port"]),
            self.config["general"]["base_dir"],
            self.config[config_action_key])
        url = "%s://%s:@%s:%s/%s/%s" % (
            protocol,
            self.config["general"]["api_key"],
            self.config["general"]["base_url"],
            str(self.config["general"]["base_port"]),
            self.config["general"]["base_dir"],
            self.config[config_action_key],
        )
        return url


    """
    Caller of this method needs to catch any exceptions such as
    ValueError thrown by json.loads or URLError by urllib2.urlopen
    """

    def setup_media_monitor(self):
        return self.services.media_setup_url()

@ -264,49 +334,55 @@ class AirtimeApiClient(object):
        # filter but here we prefer a little more verbosity to help
        # debugging
        for action in action_list:
            if not 'mode' in action:
                self.logger.debug("Warning: Trying to send a request element without a 'mode'")
                self.logger.debug("Here is the request: '%s'" % str(action) )
            if not "mode" in action:
                self.logger.debug(
                    "Warning: Trying to send a request element without a 'mode'"
                )
                self.logger.debug("Here is the request: '%s'" % str(action))
            else:
                # We alias the value of is_record to true or false no
                # matter what it is based on if it's absent in the action
                if 'is_record' not in action:
                    action['is_record'] = 0
                if "is_record" not in action:
                    action["is_record"] = 0
                valid_actions.append(action)
        # Note that we must prefix every key with: mdX where x is a number
        # Is there a way to format the next line a little better? The
        # parenthesis make the code almost unreadable
        md_list = dict((("md%d" % i), json.dumps(md)) \
                for i,md in enumerate(valid_actions))
        md_list = dict(
            (("md%d" % i), json.dumps(md)) for i, md in enumerate(valid_actions)
        )
        # For testing we add the following "dry" parameter to tell the
        # controller not to actually do any changes
        if dry: md_list['dry'] = 1
        if dry:
            md_list["dry"] = 1
        self.logger.info("Pumping out %d requests..." % len(valid_actions))
        return self.services.reload_metadata_group(_post_data=md_list)

    #returns a list of all db files for a given directory in JSON format:
    #{"files":["path/to/file1", "path/to/file2"]}
    #Note that these are relative paths to the given directory. The full
    #path is not returned.
    # returns a list of all db files for a given directory in JSON format:
    # {"files":["path/to/file1", "path/to/file2"]}
    # Note that these are relative paths to the given directory. The full
    # path is not returned.
    def list_all_db_files(self, dir_id, all_files=True):
        logger = self.logger
        try:
            all_files = "1" if all_files else "0"
            response = self.services.list_all_db_files(dir_id=dir_id,
                                                       all=all_files)
            response = self.services.list_all_db_files(dir_id=dir_id, all=all_files)
        except Exception as e:
            response = {}
            logger.error("Exception: %s", e)
        try:
            return response["files"]
        except KeyError:
            self.logger.error("Could not find index 'files' in dictionary: %s",
                              str(response))
            self.logger.error(
                "Could not find index 'files' in dictionary: %s", str(response)
            )
            return []

    """
    Caller of this method needs to catch any exceptions such as
    ValueError thrown by json.loads or URLError by urllib2.urlopen
    """

    def list_all_watched_dirs(self):
        return self.services.list_all_watched_dirs()

@ -314,6 +390,7 @@ class AirtimeApiClient(object):
    Caller of this method needs to catch any exceptions such as
    ValueError thrown by json.loads or URLError by urllib2.urlopen
    """

    def add_watched_dir(self, path):
        return self.services.add_watched_dir(path=base64.b64encode(path))

@ -321,6 +398,7 @@ class AirtimeApiClient(object):
    Caller of this method needs to catch any exceptions such as
    ValueError thrown by json.loads or URLError by urllib2.urlopen
    """

    def remove_watched_dir(self, path):
        return self.services.remove_watched_dir(path=base64.b64encode(path))

@ -328,6 +406,7 @@ class AirtimeApiClient(object):
    Caller of this method needs to catch any exceptions such as
    ValueError thrown by json.loads or URLError by urllib2.urlopen
    """

    def set_storage_dir(self, path):
        return self.services.set_storage_dir(path=base64.b64encode(path))

@ -335,15 +414,16 @@ class AirtimeApiClient(object):
    Caller of this method needs to catch any exceptions such as
    ValueError thrown by json.loads or URLError by urllib2.urlopen
    """

    def get_stream_setting(self):
        return self.services.get_stream_setting()

    def register_component(self, component):
        """ Purpose of this method is to contact the server with a "Hey its
        """Purpose of this method is to contact the server with a "Hey its
        me!" message. This will allow the server to register the component's
        (component = media-monitor, pypo etc.) ip address, and later use it
        to query monit via monit's http service, or download log files via a
        http server. """
        http server."""
        return self.services.register_component(component=component)

    def notify_liquidsoap_status(self, msg, stream_id, time):
@ -351,24 +431,24 @@ class AirtimeApiClient(object):
        try:
            post_data = {"msg_post": msg}

            #encoded_msg is no longer used server_side!!
            encoded_msg = urllib.parse.quote('dummy')
            self.services.update_liquidsoap_status.req(post_data,
                                                       msg=encoded_msg,
                                                       stream_id=stream_id,
                                                       boot_time=time).retry(5)
            # encoded_msg is no longer used server_side!!
            encoded_msg = urllib.parse.quote("dummy")
            self.services.update_liquidsoap_status.req(
                post_data, msg=encoded_msg, stream_id=stream_id, boot_time=time
            ).retry(5)
        except Exception as e:
            self.logger.exception(e)

    def notify_source_status(self, sourcename, status):
        try:
            return self.services.update_source_status.req(sourcename=sourcename,
                                                          status=status).retry(5)
            return self.services.update_source_status.req(
                sourcename=sourcename, status=status
            ).retry(5)
        except Exception as e:
            self.logger.exception(e)

    def get_bootstrap_info(self):
        """ Retrieve information needed on bootstrap time """
        """Retrieve information needed on bootstrap time"""
        return self.services.get_bootstrap_info()

    def get_files_without_replay_gain_value(self, dir_id):
@ -377,7 +457,7 @@ class AirtimeApiClient(object):
        calculated. This list of files is downloaded into a file and the path
        to this file is the return value.
        """
        #http://localhost/api/get-files-without-replay-gain/dir_id/1
        # http://localhost/api/get-files-without-replay-gain/dir_id/1
        try:
            return self.services.get_files_without_replay_gain(dir_id=dir_id)
        except Exception as e:
@ -401,25 +481,31 @@ class AirtimeApiClient(object):
        'pairs' is a list of pairs in (x, y), where x is the file's database
        row id and y is the file's replay_gain value in dB
        """
        self.logger.debug(self.services.update_replay_gain_value(
            _post_data={'data': json.dumps(pairs)}))

        self.logger.debug(
            self.services.update_replay_gain_value(
                _post_data={"data": json.dumps(pairs)}
            )
        )

    def update_cue_values_by_silan(self, pairs):
        """
        'pairs' is a list of pairs in (x, y), where x is the file's database
        row id and y is the file's cue values in dB
        """
        return self.services.update_cue_values_by_silan(_post_data={'data': json.dumps(pairs)})

        return self.services.update_cue_values_by_silan(
            _post_data={"data": json.dumps(pairs)}
        )

    def notify_webstream_data(self, data, media_id):
        """
        Update the server with the latest metadata we've received from the
        external webstream
        """
        self.logger.info( self.services.notify_webstream_data.req(
            _post_data={'data':data}, media_id=str(media_id)).retry(5))
        self.logger.info(
            self.services.notify_webstream_data.req(
                _post_data={"data": data}, media_id=str(media_id)
            ).retry(5)
        )

    def get_stream_parameters(self):
        response = self.services.get_stream_parameters()
@ -428,12 +514,16 @@ class AirtimeApiClient(object):

    def push_stream_stats(self, data):
        # TODO : users of this method should do their own error handling
        response = self.services.push_stream_stats(_post_data={'data': json.dumps(data)})
        response = self.services.push_stream_stats(
            _post_data={"data": json.dumps(data)}
        )
        return response

    def update_stream_setting_table(self, data):
        try:
            response = self.services.update_stream_setting_table(_post_data={'data': json.dumps(data)})
            response = self.services.update_stream_setting_table(
                _post_data={"data": json.dumps(data)}
            )
            return response
        except Exception as e:
            self.logger.exception(e)
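A recurring pattern in the client above is that POST payloads are wrapped as a single "data" form field containing JSON. A minimal sketch of that envelope (illustrative only; the field name comes from the _post_data calls above):

import json

stats = [{"mount_name": "airtime_128", "num_listeners": "4"}]
post_data = {"data": json.dumps(stats)}
# A form-encoded POST of post_data sends one field named "data" whose
# value is the JSON-serialized list.
print(post_data)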
@ -18,17 +18,18 @@ LIBRETIME_API_VERSION = "2.0"
api_config = {}
api_endpoints = {}

api_endpoints['version_url'] = 'version/'
api_endpoints['schedule_url'] = 'schedule/'
api_endpoints['webstream_url'] = 'webstreams/{id}/'
api_endpoints['show_instance_url'] = 'show-instances/{id}/'
api_endpoints['show_url'] = 'shows/{id}/'
api_endpoints['file_url'] = 'files/{id}/'
api_endpoints['file_download_url'] = 'files/{id}/download/'
api_config['api_base'] = 'api/v2'
api_endpoints["version_url"] = "version/"
api_endpoints["schedule_url"] = "schedule/"
api_endpoints["webstream_url"] = "webstreams/{id}/"
api_endpoints["show_instance_url"] = "show-instances/{id}/"
api_endpoints["show_url"] = "shows/{id}/"
api_endpoints["file_url"] = "files/{id}/"
api_endpoints["file_download_url"] = "files/{id}/download/"
api_config["api_base"] = "api/v2"


class AirtimeApiClient:
    def __init__(self, logger=None, config_path='/etc/airtime/airtime.conf'):
    def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"):
        if logger is None:
            self.logger = logging
        else:
@ -39,87 +40,89 @@ class AirtimeApiClient:
            self.config.update(api_config)
            self.services = RequestProvider(self.config, api_endpoints)
        except Exception as e:
            self.logger.exception('Error loading config file: %s', config_path)
            self.logger.exception("Error loading config file: %s", config_path)
            sys.exit(1)

    def get_schedule(self):
        current_time = datetime.datetime.utcnow()
        end_time = current_time + datetime.timedelta(hours=1)

        str_current = current_time.isoformat(timespec='seconds')
        str_end = end_time.isoformat(timespec='seconds')
        data = self.services.schedule_url(params={
            'ends__range': ('{}Z,{}Z'.format(str_current, str_end)),
        })
        result = {'media': {} }
        for item in data:
            start = isoparse(item['starts'])
            key = start.strftime('%YYYY-%mm-%dd-%HH-%MM-%SS')
            end = isoparse(item['ends'])

            show_instance = self.services.show_instance_url(id=item['instance_id'])
            show = self.services.show_url(id=show_instance['show_id'])

            result['media'][key] = {
                'start': start.strftime('%Y-%m-%d-%H-%M-%S'),
                'end': end.strftime('%Y-%m-%d-%H-%M-%S'),
                'row_id': item['id']
        str_current = current_time.isoformat(timespec="seconds")
        str_end = end_time.isoformat(timespec="seconds")
        data = self.services.schedule_url(
            params={
                "ends__range": ("{}Z,{}Z".format(str_current, str_end)),
            }
            current = result['media'][key]
            if item['file']:
                current['independent_event'] = False
                current['type'] = 'file'
                current['id'] = item['file_id']
        )
        result = {"media": {}}
        for item in data:
            start = isoparse(item["starts"])
            key = start.strftime("%YYYY-%mm-%dd-%HH-%MM-%SS")
            end = isoparse(item["ends"])

                fade_in = time_in_milliseconds(fromisoformat(item['fade_in']))
                fade_out = time_in_milliseconds(fromisoformat(item['fade_out']))
            show_instance = self.services.show_instance_url(id=item["instance_id"])
            show = self.services.show_url(id=show_instance["show_id"])

                cue_in = time_in_seconds(fromisoformat(item['cue_in']))
                cue_out = time_in_seconds(fromisoformat(item['cue_out']))
            result["media"][key] = {
                "start": start.strftime("%Y-%m-%d-%H-%M-%S"),
                "end": end.strftime("%Y-%m-%d-%H-%M-%S"),
                "row_id": item["id"],
            }
            current = result["media"][key]
            if item["file"]:
                current["independent_event"] = False
                current["type"] = "file"
                current["id"] = item["file_id"]

                current['fade_in'] = fade_in
                current['fade_out'] = fade_out
                current['cue_in'] = cue_in
                current['cue_out'] = cue_out
                fade_in = time_in_milliseconds(fromisoformat(item["fade_in"]))
                fade_out = time_in_milliseconds(fromisoformat(item["fade_out"]))

                info = self.services.file_url(id=item['file_id'])
                current['metadata'] = info
                current['uri'] = item['file']
                current['filesize'] = info['filesize']
            elif item['stream']:
                current['independent_event'] = True
                current['id'] = item['stream_id']
                info = self.services.webstream_url(id=item['stream_id'])
                current['uri'] = info['url']
                current['type'] = 'stream_buffer_start'
                cue_in = time_in_seconds(fromisoformat(item["cue_in"]))
                cue_out = time_in_seconds(fromisoformat(item["cue_out"]))

                current["fade_in"] = fade_in
                current["fade_out"] = fade_out
                current["cue_in"] = cue_in
                current["cue_out"] = cue_out

                info = self.services.file_url(id=item["file_id"])
                current["metadata"] = info
                current["uri"] = item["file"]
                current["filesize"] = info["filesize"]
            elif item["stream"]:
                current["independent_event"] = True
                current["id"] = item["stream_id"]
                info = self.services.webstream_url(id=item["stream_id"])
                current["uri"] = info["url"]
                current["type"] = "stream_buffer_start"
                # Stream events are instantaneous
                current['end'] = current['start']
                current["end"] = current["start"]

                result['{}_0'.format(key)] = {
                    'id': current['id'],
                    'type': 'stream_output_start',
                    'start': current['start'],
                    'end': current['start'],
                    'uri': current['uri'],
                    'row_id': current['row_id'],
                    'independent_event': current['independent_event'],
                result["{}_0".format(key)] = {
                    "id": current["id"],
                    "type": "stream_output_start",
                    "start": current["start"],
                    "end": current["start"],
                    "uri": current["uri"],
                    "row_id": current["row_id"],
                    "independent_event": current["independent_event"],
                }

                result[end.isoformat()] = {
                    'type': 'stream_buffer_end',
                    'start': current['end'],
                    'end': current['end'],
                    'uri': current['uri'],
                    'row_id': current['row_id'],
                    'independent_event': current['independent_event'],
                    "type": "stream_buffer_end",
                    "start": current["end"],
                    "end": current["end"],
                    "uri": current["uri"],
                    "row_id": current["row_id"],
                    "independent_event": current["independent_event"],
                }

                result['{}_0'.format(end.isoformat())] = {
                    'type': 'stream_output_end',
                    'start': current['end'],
                    'end': current['end'],
                    'uri': current['uri'],
                    'row_id': current['row_id'],
                    'independent_event': current['independent_event'],
                result["{}_0".format(end.isoformat())] = {
                    "type": "stream_output_end",
                    "start": current["end"],
                    "end": current["end"],
                    "uri": current["uri"],
                    "row_id": current["row_id"],
                    "independent_event": current["independent_event"],
                }
        return result
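To make the structure built by get_schedule easier to follow, here is a schematic of the dictionary it returns. The values are hypothetical; the keys mirror the assignments above (webstream items additionally add the *_0 helper entries shown in the elif branch):

# Schematic only -- values are made up; keys come from get_schedule() above.
schedule = {
    "media": {
        "<start-key>": {
            "start": "2021-01-01-12-00-00",
            "end": "2021-01-01-12-05-00",
            "row_id": 42,
            "type": "file",         # or "stream_buffer_start" for webstreams
            "independent_event": False,
            "id": 7,                # file_id or stream_id
            "fade_in": 500.0,       # milliseconds
            "fade_out": 500.0,
            "cue_in": 0.0,          # seconds
            "cue_out": 300.0,
            "metadata": {},         # full file record from files/{id}/
            "uri": "/path/to/file",
            "filesize": 4194304,
        }
    }
}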
@ -9,17 +9,19 @@ script_path = os.path.dirname(os.path.realpath(__file__))
print(script_path)
os.chdir(script_path)

setup(name='api_clients',
      version='2.0.0',
      description='LibreTime API Client',
      url='http://github.com/LibreTime/Libretime',
      author='LibreTime Contributors',
      license='AGPLv3',
      packages=['api_clients'],
      scripts=[],
      install_requires=[
          'configobj',
          'python-dateutil',
      ],
      zip_safe=False,
      data_files=[])
setup(
    name="api_clients",
    version="2.0.0",
    description="LibreTime API Client",
    url="http://github.com/LibreTime/Libretime",
    author="LibreTime Contributors",
    license="AGPLv3",
    packages=["api_clients"],
    scripts=[],
    install_requires=[
        "configobj",
        "python-dateutil",
    ],
    zip_safe=False,
    data_files=[],
)
@ -2,6 +2,7 @@
import unittest
from api_clients.utils import ApcUrl, UrlBadParam, IncompleteUrl


class TestApcUrl(unittest.TestCase):
    def test_init(self):
        url = "/testing"
@ -10,22 +11,23 @@ class TestApcUrl(unittest.TestCase):

    def test_params_1(self):
        u = ApcUrl("/testing/{key}")
        self.assertEqual(u.params(key='val').url(), '/testing/val')
        self.assertEqual(u.params(key="val").url(), "/testing/val")

    def test_params_2(self):
        u = ApcUrl('/testing/{key}/{api}/more_testing')
        full_url = u.params(key="AAA",api="BBB").url()
        self.assertEqual(full_url, '/testing/AAA/BBB/more_testing')
        u = ApcUrl("/testing/{key}/{api}/more_testing")
        full_url = u.params(key="AAA", api="BBB").url()
        self.assertEqual(full_url, "/testing/AAA/BBB/more_testing")

    def test_params_ex(self):
        u = ApcUrl("/testing/{key}")
        with self.assertRaises(UrlBadParam):
            u.params(bad_key='testing')
            u.params(bad_key="testing")

    def test_url(self):
        u = "one/two/three"
        self.assertEqual( ApcUrl(u).url(), u )
        self.assertEqual(ApcUrl(u).url(), u)

    def test_url_ex(self):
        u = ApcUrl('/{one}/{two}/three').params(two='testing')
        with self.assertRaises(IncompleteUrl): u.url()
        u = ApcUrl("/{one}/{two}/three").params(two="testing")
        with self.assertRaises(IncompleteUrl):
            u.url()
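The tests above double as documentation for ApcUrl: params() substitutes named placeholders and url() refuses to render until every placeholder is filled. A short usage sketch built only from the behavior the tests exercise (the {base}/{action} placeholders are hypothetical):

from api_clients.utils import ApcUrl, IncompleteUrl

u = ApcUrl("/{base}/{action}")
print(u.params(base="api", action="version").url())  # /api/version
try:
    u.params(base="api").url()  # one placeholder still unfilled
except IncompleteUrl:
    print("url() refuses partially-built URLs")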
@ -4,39 +4,43 @@ import json
from mock import MagicMock, patch
from api_clients.utils import ApcUrl, ApiRequest


class ResponseInfo:
    @property
    def headers(self):
        return {'content-type': 'application/json'}
        return {"content-type": "application/json"}

    def json(self):
        return {'ok', 'ok'}
        return {"ok", "ok"}


class TestApiRequest(unittest.TestCase):
    def test_init(self):
        u = ApiRequest('request_name', ApcUrl('/test/ing'))
        u = ApiRequest("request_name", ApcUrl("/test/ing"))
        self.assertEqual(u.name, "request_name")

    def test_call_json(self):
        ret = {'ok':'ok'}
        ret = {"ok": "ok"}
        read = MagicMock()
        read.headers = {'content-type': 'application/json'}
        read.headers = {"content-type": "application/json"}
        read.json = MagicMock(return_value=ret)
        u = 'http://localhost/testing'
        with patch('requests.get') as mock_method:
        u = "http://localhost/testing"
        with patch("requests.get") as mock_method:
            mock_method.return_value = read
            request = ApiRequest('mm', ApcUrl(u))()
            request = ApiRequest("mm", ApcUrl(u))()
            self.assertEqual(request, ret)

    def test_call_html(self):
        ret = '<html><head></head><body></body></html>'
        ret = "<html><head></head><body></body></html>"
        read = MagicMock()
        read.headers = {'content-type': 'application/html'}
        read.headers = {"content-type": "application/html"}
        read.text = MagicMock(return_value=ret)
        u = 'http://localhost/testing'
        with patch('requests.get') as mock_method:
        u = "http://localhost/testing"
        with patch("requests.get") as mock_method:
            mock_method.return_value = read
            request = ApiRequest('mm', ApcUrl(u))()
            request = ApiRequest("mm", ApcUrl(u))()
            self.assertEqual(request.text(), ret)

if __name__ == '__main__': unittest.main()

if __name__ == "__main__":
    unittest.main()
@ -6,18 +6,19 @@ from configobj import ConfigObj
from api_clients.version1 import api_config
from api_clients.utils import RequestProvider


class TestRequestProvider(unittest.TestCase):
    def setUp(self):
        self.cfg = api_config
        self.cfg['general'] = {}
        self.cfg['general']['base_dir'] = '/test'
        self.cfg['general']['base_port'] = 80
        self.cfg['general']['base_url'] = 'localhost'
        self.cfg['general']['api_key'] = 'TEST_KEY'
        self.cfg['api_base'] = 'api'
        self.cfg["general"] = {}
        self.cfg["general"]["base_dir"] = "/test"
        self.cfg["general"]["base_port"] = 80
        self.cfg["general"]["base_url"] = "localhost"
        self.cfg["general"]["api_key"] = "TEST_KEY"
        self.cfg["api_base"] = "api"

    def test_test(self):
        self.assertTrue('general' in self.cfg)
        self.assertTrue("general" in self.cfg)

    def test_init(self):
        rp = RequestProvider(self.cfg, {})
@ -25,12 +26,14 @@ class TestRequestProvider(unittest.TestCase):

    def test_contains(self):
        methods = {
            'upload_recorded': '/1/',
            'update_media_url': '/2/',
            'list_all_db_files': '/3/',
            "upload_recorded": "/1/",
            "update_media_url": "/2/",
            "list_all_db_files": "/3/",
        }
        rp = RequestProvider(self.cfg, methods)
        for meth in methods:
            self.assertTrue(meth in rp.requests)

if __name__ == '__main__': unittest.main()

if __name__ == "__main__":
    unittest.main()
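Taken together with the __getattribute__ override in the first hunk of this commit, RequestProvider appears to turn each configured endpoint name into a callable attribute. A sketch of that lookup, assuming the dispatch works as the override and these tests suggest (the endpoint dict is the same shape the tests build):

from api_clients.utils import RequestProvider

cfg = {
    "general": {"base_dir": "/test", "base_port": 80,
                "base_url": "localhost", "api_key": "TEST_KEY"},
    "api_base": "api",
}
rp = RequestProvider(cfg, {"version_url": "version/api_key/{api_key}"})
print(rp.available_requests())  # ['version_url']
# rp.version_url() would then perform the HTTP call via its ApiRequest.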
@ -4,13 +4,14 @@ import configparser
import unittest
from api_clients import utils


def get_force_ssl(value, useConfigParser):
    config = {}
    if useConfigParser:
        config = configparser.ConfigParser()
    config['general'] = {
        'base_port': 80,
        'force_ssl': value,
    config["general"] = {
        "base_port": 80,
        "force_ssl": value,
    }
    return utils.get_protocol(config)

@ -27,65 +28,65 @@ class TestTime(unittest.TestCase):

class TestGetProtocol(unittest.TestCase):
    def test_dict_config_empty_http(self):
        config = {'general': {}}
        config = {"general": {}}
        protocol = utils.get_protocol(config)
        self.assertEqual(protocol, 'http')
        self.assertEqual(protocol, "http")

    def test_dict_config_http(self):
        config = {
            'general': {
                'base_port': 80,
            "general": {
                "base_port": 80,
            },
        }
        protocol = utils.get_protocol(config)
        self.assertEqual(protocol, 'http')
        self.assertEqual(protocol, "http")

    def test_dict_config_https(self):
        config = {
            'general': {
                'base_port': 443,
            "general": {
                "base_port": 443,
            },
        }
        protocol = utils.get_protocol(config)
        self.assertEqual(protocol, 'https')
        self.assertEqual(protocol, "https")

    def test_dict_config_force_https(self):
        postive_values = ['yes', 'Yes', 'True', 'true', True]
        negative_values = ['no', 'No', 'False', 'false', False]
        postive_values = ["yes", "Yes", "True", "true", True]
        negative_values = ["no", "No", "False", "false", False]
        for value in postive_values:
            self.assertEqual(get_force_ssl(value, False), 'https')
            self.assertEqual(get_force_ssl(value, False), "https")
        for value in negative_values:
            self.assertEqual(get_force_ssl(value, False), 'http')
            self.assertEqual(get_force_ssl(value, False), "http")

    def test_configparser_config_empty_http(self):
        config = configparser.ConfigParser()
        config['general'] = {}
        config["general"] = {}
        protocol = utils.get_protocol(config)
        self.assertEqual(protocol, 'http')
        self.assertEqual(protocol, "http")

    def test_configparser_config_http(self):
        config = configparser.ConfigParser()
        config['general'] = {
            'base_port': 80,
        config["general"] = {
            "base_port": 80,
        }
        protocol = utils.get_protocol(config)
        self.assertEqual(protocol, 'http')
        self.assertEqual(protocol, "http")

    def test_configparser_config_https(self):
        config = configparser.ConfigParser()
        config['general'] = {
            'base_port': 443,
        config["general"] = {
            "base_port": 443,
        }
        protocol = utils.get_protocol(config)
        self.assertEqual(protocol, 'https')
        self.assertEqual(protocol, "https")

    def test_configparser_config_force_https(self):
        postive_values = ['yes', 'Yes', 'True', 'true', True]
        negative_values = ['no', 'No', 'False', 'false', False]
        postive_values = ["yes", "Yes", "True", "true", True]
        negative_values = ["no", "No", "False", "false", False]
        for value in postive_values:
            self.assertEqual(get_force_ssl(value, True), 'https')
            self.assertEqual(get_force_ssl(value, True), "https")
        for value in negative_values:
            self.assertEqual(get_force_ssl(value, True), 'http')
            self.assertEqual(get_force_ssl(value, True), "http")

    def test_fromisoformat(self):
        time = {
@ -96,4 +97,6 @@ class TestGetProtocol(unittest.TestCase):
        result = utils.fromisoformat(time_string)
        self.assertEqual(result, expected)

if __name__ == '__main__': unittest.main()

if __name__ == "__main__":
    unittest.main()
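These tests pin down get_protocol's contract without showing its body: port 443 or a truthy force_ssl yields "https", everything else "http". A sketch consistent with the cases exercised above (an assumption about the implementation, not a copy of it):

def get_protocol_sketch(config):
    # Assumed behaviour, reconstructed from the test cases above.
    general = config["general"]
    force_ssl = str(general.get("force_ssl", "")).lower() in ("yes", "true")
    if force_ssl or str(general.get("base_port", "")) == "443":
        return "https"
    return "http"

assert get_protocol_sketch({"general": {"base_port": 443}}) == "https"
assert get_protocol_sketch({"general": {"base_port": 80}}) == "http"
assert get_protocol_sketch({"general": {"base_port": 80, "force_ssl": "Yes"}}) == "https"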
@ -9,14 +9,18 @@ if os.geteuid() != 0:
    print("Please run this as root.")
    sys.exit(1)


def get_current_script_dir():
    current_script_dir = os.path.realpath(__file__)
    index = current_script_dir.rindex('/')
    return current_script_dir[0:index]
    current_script_dir = os.path.realpath(__file__)
    index = current_script_dir.rindex("/")
    return current_script_dir[0:index]


try:
    current_script_dir = get_current_script_dir()
    shutil.copy(current_script_dir+"/../airtime-icecast-status.xsl", "/usr/share/icecast2/web")
    shutil.copy(
        current_script_dir + "/../airtime-icecast-status.xsl", "/usr/share/icecast2/web"
    )

except Exception as e:
    print("exception: {}".format(e))
@ -2,5 +2,5 @@
# -*- coding: utf-8 -*-
import runpy

# Run the liquidsoap python module
runpy.run_module('liquidsoap')
# Run the liquidsoap python module
runpy.run_module("liquidsoap")
@ -3,4 +3,3 @@
import runpy

runpy.run_module("pypo", run_name="__main__")

@ -27,27 +27,75 @@ import json
from configobj import ConfigObj

# custom imports
#from util import *
# from util import *
from api_clients import version1 as api_client

LOG_LEVEL = logging.INFO
LOG_PATH = '/var/log/airtime/pypo/notify.log'
LOG_PATH = "/var/log/airtime/pypo/notify.log"

# help screen / info
usage = "%prog [options]" + " - notification gateway"
parser = OptionParser(usage=usage)

# Options
parser.add_option("-d", "--data", help="Pass JSON data from Liquidsoap into this script.", metavar="data")
parser.add_option("-m", "--media-id", help="ID of the file that is currently playing.", metavar="media_id")
parser.add_option("-e", "--error", action="store", dest="error", type="string", help="Liquidsoap error msg.", metavar="error_msg")
parser.add_option(
    "-d",
    "--data",
    help="Pass JSON data from Liquidsoap into this script.",
    metavar="data",
)
parser.add_option(
    "-m",
    "--media-id",
    help="ID of the file that is currently playing.",
    metavar="media_id",
)
parser.add_option(
    "-e",
    "--error",
    action="store",
    dest="error",
    type="string",
    help="Liquidsoap error msg.",
    metavar="error_msg",
)
parser.add_option("-s", "--stream-id", help="ID stream", metavar="stream_id")
parser.add_option("-c", "--connect", help="Liquidsoap connected", action="store_true", metavar="connect")
parser.add_option("-t", "--time", help="Liquidsoap boot up time", action="store", dest="time", metavar="time", type="string")
parser.add_option("-x", "--source-name", help="source connection name", metavar="source_name")
parser.add_option("-y", "--source-status", help="source connection status", metavar="source_status")
parser.add_option("-w", "--webstream", help="JSON metadata associated with webstream", metavar="json_data")
parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started", metavar="json_data", action="store_true", default=False)
parser.add_option(
    "-c",
    "--connect",
    help="Liquidsoap connected",
    action="store_true",
    metavar="connect",
)
parser.add_option(
    "-t",
    "--time",
    help="Liquidsoap boot up time",
    action="store",
    dest="time",
    metavar="time",
    type="string",
)
parser.add_option(
    "-x", "--source-name", help="source connection name", metavar="source_name"
)
parser.add_option(
    "-y", "--source-status", help="source connection status", metavar="source_status"
)
parser.add_option(
    "-w",
    "--webstream",
    help="JSON metadata associated with webstream",
    metavar="json_data",
)
parser.add_option(
    "-n",
    "--liquidsoap-started",
    help="notify liquidsoap started",
    metavar="json_data",
    action="store_true",
    default=False,
)


# parse options
@ -55,12 +103,15 @@ parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started"

# Set up logging
logging.captureWarnings(True)
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s")
logFormatter = logging.Formatter(
    "%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger()
rootLogger.setLevel(LOG_LEVEL)

fileHandler = logging.handlers.RotatingFileHandler(filename=LOG_PATH, maxBytes=1024*1024*30,
                                                   backupCount=8)
fileHandler = logging.handlers.RotatingFileHandler(
    filename=LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8
)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)

@ -69,15 +120,15 @@ consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
logger = rootLogger

#need to wait for Python 2.7 for this..
#logging.captureWarnings(True)
# need to wait for Python 2.7 for this..
# logging.captureWarnings(True)

# loading config file
try:
    config = ConfigObj('/etc/airtime/airtime.conf')
    config = ConfigObj("/etc/airtime/airtime.conf")

except Exception as e:
    logger.error('Error loading config file: %s', e)
    logger.error("Error loading config file: %s", e)
    sys.exit()


@ -90,39 +141,41 @@ class Notify:
        self.api_client.notify_liquidsoap_started()

    def notify_media_start_playing(self, media_id):
        logger.debug('#################################################')
        logger.debug('# Calling server to update about what\'s playing #')
        logger.debug('#################################################')
        logger.debug("#################################################")
        logger.debug("# Calling server to update about what's playing #")
        logger.debug("#################################################")
        response = self.api_client.notify_media_item_start_playing(media_id)
        logger.debug("Response: " + json.dumps(response))

    # @param time: time that LS started
    def notify_liquidsoap_status(self, msg, stream_id, time):
        logger.info('#################################################')
        logger.info('# Calling server to update liquidsoap status #')
        logger.info('#################################################')
        logger.info('msg = ' + str(msg))
        logger.info("#################################################")
        logger.info("# Calling server to update liquidsoap status #")
        logger.info("#################################################")
        logger.info("msg = " + str(msg))
        response = self.api_client.notify_liquidsoap_status(msg, stream_id, time)
        logger.info("Response: " + json.dumps(response))

    def notify_source_status(self, source_name, status):
        logger.debug('#################################################')
        logger.debug('# Calling server to update source status #')
        logger.debug('#################################################')
        logger.debug('msg = ' + str(source_name) + ' : ' + str(status))
        logger.debug("#################################################")
        logger.debug("# Calling server to update source status #")
        logger.debug("#################################################")
        logger.debug("msg = " + str(source_name) + " : " + str(status))
        response = self.api_client.notify_source_status(source_name, status)
        logger.debug("Response: " + json.dumps(response))

    def notify_webstream_data(self, data, media_id):
        logger.debug('#################################################')
        logger.debug('# Calling server to update webstream data #')
        logger.debug('#################################################')
        logger.debug("#################################################")
        logger.debug("# Calling server to update webstream data #")
        logger.debug("#################################################")
        response = self.api_client.notify_webstream_data(data, media_id)
        logger.debug("Response: " + json.dumps(response))

    def run_with_options(self, options):
        if options.error and options.stream_id:
            self.notify_liquidsoap_status(options.error, options.stream_id, options.time)
            self.notify_liquidsoap_status(
                options.error, options.stream_id, options.time
            )
        elif options.connect and options.stream_id:
            self.notify_liquidsoap_status("OK", options.stream_id, options.time)
        elif options.source_name and options.source_status:
@ -134,15 +187,17 @@ class Notify:
        elif options.liquidsoap_started:
            self.notify_liquidsoap_started()
        else:
            logger.debug("Unrecognized option in options({}). Doing nothing".format(options))
            logger.debug(
                "Unrecognized option in options({}). Doing nothing".format(options)
            )


if __name__ == '__main__':
if __name__ == "__main__":
    print()
    print('#########################################')
    print('# *** pypo *** #')
    print('# pypo notification gateway #')
    print('#########################################')
    print("#########################################")
    print("# *** pypo *** #")
    print("# pypo notification gateway #")
    print("#########################################")

    # initialize
    try:
@ -150,4 +205,3 @@ if __name__ == '__main__':
        n.run_with_options(options)
    except Exception as e:
        print(traceback.format_exc())
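This gateway is normally invoked by Liquidsoap rather than by hand, so a quick illustration of how the options above parse (hypothetical argv; standard optparse behaviour, with dest names derived from the long option names):

from optparse import OptionParser

parser = OptionParser()
parser.add_option("-m", "--media-id", metavar="media_id")
parser.add_option("-s", "--stream-id", metavar="stream_id")

# Hypothetical argv, mimicking a Liquidsoap callback:
(options, args) = parser.parse_args(["--media-id", "42"])
print(options.media_id)  # 42 -- run_with_options() would dispatch on this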
@ -7,9 +7,10 @@ import time
import traceback
from api_clients.version1 import AirtimeApiClient


def generate_liquidsoap_config(ss):
    data = ss['msg']
    fh = open('/etc/airtime/liquidsoap.cfg', 'w')
    data = ss["msg"]
    fh = open("/etc/airtime/liquidsoap.cfg", "w")
    fh.write("################################################\n")
    fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n")
    fh.write("################################################\n")
@ -17,17 +18,17 @@ def generate_liquidsoap_config(ss):

    for key, value in data.items():
        try:
            if not "port" in key and not "bitrate" in key: # Stupid hack
            if not "port" in key and not "bitrate" in key:  # Stupid hack
                raise ValueError()
            str_buffer = "%s = %s\n" % (key, int(value))
        except ValueError:
            try: # Is it a boolean?
                if value=="true" or value=="false":
            try:  # Is it a boolean?
                if value == "true" or value == "false":
                    str_buffer = "%s = %s\n" % (key, value.lower())
                else:
                    raise ValueError() # Just drop into the except below
            except: #Everything else is a string
                str_buffer = "%s = \"%s\"\n" % (key, value)
                    raise ValueError()  # Just drop into the except below
            except:  # Everything else is a string
                str_buffer = '%s = "%s"\n' % (key, value)

        fh.write(str_buffer)
    # ignore squashes unused variable errors from Liquidsoap
@ -38,8 +39,9 @@ def generate_liquidsoap_config(ss):
    fh.write('auth_path = "%s/liquidsoap_auth.py"\n' % auth_path)
    fh.close()


def run():
    logging.basicConfig(format='%(message)s')
    logging.basicConfig(format="%(message)s")
    attempts = 0
    max_attempts = 10
    successful = False
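The try/except ladder above is a small type-sniffing trick: only port/bitrate keys become bare integers, the strings "true"/"false" stay bare booleans, and everything else is quoted. The same logic as a standalone sketch (not part of the commit, and simplified):

def coerce_liquidsoap_value(key, value):
    # Mirrors the ladder above: int for port/bitrate keys,
    # bare booleans for "true"/"false", quoted string otherwise.
    if "port" in key or "bitrate" in key:
        try:
            return "%s = %s\n" % (key, int(value))
        except ValueError:
            pass
    if value in ("true", "false"):
        return "%s = %s\n" % (key, value)
    return '%s = "%s"\n' % (key, value)

print(coerce_liquidsoap_value("harbor_port", "8001"), end="")    # harbor_port = 8001
print(coerce_liquidsoap_value("stream_enable", "true"), end="")  # stream_enable = true
print(coerce_liquidsoap_value("mount", "main.mp3"), end="")      # mount = "main.mp3"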
@ -9,16 +9,16 @@ dj_type = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]

source_type = ''
if dj_type == '--master':
    source_type = 'master'
elif dj_type == '--dj':
    source_type = 'dj'
source_type = ""
if dj_type == "--master":
    source_type = "master"
elif dj_type == "--dj":
    source_type = "dj"

response = api_clients.check_live_stream_auth(username, password, source_type)

if 'msg' in response and response['msg'] == True:
    print(response['msg'])
if "msg" in response and response["msg"] == True:
    print(response["msg"])
    sys.exit(0)
else:
    print(False)
@ -4,17 +4,16 @@ import telnetlib
import sys

try:
    config = ConfigObj('/etc/airtime/airtime.conf')
    LS_HOST = config['pypo']['ls_host']
    LS_PORT = config['pypo']['ls_port']
    config = ConfigObj("/etc/airtime/airtime.conf")
    LS_HOST = config["pypo"]["ls_host"]
    LS_PORT = config["pypo"]["ls_port"]

    tn = telnetlib.Telnet(LS_HOST, LS_PORT)
    tn.write("master_harbor.stop\n")
    tn.write("live_dj_harbor.stop\n")
    tn.write('exit\n')
    tn.write("exit\n")
    tn.read_all()

except Exception as e:
    print("Error loading config file: {}".format(e))
    sys.exit()
@ -18,6 +18,7 @@ from configobj import ConfigObj
from datetime import datetime
from optparse import OptionParser
import importlib

try:
    from queue import Queue
except ImportError:  # Python 2.7.5 (CentOS 7)
@ -10,9 +10,10 @@ import time
|
|||
|
||||
from api_clients import version1 as api_client
|
||||
|
||||
|
||||
class ListenerStat(Thread):
|
||||
|
||||
HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
|
||||
HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
|
||||
|
||||
def __init__(self, config, logger=None):
|
||||
Thread.__init__(self)
|
||||
|
@ -28,50 +29,49 @@ class ListenerStat(Thread):
|
|||
for node in nodelist:
|
||||
if node.nodeType == node.TEXT_NODE:
|
||||
rc.append(node.data)
|
||||
return ''.join(rc)
|
||||
return "".join(rc)
|
||||
|
||||
def get_stream_parameters(self):
|
||||
#[{"user":"", "password":"", "url":"", "port":""},{},{}]
|
||||
# [{"user":"", "password":"", "url":"", "port":""},{},{}]
|
||||
return self.api_client.get_stream_parameters()
|
||||
|
||||
|
||||
def get_stream_server_xml(self, ip, url, is_shoutcast=False):
|
||||
auth_string = "%(admin_user)s:%(admin_pass)s" % ip
|
||||
encoded = base64.b64encode(auth_string.encode('utf-8'))
|
||||
encoded = base64.b64encode(auth_string.encode("utf-8"))
|
||||
|
||||
header = {"Authorization":"Basic %s" % encoded.decode('ascii')}
|
||||
header = {"Authorization": "Basic %s" % encoded.decode("ascii")}
|
||||
|
||||
if is_shoutcast:
|
||||
#user agent is required for shoutcast auth, otherwise it returns 404.
|
||||
# user agent is required for shoutcast auth, otherwise it returns 404.
|
||||
user_agent = "Mozilla/5.0 (Linux; rv:22.0) Gecko/20130405 Firefox/22.0"
|
||||
header["User-Agent"] = user_agent
|
||||
|
||||
req = urllib.request.Request(
|
||||
#assuming that the icecast stats path is /admin/stats.xml
|
||||
#need to fix this
|
||||
# assuming that the icecast stats path is /admin/stats.xml
|
||||
# need to fix this
|
||||
url=url,
|
||||
headers=header)
|
||||
headers=header,
|
||||
)
|
||||
|
||||
f = urllib.request.urlopen(req, timeout=ListenerStat.HTTP_REQUEST_TIMEOUT)
|
||||
document = f.read()
|
||||
|
||||
return document
    def get_icecast_stats(self, ip):
        document = None
        if "airtime.pro" in ip["host"].lower():
            url = 'http://%(host)s:%(port)s/stats.xsl' % ip
            url = "http://%(host)s:%(port)s/stats.xsl" % ip
            document = self.get_stream_server_xml(ip, url)
        else:
            url = 'http://%(host)s:%(port)s/admin/stats.xml' % ip
            url = "http://%(host)s:%(port)s/admin/stats.xml" % ip
            document = self.get_stream_server_xml(ip, url)
        dom = defusedxml.minidom.parseString(document)
        sources = dom.getElementsByTagName("source")

        mount_stats = None
        for s in sources:
            #drop the leading '/' character
            # drop the leading '/' character
            mount_name = s.getAttribute("mount")[1:]
            if mount_name == ip["mount"]:
                timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

@ -80,14 +80,16 @@ class ListenerStat(Thread):
                if len(listeners):
                    num_listeners = self.get_node_text(listeners[0].childNodes)

                mount_stats = {"timestamp":timestamp, \
                        "num_listeners": num_listeners, \
                        "mount_name": mount_name}
                mount_stats = {
                    "timestamp": timestamp,
                    "num_listeners": num_listeners,
                    "mount_name": mount_name,
                }

        return mount_stats

    def get_shoutcast_stats(self, ip):
        url = 'http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml' % ip
        url = "http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml" % ip
        document = self.get_stream_server_xml(ip, url, is_shoutcast=True)
        dom = defusedxml.minidom.parseString(document)
        current_listeners = dom.getElementsByTagName("CURRENTLISTENERS")

@ -97,34 +99,37 @@ class ListenerStat(Thread):
        if len(current_listeners):
            num_listeners = self.get_node_text(current_listeners[0].childNodes)

        mount_stats = {"timestamp":timestamp, \
                "num_listeners": num_listeners, \
                "mount_name": "shoutcast"}
        mount_stats = {
            "timestamp": timestamp,
            "num_listeners": num_listeners,
            "mount_name": "shoutcast",
        }

        return mount_stats
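
The two stats methods above share one idea: fetch the server's status XML and pull a single text node out of it with defusedxml. A rough standalone illustration of that parsing step (the sample XML is invented for the example):

import defusedxml.minidom

SAMPLE = '<icestats><source mount="/main"><listeners>7</listeners></source></icestats>'

dom = defusedxml.minidom.parseString(SAMPLE)
for source in dom.getElementsByTagName("source"):
    mount = source.getAttribute("mount")[1:]  # drop the leading '/'
    listeners = source.getElementsByTagName("listeners")
    if listeners:
        count = "".join(
            n.data for n in listeners[0].childNodes if n.nodeType == n.TEXT_NODE
        )
        print(mount, count)  # -> main 7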
    def get_stream_stats(self, stream_parameters):
        stats = []

        #iterate over stream_parameters which is a list of dicts. Each dict
        #represents one Airtime stream (currently this limit is 3).
        #Note that there can be optimizations done, since if all three
        #streams are the same server, we will still initiate 3 separate
        #connections
        # iterate over stream_parameters which is a list of dicts. Each dict
        # represents one Airtime stream (currently this limit is 3).
        # Note that there can be optimizations done, since if all three
        # streams are the same server, we will still initiate 3 separate
        # connections
        for k, v in stream_parameters.items():
            if v["enable"] == 'true':
            if v["enable"] == "true":
                try:
                    if v["output"] == "icecast":
                        mount_stats = self.get_icecast_stats(v)
                        if mount_stats: stats.append(mount_stats)
                        if mount_stats:
                            stats.append(mount_stats)
                    else:
                        stats.append(self.get_shoutcast_stats(v))
                    self.update_listener_stat_error(v["mount"], 'OK')
                    self.update_listener_stat_error(v["mount"], "OK")
                except Exception as e:
                    try:
                        self.update_listener_stat_error(v["mount"], str(e))
                    except Exception as e:
                        self.logger.error('Exception: %s', e)
                        self.logger.error("Exception: %s", e)

        return stats

@ -132,15 +137,15 @@ class ListenerStat(Thread):
        self.api_client.push_stream_stats(stats)

    def update_listener_stat_error(self, stream_id, error):
        keyname = '%s_listener_stat_error' % stream_id
        keyname = "%s_listener_stat_error" % stream_id
        data = {keyname: error}
        self.api_client.update_stream_setting_table(data)

    def run(self):
        #Wake up every 120 seconds and gather icecast statistics. Note that we
        #are currently querying the server every 2 minutes for list of
        #mountpoints as well. We could remove this query if we hooked into
        #rabbitmq events, and listened for these changes instead.
        # Wake up every 120 seconds and gather icecast statistics. Note that we
        # are currently querying the server every 2 minutes for list of
        # mountpoints as well. We could remove this query if we hooked into
        # rabbitmq events, and listened for these changes instead.
        while True:
            try:
                stream_parameters = self.get_stream_parameters()

@ -149,25 +154,27 @@ class ListenerStat(Thread):
                if stats:
                    self.push_stream_stats(stats)
            except Exception as e:
                self.logger.error('Exception: %s', e)
                self.logger.error("Exception: %s", e)

            time.sleep(120)
        self.logger.info('ListenerStat thread exiting')
        self.logger.info("ListenerStat thread exiting")
if __name__ == "__main__":
    # create logger
    logger = logging.getLogger('std_out')
    logger = logging.getLogger("std_out")
    logger.setLevel(logging.DEBUG)
    # create console handler and set level to debug
    #ch = logging.StreamHandler()
    #ch.setLevel(logging.DEBUG)
    # ch = logging.StreamHandler()
    # ch.setLevel(logging.DEBUG)
    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s"
    )
    # add formatter to ch
    #ch.setFormatter(formatter)
    # ch.setFormatter(formatter)
    # add ch to logger
    #logger.addHandler(ch)
    # logger.addHandler(ch)

    #ls = ListenerStat(logger=logger)
    #ls.run()
    # ls = ListenerStat(logger=logger)
    # ls.run()


@ -2,6 +2,7 @@
import re
from packaging.version import Version, parse


def version_cmp(version1, version2):
    version1 = parse(version1)
    version2 = parse(version2)

@ -11,12 +12,14 @@ def version_cmp(version1, version2):
        return 0
    return -1


def date_interval_to_seconds(interval):
    """
    Convert timedelta object into int representing the number of seconds. If
    number of seconds is less than 0, then return 0.
    """
    seconds = (interval.microseconds + \
        (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
    seconds = (
        interval.microseconds + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
    ) / float(10 ** 6)

    return seconds
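
As a quick sanity check of the rewrapped arithmetic (this worked example is mine, not part of the commit): the expression folds microseconds, seconds, and days into one float, which is exactly what the standard library's timedelta.total_seconds() computes.

from datetime import timedelta

interval = timedelta(days=1, seconds=30, microseconds=500000)
manual = (
    interval.microseconds + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
assert manual == interval.total_seconds() == 86430.5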
@ -23,20 +23,24 @@ from .timeout import ls_timeout

def keyboardInterruptHandler(signum, frame):
    logger = logging.getLogger()
    logger.info('\nKeyboard Interrupt\n')
    logger.info("\nKeyboard Interrupt\n")
    sys.exit(0)


signal.signal(signal.SIGINT, keyboardInterruptHandler)

logging.captureWarnings(True)

POLL_INTERVAL = 400

class PypoFetch(Thread):

    def __init__(self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config):

class PypoFetch(Thread):
    def __init__(
        self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config
    ):
        Thread.__init__(self)

        #Hacky...
        # Hacky...
        PypoFetch.ref = self

        self.v1_api_client = v1_api_client.AirtimeApiClient()

@ -76,6 +80,7 @@ class PypoFetch(Thread):
    Handle a message from RabbitMQ, put it into our yucky global var.
    Hopefully there is a better way to do this.
    """

    def handle_message(self, message):
        try:
            self.logger.info("Received event from Pypo Message Handler: %s" % message)

@ -85,50 +90,52 @@ class PypoFetch(Thread):
            except (UnicodeDecodeError, AttributeError):
                pass
            m = json.loads(message)
            command = m['event_type']
            command = m["event_type"]
            self.logger.info("Handling command: " + command)

            if command == 'update_schedule':
                self.schedule_data = m['schedule']
            if command == "update_schedule":
                self.schedule_data = m["schedule"]
                self.process_schedule(self.schedule_data)
            elif command == 'reset_liquidsoap_bootstrap':
            elif command == "reset_liquidsoap_bootstrap":
                self.set_bootstrap_variables()
            elif command == 'update_stream_setting':
            elif command == "update_stream_setting":
                self.logger.info("Updating stream setting...")
                self.regenerate_liquidsoap_conf(m['setting'])
            elif command == 'update_stream_format':
                self.regenerate_liquidsoap_conf(m["setting"])
            elif command == "update_stream_format":
                self.logger.info("Updating stream format...")
                self.update_liquidsoap_stream_format(m['stream_format'])
            elif command == 'update_station_name':
                self.update_liquidsoap_stream_format(m["stream_format"])
            elif command == "update_station_name":
                self.logger.info("Updating station name...")
                self.update_liquidsoap_station_name(m['station_name'])
            elif command == 'update_transition_fade':
                self.update_liquidsoap_station_name(m["station_name"])
            elif command == "update_transition_fade":
                self.logger.info("Updating transition_fade...")
                self.update_liquidsoap_transition_fade(m['transition_fade'])
            elif command == 'switch_source':
                self.update_liquidsoap_transition_fade(m["transition_fade"])
            elif command == "switch_source":
                self.logger.info("switch_on_source show command received...")
                self.pypo_liquidsoap.\
                    get_telnet_dispatcher().\
                    switch_source(m['sourcename'], m['status'])
            elif command == 'disconnect_source':
                self.pypo_liquidsoap.get_telnet_dispatcher().switch_source(
                    m["sourcename"], m["status"]
                )
            elif command == "disconnect_source":
                self.logger.info("disconnect_on_source show command received...")
                self.pypo_liquidsoap.get_telnet_dispatcher().\
                    disconnect_source(m['sourcename'])
                self.pypo_liquidsoap.get_telnet_dispatcher().disconnect_source(
                    m["sourcename"]
                )
            else:
                self.logger.info("Unknown command: %s" % command)

            # update timeout value
            if command == 'update_schedule':
            if command == "update_schedule":
                self.listener_timeout = POLL_INTERVAL
            else:
                self.listener_timeout = self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL
                self.listener_timeout = (
                    self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL
                )
                if self.listener_timeout < 0:
                    self.listener_timeout = 0
            self.logger.info("New timeout: %s" % self.listener_timeout)
        except Exception as e:
            self.logger.exception("Exception in handling Message Handler message")
    def switch_source_temp(self, sourcename, status):
        self.logger.debug('Switching source: %s to "%s" status', sourcename, status)
        command = "streams."

@ -149,25 +156,28 @@ class PypoFetch(Thread):
    """
    Initialize Liquidsoap environment
    """

    def set_bootstrap_variables(self):
        self.logger.debug('Getting information needed on bootstrap from Airtime')
        self.logger.debug("Getting information needed on bootstrap from Airtime")
        try:
            info = self.v1_api_client.get_bootstrap_info()
        except Exception as e:
            self.logger.exception('Unable to get bootstrap info.. Exiting pypo...')
            self.logger.exception("Unable to get bootstrap info.. Exiting pypo...")

        self.logger.debug('info:%s', info)
        self.logger.debug("info:%s", info)
        commands = []
        for k, v in info['switch_status'].items():
        for k, v in info["switch_status"].items():
            commands.append(self.switch_source_temp(k, v))

        stream_format = info['stream_label']
        station_name = info['station_name']
        fade = info['transition_fade']
        stream_format = info["stream_label"]
        station_name = info["station_name"]
        fade = info["transition_fade"]

        commands.append(('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8'))
        commands.append(('vars.station_name %s\n' % station_name).encode('utf-8'))
        commands.append(('vars.default_dj_fade %s\n' % fade).encode('utf-8'))
        commands.append(
            ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8")
        )
        commands.append(("vars.station_name %s\n" % station_name).encode("utf-8"))
        commands.append(("vars.default_dj_fade %s\n" % fade).encode("utf-8"))
        self.pypo_liquidsoap.get_telnet_dispatcher().telnet_send(commands)

        self.pypo_liquidsoap.clear_all_queues()

@ -182,21 +192,24 @@ class PypoFetch(Thread):
        will be thrown."""
        self.telnet_lock.acquire(False)

        self.logger.info("Restarting Liquidsoap")
        subprocess.call('kill -9 `pidof airtime-liquidsoap`', shell=True, close_fds=True)
        subprocess.call(
            "kill -9 `pidof airtime-liquidsoap`", shell=True, close_fds=True
        )

        #Wait here and poll Liquidsoap until it has started up
        # Wait here and poll Liquidsoap until it has started up
        self.logger.info("Waiting for Liquidsoap to start")
        while True:
            try:
                tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
                tn.write('exit\n'.encode('utf-8'))
                tn = telnetlib.Telnet(
                    self.config["ls_host"], self.config["ls_port"]
                )
                tn.write("exit\n".encode("utf-8"))
                tn.read_all()
                self.logger.info("Liquidsoap is up and running")
                break
            except Exception as e:
                #sleep 0.5 seconds and try again
                # sleep 0.5 seconds and try again
                time.sleep(0.5)

        except Exception as e:

@ -208,11 +221,11 @@ class PypoFetch(Thread):
    """
    NOTE: This function is quite short after it was refactored.
    """

    def regenerate_liquidsoap_conf(self, setting):
        self.restart_liquidsoap()
        self.update_liquidsoap_connection_status()

    @ls_timeout
    def update_liquidsoap_connection_status(self):
        """

@ -222,20 +235,22 @@ class PypoFetch(Thread):

        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
            tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
            # update the boot up time of Liquidsoap. Since Liquidsoap is not restarting,
            # we are manually adjusting the bootup time variable so the status msg will get
            # updated.
            current_time = time.time()
            boot_up_time_command = ("vars.bootup_time " + str(current_time) + "\n").encode('utf-8')
            boot_up_time_command = (
                "vars.bootup_time " + str(current_time) + "\n"
            ).encode("utf-8")
            self.logger.info(boot_up_time_command)
            tn.write(boot_up_time_command)

            connection_status = ("streams.connection_status\n").encode('utf-8')
            connection_status = ("streams.connection_status\n").encode("utf-8")
            self.logger.info(connection_status)
            tn.write(connection_status)

            tn.write('exit\n'.encode('utf-8'))
            tn.write("exit\n".encode("utf-8"))

            output = tn.read_all()
        except Exception as e:

@ -253,12 +268,13 @@ class PypoFetch(Thread):

        fake_time = current_time + 1
        for s in streams:
            info = s.split(':')
            info = s.split(":")
            stream_id = info[0]
            status = info[1]
            if(status == "true"):
                self.v1_api_client.notify_liquidsoap_status("OK", stream_id, str(fake_time))

            if status == "true":
                self.v1_api_client.notify_liquidsoap_status(
                    "OK", stream_id, str(fake_time)
                )

    @ls_timeout
    def update_liquidsoap_stream_format(self, stream_format):

@ -266,11 +282,11 @@ class PypoFetch(Thread):
        # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
            command = ('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8')
            tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
            command = ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8")
            self.logger.info(command)
            tn.write(command)
            tn.write('exit\n'.encode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            tn.read_all()
        except Exception as e:
            self.logger.exception(e)
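
Several methods in this hunk repeat one idiom: take the telnet lock, connect, write a newline-terminated vars.* command, send exit, and drain the socket. A condensed sketch of that idiom (telnetlib is the stdlib module the code already uses; the helper name and with-statement locking are mine):

import telnetlib

def send_liquidsoap_var(host, port, lock, name, value):
    with lock:  # serialize access to the single Liquidsoap telnet session
        tn = telnetlib.Telnet(host, port)
        tn.write(("vars.%s %s\n" % (name, value)).encode("utf-8"))
        tn.write("exit\n".encode("utf-8"))
        return tn.read_all()  # drain until the server closes the connection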
@ -283,11 +299,11 @@ class PypoFetch(Thread):
        # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
            command = ('vars.default_dj_fade %s\n' % fade).encode('utf-8')
            tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
            command = ("vars.default_dj_fade %s\n" % fade).encode("utf-8")
            self.logger.info(command)
            tn.write(command)
            tn.write('exit\n'.encode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            tn.read_all()
        except Exception as e:
            self.logger.exception(e)

@ -301,11 +317,11 @@ class PypoFetch(Thread):
        try:
            try:
                self.telnet_lock.acquire()
                tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
                command = ('vars.station_name %s\n' % station_name).encode('utf-8')
                tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
                command = ("vars.station_name %s\n" % station_name).encode("utf-8")
                self.logger.info(command)
                tn.write(command)
                tn.write('exit\n'.encode('utf-8'))
                tn.write("exit\n".encode("utf-8"))
                tn.read_all()
            except Exception as e:
                self.logger.exception(e)

@ -322,6 +338,7 @@ class PypoFetch(Thread):
    to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
    - runs the cleanup routine, to get rid of unused cached files
    """

    def process_schedule(self, schedule_data):
        self.last_update_schedule_timestamp = time.time()
        self.logger.debug(schedule_data)

@ -343,20 +360,21 @@ class PypoFetch(Thread):
            media_copy = {}
            for key in media:
                media_item = media[key]
                if (media_item['type'] == 'file'):
                if media_item["type"] == "file":
                    fileExt = self.sanity_check_media_item(media_item)
                    dst = os.path.join(download_dir, f'{media_item["id"]}{fileExt}')
                    media_item['dst'] = dst
                    media_item['file_ready'] = False
                    media_item["dst"] = dst
                    media_item["file_ready"] = False
                    media_filtered[key] = media_item

                media_item['start'] = datetime.strptime(media_item['start'],
                                                        "%Y-%m-%d-%H-%M-%S")
                media_item['end'] = datetime.strptime(media_item['end'],
                                                      "%Y-%m-%d-%H-%M-%S")
                media_item["start"] = datetime.strptime(
                    media_item["start"], "%Y-%m-%d-%H-%M-%S"
                )
                media_item["end"] = datetime.strptime(
                    media_item["end"], "%Y-%m-%d-%H-%M-%S"
                )
                media_copy[key] = media_item

            self.media_prepare_queue.put(copy.copy(media_filtered))
        except Exception as e:
            self.logger.exception(e)

@ -365,37 +383,36 @@ class PypoFetch(Thread):
            self.logger.debug("Pushing to pypo-push")
            self.push_queue.put(media_copy)

        # cleanup
        try:
            self.cache_cleanup(media)
        except Exception as e:
            self.logger.exception(e)

    #do basic validation of file parameters. Useful for debugging
    #purposes
    # do basic validation of file parameters. Useful for debugging
    # purposes
    def sanity_check_media_item(self, media_item):
        start = datetime.strptime(media_item['start'], "%Y-%m-%d-%H-%M-%S")
        end = datetime.strptime(media_item['end'], "%Y-%m-%d-%H-%M-%S")
        start = datetime.strptime(media_item["start"], "%Y-%m-%d-%H-%M-%S")
        end = datetime.strptime(media_item["end"], "%Y-%m-%d-%H-%M-%S")

        mime = media_item['metadata']['mime']
        mime = media_item["metadata"]["mime"]
        mimetypes.init(["%s/mime.types" % os.path.dirname(os.path.realpath(__file__))])
        mime_ext = mimetypes.guess_extension(mime, strict=False)

        length1 = pure.date_interval_to_seconds(end - start)
        length2 = media_item['cue_out'] - media_item['cue_in']
        length2 = media_item["cue_out"] - media_item["cue_in"]

        if abs(length2 - length1) > 1:
            self.logger.error("end - start length: %s", length1)
            self.logger.error("cue_out - cue_in length: %s", length2)
            self.logger.error("Two lengths are not equal!!!")

        media_item['file_ext'] = mime_ext
        media_item["file_ext"] = mime_ext

        return mime_ext

    def is_file_opened(self, path):
        #Capture stderr to avoid polluting py-interpreter.log
        # Capture stderr to avoid polluting py-interpreter.log
        proc = Popen(["lsof", path], stdout=PIPE, stderr=PIPE)
        out = proc.communicate()[0].strip()
        return bool(out)

@ -411,10 +428,14 @@ class PypoFetch(Thread):

        for mkey in media:
            media_item = media[mkey]
            if media_item['type'] == 'file':
            if media_item["type"] == "file":
                if "file_ext" not in media_item.keys():
                    media_item["file_ext"] = mimetypes.guess_extension(media_item['metadata']['mime'], strict=False)
                scheduled_file_set.add("{}{}".format(media_item["id"], media_item["file_ext"]))
                    media_item["file_ext"] = mimetypes.guess_extension(
                        media_item["metadata"]["mime"], strict=False
                    )
                scheduled_file_set.add(
                    "{}{}".format(media_item["id"], media_item["file_ext"])
                )

        expired_files = cached_file_set - scheduled_file_set

@ -424,9 +445,9 @@ class PypoFetch(Thread):
                path = os.path.join(self.cache_dir, f)
                self.logger.debug("Removing %s" % path)

                #check if this file is opened (sometimes Liquidsoap is still
                #playing the file due to our knowledge of the track length
                #being incorrect!)
                # check if this file is opened (sometimes Liquidsoap is still
                # playing the file due to our knowledge of the track length
                # being incorrect!)
                if not self.is_file_opened(path):
                    os.remove(path)
                    self.logger.info("File '%s' removed" % path)
@ -441,7 +462,7 @@ class PypoFetch(Thread):
            self.process_schedule(self.schedule_data)
            return True
        except Exception as e:
            self.logger.error('Unable to fetch schedule')
            self.logger.error("Unable to fetch schedule")
            self.logger.exception(e)
        return False

@ -462,11 +483,11 @@ class PypoFetch(Thread):
        Timer(120, self.update_metadata_on_tunein).start()

    def main(self):
        #Make sure all Liquidsoap queues are empty. This is important in the
        #case where we've just restarted the pypo scheduler, but Liquidsoap still
        #is playing tracks. In this case let's just restart everything from scratch
        #so that we can repopulate our dictionary that keeps track of what
        #Liquidsoap is playing much more easily.
        # Make sure all Liquidsoap queues are empty. This is important in the
        # case where we've just restarted the pypo scheduler, but Liquidsoap still
        # is playing tracks. In this case let's just restart everything from scratch
        # so that we can repopulate our dictionary that keeps track of what
        # Liquidsoap is playing much more easily.
        self.pypo_liquidsoap.clear_all_queues()

        self.set_bootstrap_variables()

@ -500,7 +521,9 @@ class PypoFetch(Thread):
                Currently we are checking every POLL_INTERVAL seconds
                """

                message = self.fetch_queue.get(block=True, timeout=self.listener_timeout)
                message = self.fetch_queue.get(
                    block=True, timeout=self.listener_timeout
                )
                manual_fetch_needed = False
                self.handle_message(message)
            except Empty as e:

@ -513,7 +536,7 @@ class PypoFetch(Thread):
                if manual_fetch_needed:
                    self.persistent_manual_schedule_fetch(max_attempts=5)
            except Exception as e:
                self.logger.exception('Failed to manually fetch the schedule.')
                self.logger.exception("Failed to manually fetch the schedule.")

            loops += 1

@ -522,4 +545,4 @@ class PypoFetch(Thread):
        Entry point of the thread
        """
        self.main()
        self.logger.info('PypoFetch thread exiting')
        self.logger.info("PypoFetch thread exiting")


@ -18,13 +18,12 @@ import hashlib
from requests.exceptions import ConnectionError, HTTPError, Timeout
from api_clients import version2 as api_client

CONFIG_PATH = '/etc/airtime/airtime.conf'
CONFIG_PATH = "/etc/airtime/airtime.conf"

logging.captureWarnings(True)


class PypoFile(Thread):

    def __init__(self, schedule_queue, config):
        Thread.__init__(self)
        self.logger = logging.getLogger()

@ -38,10 +37,10 @@ class PypoFile(Thread):
        """
        Copy media_item from local library directory to local cache directory.
        """
        src = media_item['uri']
        dst = media_item['dst']
        src = media_item["uri"]
        dst = media_item["dst"]

        src_size = media_item['filesize']
        src_size = media_item["filesize"]

        dst_exists = True
        try:

@ -59,34 +58,44 @@ class PypoFile(Thread):
                # become an issue here... This needs proper cache management.
                # https://github.com/LibreTime/libretime/issues/756#issuecomment-477853018
                # https://github.com/LibreTime/libretime/pull/845
                self.logger.debug("file %s already exists in local cache as %s, skipping copying..." % (src, dst))
                self.logger.debug(
                    "file %s already exists in local cache as %s, skipping copying..."
                    % (src, dst)
                )
            else:
                do_copy = True

        media_item['file_ready'] = not do_copy
        media_item["file_ready"] = not do_copy

        if do_copy:
            self.logger.info("copying from %s to local cache %s" % (src, dst))
            try:
                with open(dst, "wb") as handle:
                    self.logger.info(media_item)
                    response = self.api_client.services.file_download_url(id=media_item['id'])
                    response = self.api_client.services.file_download_url(
                        id=media_item["id"]
                    )

                    if not response.ok:
                        self.logger.error(response)
                        raise Exception("%s - Error occurred downloading file" % response.status_code)
                        raise Exception(
                            "%s - Error occurred downloading file"
                            % response.status_code
                        )

                    for chunk in response.iter_content(chunk_size=1024):
                        handle.write(chunk)

                #make file world readable and owner writable
                # make file world readable and owner writable
                os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)

                if media_item['filesize'] == 0:
                    file_size = self.report_file_size_and_md5_to_airtime(dst, media_item["id"], host, username)
                if media_item["filesize"] == 0:
                    file_size = self.report_file_size_and_md5_to_airtime(
                        dst, media_item["id"], host, username
                    )
                    media_item["filesize"] = file_size

                media_item['file_ready'] = True
                media_item["file_ready"] = True
            except Exception as e:
                self.logger.error("Could not copy from %s to %s" % (src, dst))
                self.logger.error(e)
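
The download above streams the HTTP body to disk in 1 KiB chunks rather than buffering whole audio files in memory. A standalone sketch of that technique using plain requests (the URL and destination are stand-ins; the real code goes through the API client's file_download_url service):

import requests

def download_to(url, dst, chunk_size=1024):
    response = requests.get(url, stream=True)
    if not response.ok:
        raise Exception("%s - Error occurred downloading file" % response.status_code)
    with open(dst, "wb") as handle:
        # iter_content yields the body incrementally, keeping memory flat
        for chunk in response.iter_content(chunk_size=chunk_size):
            handle.write(chunk)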
@ -95,7 +104,7 @@ class PypoFile(Thread):
        try:
            file_size = os.path.getsize(file_path)

            with open(file_path, 'rb') as fh:
            with open(file_path, "rb") as fh:
                m = hashlib.md5()
                while True:
                    data = fh.read(8192)

@ -105,15 +114,21 @@ class PypoFile(Thread):
            md5_hash = m.hexdigest()
        except (OSError, IOError) as e:
            file_size = 0
            self.logger.error("Error getting file size and md5 hash for file id %s" % file_id)
            self.logger.error(
                "Error getting file size and md5 hash for file id %s" % file_id
            )
            self.logger.error(e)
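
For context, the hashing loop black is rewrapping here is the standard incremental-digest pattern: read fixed-size blocks so arbitrarily large files never load fully into memory. A minimal version of the same computation:

import hashlib

def md5_of(path, block_size=8192):
    m = hashlib.md5()
    with open(path, "rb") as fh:
        while True:
            data = fh.read(block_size)
            if not data:  # empty read means end of file
                break
            m.update(data)
    return m.hexdigest()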
            # Make PUT request to Airtime to update the file size and hash
            error_msg = "Could not update media file %s with file size and md5 hash" % file_id
            error_msg = (
                "Could not update media file %s with file size and md5 hash" % file_id
            )
            try:
                put_url = "%s://%s:%s/rest/media/%s" % (host[0], host[1], host[2], file_id)
                payload = json.dumps({'filesize': file_size, 'md5': md5_hash})
                response = requests.put(put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, ''))
                payload = json.dumps({"filesize": file_size, "md5": md5_hash})
                response = requests.put(
                    put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, "")
                )
                if not response.ok:
                    self.logger.error(error_msg)
            except (ConnectionError, Timeout):

@ -160,7 +175,9 @@ class PypoFile(Thread):
        try:
            config.readfp(open(config_path))
        except IOError as e:
            logging.debug("Failed to open config file at %s: %s" % (config_path, e.strerror))
            logging.debug(
                "Failed to open config file at %s: %s" % (config_path, e.strerror)
            )
            sys.exit()
        except Exception as e:
            logging.debug(e.strerror)

@ -189,12 +206,12 @@ class PypoFile(Thread):
                except Empty as e:
                    pass

                media_item = self.get_highest_priority_media_item(self.media)
                if media_item is not None:
                    self.copy_file(media_item)
            except Exception as e:
                import traceback

                top = traceback.format_exc()
                self.logger.error(str(e))
                self.logger.error(top)

@ -204,9 +221,10 @@ class PypoFile(Thread):
        """
        Entry point of the thread
        """
        try: self.main()
        try:
            self.main()
        except Exception as e:
            top = traceback.format_exc()
            self.logger.error('PypoFile Exception: %s', top)
            self.logger.error("PypoFile Exception: %s", top)
            time.sleep(5)
        self.logger.info('PypoFile thread exiting')
        self.logger.info("PypoFile thread exiting")


@ -11,12 +11,17 @@ import time
from queue import Empty

import signal


def keyboardInterruptHandler(signum, frame):
    logger = logging.getLogger()
    logger.info('\nKeyboard Interrupt\n')
    logger.info("\nKeyboard Interrupt\n")
    sys.exit(0)


signal.signal(signal.SIGINT, keyboardInterruptHandler)


class PypoLiqQueue(Thread):
    def __init__(self, q, pypo_liquidsoap, logger):
        Thread.__init__(self)

@ -35,18 +40,20 @@ class PypoLiqQueue(Thread):
                    self.logger.info("waiting indefinitely for schedule")
                    media_schedule = self.queue.get(block=True)
                else:
                    self.logger.info("waiting %ss until next scheduled item" % \
                            time_until_next_play)
                    media_schedule = self.queue.get(block=True, \
                            timeout=time_until_next_play)
                    self.logger.info(
                        "waiting %ss until next scheduled item" % time_until_next_play
                    )
                    media_schedule = self.queue.get(
                        block=True, timeout=time_until_next_play
                    )
            except Empty as e:
                #Time to push a scheduled item.
                # Time to push a scheduled item.
                media_item = schedule_deque.popleft()
                self.pypo_liquidsoap.play(media_item)
                if len(schedule_deque):
                    time_until_next_play = \
                        self.date_interval_to_seconds(
                            schedule_deque[0]['start'] - datetime.utcnow())
                    time_until_next_play = self.date_interval_to_seconds(
                        schedule_deque[0]["start"] - datetime.utcnow()
                    )
                    if time_until_next_play < 0:
                        time_until_next_play = 0
                else:

@ -54,7 +61,7 @@ class PypoLiqQueue(Thread):
            else:
                self.logger.info("New schedule received: %s", media_schedule)

                #new schedule received. Replace old one with this.
                # new schedule received. Replace old one with this.
                schedule_deque.clear()

                keys = sorted(media_schedule.keys())

@ -63,28 +70,28 @@ class PypoLiqQueue(Thread):

                if len(keys):
                    time_until_next_play = self.date_interval_to_seconds(
                        media_schedule[keys[0]]['start'] -
                        datetime.utcnow())
                        media_schedule[keys[0]]["start"] - datetime.utcnow()
                    )

                else:
                    time_until_next_play = None

    def date_interval_to_seconds(self, interval):
        """
        Convert timedelta object into int representing the number of seconds. If
        number of seconds is less than 0, then return 0.
        """
        seconds = (interval.microseconds + \
                   (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
        if seconds < 0: seconds = 0
        seconds = (
            interval.microseconds
            + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
        ) / float(10 ** 6)
        if seconds < 0:
            seconds = 0

        return seconds
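
The main loop in this file leans on a useful trick: Queue.get's timeout doubles as the sleep until the next scheduled item, so a queue.Empty exception means "the deadline hit, play now" while an actual message means "a new schedule arrived". A stripped-down sketch of that control flow (the recompute step is stubbed out):

from queue import Queue, Empty

def wait_loop(q, play, next_deadline):
    timeout = None  # block forever until the first schedule shows up
    while True:
        try:
            schedule = q.get(block=True, timeout=timeout)
        except Empty:
            play()                    # timeout expired: the item is due
            timeout = next_deadline() # seconds until the following item
        else:
            print("New schedule received: %s" % schedule)
            timeout = next_deadline()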
    def run(self):
        try: self.main()
        try:
            self.main()
        except Exception as e:
            self.logger.error('PypoLiqQueue Exception: %s', traceback.format_exc())
            self.logger.error("PypoLiqQueue Exception: %s", traceback.format_exc())


@ -8,27 +8,25 @@ from datetime import timedelta
from . import eventtypes
import time

class PypoLiquidsoap():

class PypoLiquidsoap:
    def __init__(self, logger, telnet_lock, host, port):
        self.logger = logger
        self.liq_queue_tracker = {
                "s0": None,
                "s1": None,
                "s2": None,
                "s3": None,
                "s4": None,
                }
            "s0": None,
            "s1": None,
            "s2": None,
            "s3": None,
            "s4": None,
        }

        self.telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, \
                                                  logger,\
                                                  host,\
                                                  port,\
                                                  list(self.liq_queue_tracker.keys()))
        self.telnet_liquidsoap = TelnetLiquidsoap(
            telnet_lock, logger, host, port, list(self.liq_queue_tracker.keys())
        )

    def get_telnet_dispatcher(self):
        return self.telnet_liquidsoap

    def play(self, media_item):
        if media_item["type"] == eventtypes.FILE:
            self.handle_file_type(media_item)

@ -37,28 +35,32 @@ class PypoLiquidsoap():
        elif media_item["type"] == eventtypes.STREAM_BUFFER_START:
            self.telnet_liquidsoap.start_web_stream_buffer(media_item)
        elif media_item["type"] == eventtypes.STREAM_OUTPUT_START:
            if media_item['row_id'] != self.telnet_liquidsoap.current_prebuffering_stream_id:
                #this is called if the stream wasn't scheduled sufficiently ahead of time
                #so that the prebuffering stage could take effect. Let's do the prebuffering now.
            if (
                media_item["row_id"]
                != self.telnet_liquidsoap.current_prebuffering_stream_id
            ):
                # this is called if the stream wasn't scheduled sufficiently ahead of time
                # so that the prebuffering stage could take effect. Let's do the prebuffering now.
                self.telnet_liquidsoap.start_web_stream_buffer(media_item)
            self.telnet_liquidsoap.start_web_stream(media_item)
        elif media_item['type'] == eventtypes.STREAM_BUFFER_END:
        elif media_item["type"] == eventtypes.STREAM_BUFFER_END:
            self.telnet_liquidsoap.stop_web_stream_buffer()
        elif media_item['type'] == eventtypes.STREAM_OUTPUT_END:
        elif media_item["type"] == eventtypes.STREAM_OUTPUT_END:
            self.telnet_liquidsoap.stop_web_stream_output()
        else: raise UnknownMediaItemType(str(media_item))
        else:
            raise UnknownMediaItemType(str(media_item))

    def handle_file_type(self, media_item):
        """
        Wait 200 seconds (2000 iterations) for file to become ready,
        Wait 200 seconds (2000 iterations) for file to become ready,
        otherwise give up on it.
        """
        iter_num = 0
        while not media_item['file_ready'] and iter_num < 2000:
        while not media_item["file_ready"] and iter_num < 2000:
            time.sleep(0.1)
            iter_num += 1

        if media_item['file_ready']:
        if media_item["file_ready"]:
            available_queue = self.find_available_queue()

            try:

@ -68,27 +70,29 @@ class PypoLiquidsoap():
                self.logger.error(e)
                raise
        else:
            self.logger.warn("File %s did not become ready in less than 5 seconds. Skipping...", media_item['dst'])
            self.logger.warn(
                "File %s did not become ready in less than 5 seconds. Skipping...",
                media_item["dst"],
            )

    def handle_event_type(self, media_item):
        if media_item['event_type'] == "kick_out":
        if media_item["event_type"] == "kick_out":
            self.telnet_liquidsoap.disconnect_source("live_dj")
        elif media_item['event_type'] == "switch_off":
        elif media_item["event_type"] == "switch_off":
            self.telnet_liquidsoap.switch_source("live_dj", "off")

    def is_media_item_finished(self, media_item):
        if media_item is None:
            return True
        else:
            return datetime.utcnow() > media_item['end']
            return datetime.utcnow() > media_item["end"]

    def find_available_queue(self):
        available_queue = None
        for i in self.liq_queue_tracker:
            mi = self.liq_queue_tracker[i]
            if mi == None or self.is_media_item_finished(mi):
                #queue "i" is available. Push to this queue
                # queue "i" is available. Push to this queue
                available_queue = i

        if available_queue == None:

@ -96,7 +100,6 @@ class PypoLiquidsoap():

        return available_queue

    def verify_correct_present_media(self, scheduled_now):
        """
        verify whether Liquidsoap is currently playing the correct files.

@ -122,11 +125,13 @@ class PypoLiquidsoap():
        """

        try:
            scheduled_now_files = \
                [x for x in scheduled_now if x["type"] == eventtypes.FILE]
            scheduled_now_files = [
                x for x in scheduled_now if x["type"] == eventtypes.FILE
            ]

            scheduled_now_webstream = \
                [x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START]
            scheduled_now_webstream = [
                x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START
            ]

            schedule_ids = set([x["row_id"] for x in scheduled_now_files])

@ -141,19 +146,21 @@ class PypoLiquidsoap():
            to_be_removed = set()
            to_be_added = set()

            #Iterate over the new files, and compare them to currently scheduled
            #tracks. If already in liquidsoap queue still need to make sure they don't
            #have different attributes
            #if replay gain changes, it shouldn't change the amplification of the currently playing song
            # Iterate over the new files, and compare them to currently scheduled
            # tracks. If already in liquidsoap queue still need to make sure they don't
            # have different attributes
            # if replay gain changes, it shouldn't change the amplification of the currently playing song
            for i in scheduled_now_files:
                if i["row_id"] in row_id_map:
                    mi = row_id_map[i["row_id"]]
                    correct = mi['start'] == i['start'] and \
                              mi['end'] == i['end'] and \
                              mi['row_id'] == i['row_id']
                    correct = (
                        mi["start"] == i["start"]
                        and mi["end"] == i["end"]
                        and mi["row_id"] == i["row_id"]
                    )

                    if not correct:
                        #need to re-add
                        # need to re-add
                        self.logger.info("Track %s found to have new attr." % i)
                        to_be_removed.add(i["row_id"])
                        to_be_added.add(i["row_id"])

@ -162,37 +169,38 @@ class PypoLiquidsoap():
            to_be_added.update(schedule_ids - liq_queue_ids)

            if to_be_removed:
                self.logger.info("Need to remove items from Liquidsoap: %s" % \
                        to_be_removed)
                self.logger.info(
                    "Need to remove items from Liquidsoap: %s" % to_be_removed
                )

                #remove files from Liquidsoap's queue
                # remove files from Liquidsoap's queue
                for i in self.liq_queue_tracker:
                    mi = self.liq_queue_tracker[i]
                    if mi is not None and mi["row_id"] in to_be_removed:
                        self.stop(i)

            if to_be_added:
                self.logger.info("Need to add items to Liquidsoap *now*: %s" % \
                        to_be_added)
                self.logger.info(
                    "Need to add items to Liquidsoap *now*: %s" % to_be_added
                )

                for i in scheduled_now_files:
                    if i["row_id"] in to_be_added:
                        self.modify_cue_point(i)
                        self.play(i)

            #handle webstreams
            # handle webstreams
            current_stream_id = self.telnet_liquidsoap.get_current_stream_id()
            if scheduled_now_webstream:
                if int(current_stream_id) != int(scheduled_now_webstream[0]["row_id"]):
                    self.play(scheduled_now_webstream[0])
            elif current_stream_id != "-1":
                #something is playing and it shouldn't be.
                # something is playing and it shouldn't be.
                self.telnet_liquidsoap.stop_web_stream_buffer()
                self.telnet_liquidsoap.stop_web_stream_output()
        except KeyError as e:
            self.logger.error("Error: Malformed event in schedule. " + str(e))
    def stop(self, queue):
        self.telnet_liquidsoap.queue_remove(queue)
        self.liq_queue_tracker[queue] = None

@ -209,24 +217,32 @@ class PypoLiquidsoap():

        tnow = datetime.utcnow()

        link_start = link['start']
        link_start = link["start"]

        diff_td = tnow - link_start
        diff_sec = self.date_interval_to_seconds(diff_td)

        if diff_sec > 0:
            self.logger.debug("media item was supposed to start %s ago. Preparing to start..", diff_sec)
            original_cue_in_td = timedelta(seconds=float(link['cue_in']))
            link['cue_in'] = self.date_interval_to_seconds(original_cue_in_td) + diff_sec
            self.logger.debug(
                "media item was supposed to start %s ago. Preparing to start..",
                diff_sec,
            )
            original_cue_in_td = timedelta(seconds=float(link["cue_in"]))
            link["cue_in"] = (
                self.date_interval_to_seconds(original_cue_in_td) + diff_sec
            )

    def date_interval_to_seconds(self, interval):
        """
        Convert timedelta object into int representing the number of seconds. If
        number of seconds is less than 0, then return 0.
        """
        seconds = (interval.microseconds + \
                   (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
        if seconds < 0: seconds = 0
        seconds = (
            interval.microseconds
            + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
        ) / float(10 ** 6)
        if seconds < 0:
            seconds = 0

        return seconds

@ -237,5 +253,6 @@ class PypoLiquidsoap():
class UnknownMediaItemType(Exception):
    pass


class NoQueueAvailableException(Exception):
    pass


@ -6,6 +6,7 @@ import os
import sys
from threading import Thread
import time

# For RabbitMQ
from kombu.connection import Connection
from kombu.messaging import Exchange, Queue

@ -26,17 +27,18 @@ class RabbitConsumer(ConsumerMixin):

    def get_consumers(self, Consumer, channel):
        return [
            Consumer(self.queues, callbacks=[self.on_message], accept=['text/plain']),
            Consumer(self.queues, callbacks=[self.on_message], accept=["text/plain"]),
        ]

    def on_message(self, body, message):
        self.handler.handle_message(message.payload)
        message.ack()


class PypoMessageHandler(Thread):
    def __init__(self, pq, rq, config):
        Thread.__init__(self)
        self.logger = logging.getLogger('message_h')
        self.logger = logging.getLogger("message_h")
        self.pypo_queue = pq
        self.recorder_queue = rq
        self.config = config

@ -44,13 +46,17 @@ class PypoMessageHandler(Thread):
    def init_rabbit_mq(self):
        self.logger.info("Initializing RabbitMQ stuff")
        try:
            schedule_exchange = Exchange("airtime-pypo", "direct", durable=True, auto_delete=True)
            schedule_exchange = Exchange(
                "airtime-pypo", "direct", durable=True, auto_delete=True
            )
            schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange, key="foo")
            with Connection(self.config["host"], \
                            self.config["user"], \
                            self.config["password"], \
                            self.config["vhost"], \
                            heartbeat = 5) as connection:
            with Connection(
                self.config["host"],
                self.config["user"],
                self.config["password"],
                self.config["vhost"],
                heartbeat=5,
            ) as connection:
                rabbit = RabbitConsumer(connection, [schedule_queue], self)
                rabbit.run()
        except Exception as e:

@ -60,6 +66,7 @@ class PypoMessageHandler(Thread):
    Handle a message from RabbitMQ, put it into our yucky global var.
    Hopefully there is a better way to do this.
    """

    def handle_message(self, message):
        try:
            self.logger.info("Received event from RabbitMQ: %s" % message)

@ -69,36 +76,36 @@ class PypoMessageHandler(Thread):
            except (UnicodeDecodeError, AttributeError):
                pass
            m = json.loads(message)
            command = m['event_type']
            command = m["event_type"]
            self.logger.info("Handling command: " + command)

            if command == 'update_schedule':
            if command == "update_schedule":
                self.logger.info("Updating schedule...")
                self.pypo_queue.put(message)
            elif command == 'reset_liquidsoap_bootstrap':
            elif command == "reset_liquidsoap_bootstrap":
                self.logger.info("Resetting bootstrap vars...")
                self.pypo_queue.put(message)
            elif command == 'update_stream_setting':
            elif command == "update_stream_setting":
                self.logger.info("Updating stream setting...")
                self.pypo_queue.put(message)
            elif command == 'update_stream_format':
            elif command == "update_stream_format":
                self.logger.info("Updating stream format...")
                self.pypo_queue.put(message)
            elif command == 'update_station_name':
            elif command == "update_station_name":
                self.logger.info("Updating station name...")
                self.pypo_queue.put(message)
            elif command == 'switch_source':
            elif command == "switch_source":
                self.logger.info("switch_source command received...")
                self.pypo_queue.put(message)
            elif command == 'update_transition_fade':
            elif command == "update_transition_fade":
                self.logger.info("Updating trasition fade...")
                self.pypo_queue.put(message)
            elif command == 'disconnect_source':
            elif command == "disconnect_source":
                self.logger.info("disconnect_source command received...")
                self.pypo_queue.put(message)
            elif command == 'update_recorder_schedule':
            elif command == "update_recorder_schedule":
                self.recorder_queue.put(message)
            elif command == 'cancel_recording':
            elif command == "cancel_recording":
                self.recorder_queue.put(message)
            else:
                self.logger.info("Unknown command: %s" % command)

@ -109,9 +116,11 @@ class PypoMessageHandler(Thread):
            try:
                self.init_rabbit_mq()
            except Exception as e:
                self.logger.error('Exception: %s', e)
                self.logger.error("Exception: %s", e)
                self.logger.error("traceback: %s", traceback.format_exc())
                self.logger.error("Error connecting to RabbitMQ Server. Trying again in few seconds")
                self.logger.error(
                    "Error connecting to RabbitMQ Server. Trying again in few seconds"
                )
                time.sleep(5)

    """

@ -119,7 +128,7 @@ class PypoMessageHandler(Thread):
    Wait for schedule updates from RabbitMQ, but in case there aren't any,
    poll the server to get the upcoming schedule.
    """

    def run(self):
        while True:
            self.main()
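
The kombu wiring being rewrapped above follows the library's standard recipe: declare an exchange and a queue, open a Connection, and let a ConsumerMixin subclass pump messages. A bare-bones equivalent with an invented broker URL and invented exchange/queue names:

from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin

class Listener(ConsumerMixin):
    def __init__(self, connection, queues):
        self.connection = connection  # ConsumerMixin requires this attribute
        self.queues = queues

    def get_consumers(self, Consumer, channel):
        return [Consumer(self.queues, callbacks=[self.on_message], accept=["text/plain"])]

    def on_message(self, body, message):
        print("got: %s" % body)
        message.ack()  # tell the broker the message was handled

exchange = Exchange("demo-exchange", "direct", durable=True)
queue = Queue("demo-queue", exchange=exchange, routing_key="foo")
with Connection("amqp://guest:guest@localhost//", heartbeat=5) as conn:
    Listener(conn, [queue]).run()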
@ -29,10 +29,12 @@ PUSH_INTERVAL = 2


def is_stream(media_item):
    return media_item['type'] == 'stream_output_start'
    return media_item["type"] == "stream_output_start"


def is_file(media_item):
    return media_item['type'] == 'file'
    return media_item["type"] == "file"


class PypoPush(Thread):
    def __init__(self, q, telnet_lock, pypo_liquidsoap, config):

@ -44,20 +46,19 @@ class PypoPush(Thread):
        self.config = config

        self.pushed_objects = {}
        self.logger = logging.getLogger('push')
        self.logger = logging.getLogger("push")
        self.current_prebuffering_stream_id = None
        self.queue_id = 0

        self.future_scheduled_queue = Queue()
        self.pypo_liquidsoap = pypo_liquidsoap

        self.plq = PypoLiqQueue(self.future_scheduled_queue, \
                                self.pypo_liquidsoap, \
                                self.logger)
        self.plq = PypoLiqQueue(
            self.future_scheduled_queue, self.pypo_liquidsoap, self.logger
        )
        self.plq.daemon = True
        self.plq.start()

    def main(self):
        loops = 0
        heartbeat_period = math.floor(30 / PUSH_INTERVAL)

@ -72,10 +73,11 @@ class PypoPush(Thread):
                raise
            else:
                self.logger.debug(media_schedule)
                #separate media_schedule list into currently_playing and
                #scheduled_for_future lists
                currently_playing, scheduled_for_future = \
                    self.separate_present_future(media_schedule)
                # separate media_schedule list into currently_playing and
                # scheduled_for_future lists
                currently_playing, scheduled_for_future = self.separate_present_future(
                    media_schedule
                )

                self.pypo_liquidsoap.verify_correct_present_media(currently_playing)
                self.future_scheduled_queue.put(scheduled_for_future)

@ -85,7 +87,6 @@ class PypoPush(Thread):
                loops = 0
            loops += 1

    def separate_present_future(self, media_schedule):
        tnow = datetime.utcnow()

@ -96,7 +97,7 @@ class PypoPush(Thread):
        for mkey in sorted_keys:
            media_item = media_schedule[mkey]

            diff_td = tnow - media_item['start']
            diff_td = tnow - media_item["start"]
            diff_sec = self.date_interval_to_seconds(diff_td)

            if diff_sec >= 0:

@ -111,8 +112,10 @@ class PypoPush(Thread):
        Convert timedelta object into int representing the number of seconds. If
        number of seconds is less than 0, then return 0.
        """
        seconds = (interval.microseconds + \
                   (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
        seconds = (
            interval.microseconds
            + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
        ) / float(10 ** 6)

        return seconds

@ -120,18 +123,18 @@ class PypoPush(Thread):
    def stop_web_stream_all(self):
        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.config['LS_HOST'], self.config['LS_PORT'])
            tn = telnetlib.Telnet(self.config["LS_HOST"], self.config["LS_PORT"])

            #msg = 'dynamic_source.read_stop_all xxx\n'
            msg = 'http.stop\n'
            # msg = 'dynamic_source.read_stop_all xxx\n'
            msg = "http.stop\n"
            self.logger.debug(msg)
            tn.write(msg)

            msg = 'dynamic_source.output_stop\n'
            msg = "dynamic_source.output_stop\n"
            self.logger.debug(msg)
            tn.write(msg)

            msg = 'dynamic_source.id -1\n'
            msg = "dynamic_source.id -1\n"
            self.logger.debug(msg)
            tn.write(msg)

@ -145,10 +148,10 @@ class PypoPush(Thread):

    def run(self):
        while True:
            try: self.main()
            try:
                self.main()
            except Exception as e:
                top = traceback.format_exc()
                self.logger.error('Pypo Push Exception: %s', top)
                self.logger.error("Pypo Push Exception: %s", top)
                time.sleep(5)
        self.logger.info('PypoPush thread exiting')
        self.logger.info("PypoPush thread exiting")


@ -24,6 +24,7 @@ import mutagen
from api_clients import version1 as v1_api_client
from api_clients import version2 as api_client


def api_client(logger):
    """
    api_client returns the correct instance of AirtimeApiClient. Although there is only one

@ -31,15 +32,17 @@ def api_client(logger):
    """
    return v1_api_client.AirtimeApiClient(logger)


# loading config file
try:
    config = ConfigObj('/etc/airtime/airtime.conf')
    config = ConfigObj("/etc/airtime/airtime.conf")
except Exception as e:
    print("Error loading config file: {}".format(e))
    sys.exit()

# TODO : add docstrings everywhere in this module


def getDateTimeObj(time):
    # TODO : clean up for this function later.
    # - use tuples to parse result from split (instead of indices)

@ -49,17 +52,20 @@ def getDateTimeObj(time):
    # shadowed
    # - add docstring to document all behaviour of this function
    timeinfo = time.split(" ")
    date = [ int(x) for x in timeinfo[0].split("-") ]
    my_time = [ int(x) for x in timeinfo[1].split(":") ]
    return datetime.datetime(date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None)
    date = [int(x) for x in timeinfo[0].split("-")]
    my_time = [int(x) for x in timeinfo[1].split(":")]
    return datetime.datetime(
        date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None
    )
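
getDateTimeObj hand-splits a "YYYY-MM-DD HH:MM:SS" string into integers. For comparison (my example, not part of the commit), datetime.strptime parses the same format in one call:

import datetime

stamp = "2021-03-01 18:30:00"
parsed = datetime.datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
assert parsed == datetime.datetime(2021, 3, 1, 18, 30, 0)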
PUSH_INTERVAL = 2

class ShowRecorder(Thread):

    def __init__ (self, show_instance, show_name, filelength, start_time):

class ShowRecorder(Thread):
    def __init__(self, show_instance, show_name, filelength, start_time):
        Thread.__init__(self)
        self.logger = logging.getLogger('recorder')
        self.logger = logging.getLogger("recorder")
        self.api_client = api_client(self.logger)
        self.filelength = filelength
        self.start_time = start_time

@ -75,35 +81,41 @@ class ShowRecorder(Thread):
        if config["pypo"]["record_file_type"] in ["mp3", "ogg"]:
            filetype = config["pypo"]["record_file_type"]
        else:
            filetype = "ogg";
            filetype = "ogg"

        joined_path = os.path.join(config["pypo"]["base_recorded_files"], filename)
        filepath = "%s.%s" % (joined_path, filetype)

        br = config["pypo"]["record_bitrate"]
        sr = config["pypo"]["record_samplerate"]
        c = config["pypo"]["record_channels"]
        c = config["pypo"]["record_channels"]
        ss = config["pypo"]["record_sample_size"]

        #-f:16,2,44100
        #-b:256
        command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % \
                  (ss, c, sr, filepath, br, length)
        # -f:16,2,44100
        # -b:256
        command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % (
            ss,
            c,
            sr,
            filepath,
            br,
            length,
        )
        args = command.split(" ")

        self.logger.info("starting record")
        self.logger.info("command " + command)

        self.p = Popen(args,stdout=PIPE,stderr=PIPE)
        self.p = Popen(args, stdout=PIPE, stderr=PIPE)

        #blocks at the following line until the child process
        #quits
        # blocks at the following line until the child process
        # quits
        self.p.wait()
        outmsgs = self.p.stdout.readlines()
        for msg in outmsgs:
            m = re.search('^ERROR',msg)
            m = re.search("^ERROR", msg)
            if not m == None:
                self.logger.info('Recording error is found: %s', outmsgs)
                self.logger.info("Recording error is found: %s", outmsgs)
        self.logger.info("finishing record, return code %s", self.p.returncode)
        code = self.p.returncode

@ -112,21 +124,25 @@ class ShowRecorder(Thread):
        return code, filepath
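
The recorder shells out to ecasound and scans the child's stdout for ERROR lines. A minimal sketch of that run-and-scan pattern with a generic command standing in for ecasound:

from subprocess import Popen, PIPE

def run_and_scan(args):
    p = Popen(args, stdout=PIPE, stderr=PIPE)
    p.wait()  # block until the child process quits
    for line in p.stdout.readlines():
        if line.startswith(b"ERROR"):  # stdout is bytes under Python 3
            print("error from child: %r" % line)
    return p.returncode

run_and_scan(["echo", "ok"])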
|
||||
|
||||
def cancel_recording(self):
|
||||
#send signal interrupt (2)
|
||||
# send signal interrupt (2)
|
||||
self.logger.info("Show manually cancelled!")
|
||||
if (self.p is not None):
|
||||
if self.p is not None:
|
||||
self.p.send_signal(signal.SIGINT)
|
||||
|
||||
#if self.p is defined, then the child process ecasound is recording
|
||||
# if self.p is defined, then the child process ecasound is recording
|
||||
def is_recording(self):
|
||||
return (self.p is not None)
|
||||
return self.p is not None
|
||||
|
||||
def upload_file(self, filepath):
|
||||
|
||||
filename = os.path.split(filepath)[1]
|
||||
|
||||
# files is what requests actually expects
|
||||
files = {'file': open(filepath, "rb"), 'name': filename, 'show_instance': self.show_instance}
|
||||
files = {
|
||||
"file": open(filepath, "rb"),
|
||||
"name": filename,
|
||||
"show_instance": self.show_instance,
|
||||
}
|
||||
|
||||
self.api_client.upload_recorded_show(files, self.show_instance)
|
||||
|
||||
|
@ -136,27 +152,25 @@ class ShowRecorder(Thread):
        self.start_time, self.show_name, self.show_instance
        """
        try:
            full_date, full_time = self.start_time.split(" ",1)
            full_date, full_time = self.start_time.split(" ", 1)
            # No idea why we translated - to : before
            #full_time = full_time.replace(":","-")
            # full_time = full_time.replace(":","-")
            self.logger.info("time: %s" % full_time)
            artist = "Airtime Show Recorder"
            #set some metadata for our file daemon
            recorded_file = mutagen.File(filepath, easy = True)
            recorded_file['artist'] = artist
            recorded_file['date'] = full_date
            recorded_file['title'] = "%s-%s-%s" % (self.show_name,
                full_date, full_time)
            #You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string
            recorded_file['tracknumber'] = self.show_instance
            # set some metadata for our file daemon
            recorded_file = mutagen.File(filepath, easy=True)
            recorded_file["artist"] = artist
            recorded_file["date"] = full_date
            recorded_file["title"] = "%s-%s-%s" % (self.show_name, full_date, full_time)
            # You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string
            recorded_file["tracknumber"] = self.show_instance
            recorded_file.save()

        except Exception as e:
            top = traceback.format_exc()
            self.logger.error('Exception: %s', e)
            self.logger.error("Exception: %s", e)
            self.logger.error("traceback: %s", top)
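The tagging in this hunk uses mutagen's easy interface, where every value must be a string; a self-contained sketch with illustrative values:

# mutagen's easy interface: keys are plain tag names and every value
# must be a string (even tracknumber, as the comment above notes).
import mutagen

recorded_file = mutagen.File("/tmp/show.ogg", easy=True)
recorded_file["artist"] = "Airtime Show Recorder"
recorded_file["date"] = "2014-04-01"
recorded_file["title"] = "Untitled-2014-04-01-12:00:00"
recorded_file["tracknumber"] = "1"  # string, not int
recorded_file.save()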

    def run(self):
        code, filepath = self.record_show()

@ -174,14 +188,15 @@ class ShowRecorder(Thread):
            self.logger.info("problem recording show")
            os.remove(filepath)


class Recorder(Thread):
    def __init__(self, q):
        Thread.__init__(self)
        self.logger = logging.getLogger('recorder')
        self.logger = logging.getLogger("recorder")
        self.api_client = api_client(self.logger)
        self.sr = None
        self.shows_to_record = {}
        self.server_timezone = ''
        self.server_timezone = ""
        self.queue = q
        self.loops = 0
        self.logger.info("RecorderFetch: init complete")

@ -189,7 +204,7 @@ class Recorder(Thread):
        success = False
        while not success:
            try:
                self.api_client.register_component('show-recorder')
                self.api_client.register_component("show-recorder")
                success = True
            except Exception as e:
                self.logger.error(str(e))

@ -205,7 +220,7 @@ class Recorder(Thread):
            msg = json.loads(message)
            command = msg["event_type"]
            self.logger.info("Received msg from Pypo Message Handler: %s", msg)
            if command == 'cancel_recording':
            if command == "cancel_recording":
                if self.currently_recording():
                    self.cancel_recording()
        else:

@ -218,14 +233,18 @@ class Recorder(Thread):
    def process_recorder_schedule(self, m):
        self.logger.info("Parsing recording show schedules...")
        temp_shows_to_record = {}
        shows = m['shows']
        shows = m["shows"]
        for show in shows:
            show_starts = getDateTimeObj(show['starts'])
            show_end = getDateTimeObj(show['ends'])
            show_starts = getDateTimeObj(show["starts"])
            show_end = getDateTimeObj(show["ends"])
            time_delta = show_end - show_starts

            temp_shows_to_record[show['starts']] = [time_delta,
                show['instance_id'], show['name'], m['server_timezone']]
            temp_shows_to_record[show["starts"]] = [
                time_delta,
                show["instance_id"],
                show["name"],
                m["server_timezone"],
            ]
        self.shows_to_record = temp_shows_to_record

    def get_time_till_next_show(self):
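The mapping built by process_recorder_schedule() keys each show by its start timestamp; a sketch of its shape, with getDateTimeObj approximated by strptime and illustrative show data:

# Illustrative shape of shows_to_record:
#   "YYYY-MM-DD HH:MM:SS" -> [duration, instance_id, name, timezone]
from datetime import datetime

show = {"starts": "2014-04-01 16:00:00", "ends": "2014-04-01 17:00:00",
        "instance_id": 1, "name": "Morning Show"}
fmt = "%Y-%m-%d %H:%M:%S"
time_delta = (datetime.strptime(show["ends"], fmt)
              - datetime.strptime(show["starts"], fmt))
shows_to_record = {
    show["starts"]: [time_delta, show["instance_id"], show["name"], "UTC"]
}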
@ -237,7 +256,7 @@ class Recorder(Thread):
        next_show = getDateTimeObj(start_time)

        delta = next_show - tnow
        s = '%s.%s' % (delta.seconds, delta.microseconds)
        s = "%s.%s" % (delta.seconds, delta.microseconds)
        out = float(s)

        if out < 5:

@ -257,7 +276,8 @@ class Recorder(Thread):
        return False

    def start_record(self):
        if len(self.shows_to_record) == 0: return None
        if len(self.shows_to_record) == 0:
            return None
        try:
            delta = self.get_time_till_next_show()
            if delta < 5:

@ -273,16 +293,25 @@ class Recorder(Thread):

            T = pytz.timezone(server_timezone)
            start_time_on_UTC = getDateTimeObj(start_time)
            start_time_on_server = start_time_on_UTC.replace(tzinfo=pytz.utc).astimezone(T)
            start_time_formatted = '%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d' % \
                {'year': start_time_on_server.year, 'month': start_time_on_server.month, 'day': start_time_on_server.day, \
                 'hour': start_time_on_server.hour, 'min': start_time_on_server.minute, 'sec': start_time_on_server.second}

            start_time_on_server = start_time_on_UTC.replace(
                tzinfo=pytz.utc
            ).astimezone(T)
            start_time_formatted = (
                "%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d"
                % {
                    "year": start_time_on_server.year,
                    "month": start_time_on_server.month,
                    "day": start_time_on_server.day,
                    "hour": start_time_on_server.hour,
                    "min": start_time_on_server.minute,
                    "sec": start_time_on_server.second,
                }
            )

            seconds_waiting = 0

            #avoiding CC-5299
            while(True):
            # avoiding CC-5299
            while True:
                if self.currently_recording():
                    self.logger.info("Previous record not finished, sleeping 100ms")
                    seconds_waiting = seconds_waiting + 0.1
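The timezone handling above converts a naive UTC timestamp into the station's local wall-clock time; a minimal pytz sketch (the zone name is illustrative):

# Attach UTC to a naive datetime, then convert to the server zone.
from datetime import datetime
import pytz

start_time_on_UTC = datetime(2014, 4, 1, 16, 0, 0)  # naive, known to be UTC
T = pytz.timezone("Europe/Berlin")  # illustrative zone
start_time_on_server = start_time_on_UTC.replace(tzinfo=pytz.utc).astimezone(T)
print(start_time_on_server.strftime("%Y-%m-%d %H:%M:%S"))  # 2014-04-01 18:00:00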
@ -290,16 +319,21 @@ class Recorder(Thread):
                else:
                    show_length_seconds = show_length.seconds - seconds_waiting

                    self.sr = ShowRecorder(show_instance, show_name, show_length_seconds, start_time_formatted)
                    self.sr = ShowRecorder(
                        show_instance,
                        show_name,
                        show_length_seconds,
                        start_time_formatted,
                    )
                    self.sr.start()
                    break

            #remove show from shows to record.
            # remove show from shows to record.
            del self.shows_to_record[start_time]
            #self.time_till_next_show = self.get_time_till_next_show()
        except Exception as e :
            # self.time_till_next_show = self.get_time_till_next_show()
        except Exception as e:
            top = traceback.format_exc()
            self.logger.error('Exception: %s', e)
            self.logger.error("Exception: %s", e)
            self.logger.error("traceback: %s", top)

    def run(self):

@ -318,7 +352,7 @@ class Recorder(Thread):
                self.process_recorder_schedule(temp)
                self.logger.info("Bootstrap recorder schedule received: %s", temp)
            except Exception as e:
                self.logger.error( traceback.format_exc() )
                self.logger.error(traceback.format_exc())
                self.logger.error(e)

        self.logger.info("Bootstrap complete: got initial copy of the schedule")

@ -338,16 +372,16 @@ class Recorder(Thread):
                        self.process_recorder_schedule(temp)
                        self.logger.info("updated recorder schedule received: %s", temp)
                    except Exception as e:
                        self.logger.error( traceback.format_exc() )
                        self.logger.error(traceback.format_exc())
                        self.logger.error(e)
                try: self.handle_message()
                try:
                    self.handle_message()
                except Exception as e:
                    self.logger.error( traceback.format_exc() )
                    self.logger.error('Pypo Recorder Exception: %s', e)
                    self.logger.error(traceback.format_exc())
                    self.logger.error("Pypo Recorder Exception: %s", e)
                time.sleep(PUSH_INTERVAL)
                self.loops += 1
        except Exception as e :
        except Exception as e:
            top = traceback.format_exc()
            self.logger.error('Exception: %s', e)
            self.logger.error("Exception: %s", e)
            self.logger.error("traceback: %s", top)
@ -4,32 +4,36 @@ import telnetlib
from .timeout import ls_timeout
import traceback


def create_liquidsoap_annotation(media):
    # We need liq_start_next value in the annotate. That is the value that controls overlap duration of crossfade.

    filename = media['dst']
    annotation = ('annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",' + \
        'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",' + \
        'schedule_table_id="%s",replay_gain="%s dB"') % \
        (media['id'],
         float(media['fade_in']) / 1000,
         float(media['fade_out']) / 1000,
         float(media['cue_in']),
         float(media['cue_out']),
         media['row_id'],
         media['replay_gain'])
    filename = media["dst"]
    annotation = (
        'annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",'
        + 'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",'
        + 'schedule_table_id="%s",replay_gain="%s dB"'
    ) % (
        media["id"],
        float(media["fade_in"]) / 1000,
        float(media["fade_out"]) / 1000,
        float(media["cue_in"]),
        float(media["cue_out"]),
        media["row_id"],
        media["replay_gain"],
    )

    # Override the artist/title that Liquidsoap extracts from a file's metadata
    # with the metadata we get from Airtime. (You can modify metadata in Airtime's library,
    # which doesn't get saved back to the file.)
    if 'metadata' in media:
    if "metadata" in media:

        if 'artist_name' in media['metadata']:
            artist_name = media['metadata']['artist_name']
        if "artist_name" in media["metadata"]:
            artist_name = media["metadata"]["artist_name"]
            if isinstance(artist_name, str):
                annotation += ',artist="%s"' % (artist_name.replace('"', '\\"'))
        if 'track_title' in media['metadata']:
            track_title = media['metadata']['track_title']
        if "track_title" in media["metadata"]:
            track_title = media["metadata"]["track_title"]
            if isinstance(track_title, str):
                annotation += ',title="%s"' % (track_title.replace('"', '\\"'))
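Fed a typical media dict, the builder above yields the annotate: prefix that Liquidsoap parses; a sketch with illustrative values (the returned string is ultimately combined with media["dst"], which this hunk does not show):

# Illustrative input and the resulting annotate prefix.
media = {
    "id": 5, "row_id": 9, "dst": "/tmp/song.mp3", "replay_gain": -3.2,
    "fade_in": 500, "fade_out": 500, "cue_in": 0, "cue_out": 180,
}
annotation = (
    'annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",'
    'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",'
    'schedule_table_id="%s",replay_gain="%s dB"'
) % (
    media["id"], float(media["fade_in"]) / 1000, float(media["fade_out"]) / 1000,
    float(media["cue_in"]), float(media["cue_out"]),
    media["row_id"], media["replay_gain"],
)
print(annotation)
# annotate:media_id="5",liq_start_next="0",liq_fade_in="0.5",...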
@ -37,8 +41,8 @@ def create_liquidsoap_annotation(media):

    return annotation

class TelnetLiquidsoap:


class TelnetLiquidsoap:
    def __init__(self, telnet_lock, logger, ls_host, ls_port, queues):
        self.telnet_lock = telnet_lock
        self.ls_host = ls_host

@ -53,9 +57,9 @@ class TelnetLiquidsoap:
    def __is_empty(self, queue_id):
        return True
        tn = self.__connect()
        msg = '%s.queue\nexit\n' % queue_id
        tn.write(msg.encode('utf-8'))
        output = tn.read_all().decode('utf-8').splitlines()
        msg = "%s.queue\nexit\n" % queue_id
        tn.write(msg.encode("utf-8"))
        output = tn.read_all().decode("utf-8").splitlines()
        if len(output) == 3:
            return len(output[0]) == 0
        else:

@ -68,12 +72,12 @@ class TelnetLiquidsoap:
            tn = self.__connect()

            for i in self.queues:
                msg = 'queues.%s_skip\n' % i
                msg = "queues.%s_skip\n" % i
                self.logger.debug(msg)
                tn.write(msg.encode('utf-8'))
                tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))
        except Exception:
            raise
        finally:

@ -85,18 +89,17 @@ class TelnetLiquidsoap:
            self.telnet_lock.acquire()
            tn = self.__connect()

            msg = 'queues.%s_skip\n' % queue_id
            msg = "queues.%s_skip\n" % queue_id
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))
        except Exception:
            raise
        finally:
            self.telnet_lock.release()

    @ls_timeout
    def queue_push(self, queue_id, media_item):
        try:

@ -107,40 +110,39 @@ class TelnetLiquidsoap:

            tn = self.__connect()
            annotation = create_liquidsoap_annotation(media_item)
            msg = '%s.push %s\n' % (queue_id, annotation)
            msg = "%s.push %s\n" % (queue_id, annotation)
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            show_name = media_item['show_name']
            msg = 'vars.show_name %s\n' % show_name
            tn.write(msg.encode('utf-8'))
            show_name = media_item["show_name"]
            msg = "vars.show_name %s\n" % show_name
            tn.write(msg.encode("utf-8"))
            self.logger.debug(msg)

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))
        except Exception:
            raise
        finally:
            self.telnet_lock.release()
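Every method in this class follows the same telnet pattern: write newline-terminated commands, then "exit", then read everything back. Reduced to its essentials, without the locking and logging (host, port, queue and file path are illustrative):

# The bare telnet exchange behind queue_push().
import telnetlib

tn = telnetlib.Telnet("localhost", 1234)
tn.write('s0.push annotate:media_id="5":/tmp/song.mp3\n'.encode("utf-8"))
tn.write("exit\n".encode("utf-8"))
print(tn.read_all().decode("utf-8"))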
    @ls_timeout
    def stop_web_stream_buffer(self):
        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.ls_host, self.ls_port)
            #dynamic_source.stop http://87.230.101.24:80/top100station.mp3
            # dynamic_source.stop http://87.230.101.24:80/top100station.mp3

            msg = 'http.stop\n'
            msg = "http.stop\n"
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            msg = 'dynamic_source.id -1\n'
            msg = "dynamic_source.id -1\n"
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))

        except Exception as e:
            self.logger.error(str(e))

@ -153,14 +155,14 @@ class TelnetLiquidsoap:
        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.ls_host, self.ls_port)
            #dynamic_source.stop http://87.230.101.24:80/top100station.mp3
            # dynamic_source.stop http://87.230.101.24:80/top100station.mp3

            msg = 'dynamic_source.output_stop\n'
            msg = "dynamic_source.output_stop\n"
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))

        except Exception as e:
            self.logger.error(str(e))

@ -174,16 +176,16 @@ class TelnetLiquidsoap:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.ls_host, self.ls_port)

            #TODO: DO we need this?
            msg = 'streams.scheduled_play_start\n'
            tn.write(msg.encode('utf-8'))
            # TODO: DO we need this?
            msg = "streams.scheduled_play_start\n"
            tn.write(msg.encode("utf-8"))

            msg = 'dynamic_source.output_start\n'
            msg = "dynamic_source.output_start\n"
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))

            self.current_prebuffering_stream_id = None
        except Exception as e:

@ -198,18 +200,18 @@ class TelnetLiquidsoap:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.ls_host, self.ls_port)

            msg = 'dynamic_source.id %s\n' % media_item['row_id']
            msg = "dynamic_source.id %s\n" % media_item["row_id"]
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            msg = 'http.restart %s\n' % media_item['uri']
            msg = "http.restart %s\n" % media_item["uri"]
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            self.logger.debug(tn.read_all().decode('utf-8'))
            tn.write("exit\n".encode("utf-8"))
            self.logger.debug(tn.read_all().decode("utf-8"))

            self.current_prebuffering_stream_id = media_item['row_id']
            self.current_prebuffering_stream_id = media_item["row_id"]
        except Exception as e:
            self.logger.error(str(e))
            self.logger.error(traceback.format_exc())

@ -222,12 +224,12 @@ class TelnetLiquidsoap:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.ls_host, self.ls_port)

            msg = 'dynamic_source.get_id\n'
            msg = "dynamic_source.get_id\n"
            self.logger.debug(msg)
            tn.write(msg.encode('utf-8'))
            tn.write(msg.encode("utf-8"))

            tn.write("exit\n".encode('utf-8'))
            stream_id = tn.read_all().decode('utf-8').splitlines()[0]
            tn.write("exit\n".encode("utf-8"))
            stream_id = tn.read_all().decode("utf-8").splitlines()[0]
            self.logger.debug("stream_id: %s" % stream_id)

            return stream_id

@ -239,20 +241,20 @@ class TelnetLiquidsoap:

    @ls_timeout
    def disconnect_source(self, sourcename):
        self.logger.debug('Disconnecting source: %s', sourcename)
        self.logger.debug("Disconnecting source: %s", sourcename)
        command = ""
        if(sourcename == "master_dj"):
        if sourcename == "master_dj":
            command += "master_harbor.stop\n"
        elif(sourcename == "live_dj"):
        elif sourcename == "live_dj":
            command += "live_dj_harbor.stop\n"

        try:
            self.telnet_lock.acquire()
            tn = telnetlib.Telnet(self.ls_host, self.ls_port)
            self.logger.info(command)
            tn.write(command.encode('utf-8'))
            tn.write('exit\n'.encode('utf-8'))
            tn.read_all().decode('utf-8')
            tn.write(command.encode("utf-8"))
            tn.write("exit\n".encode("utf-8"))
            tn.read_all().decode("utf-8")
        except Exception as e:
            self.logger.error(traceback.format_exc())
        finally:

@ -267,18 +269,17 @@ class TelnetLiquidsoap:
            for i in commands:
                self.logger.info(i)
                if type(i) is str:
                    i = i.encode('utf-8')
                    i = i.encode("utf-8")
                tn.write(i)

            tn.write('exit\n'.encode('utf-8'))
            tn.read_all().decode('utf-8')
            tn.write("exit\n".encode("utf-8"))
            tn.read_all().decode("utf-8")
        except Exception as e:
            self.logger.error(str(e))
            self.logger.error(traceback.format_exc())
        finally:
            self.telnet_lock.release()

    def switch_source(self, sourcename, status):
        self.logger.debug('Switching source: %s to "%s" status', sourcename, status)
        command = "streams."

@ -296,15 +297,15 @@ class TelnetLiquidsoap:

        self.telnet_send([command])

class DummyTelnetLiquidsoap:


class DummyTelnetLiquidsoap:
    def __init__(self, telnet_lock, logger):
        self.telnet_lock = telnet_lock
        self.liquidsoap_mock_queues = {}
        self.logger = logger

        for i in range(4):
            self.liquidsoap_mock_queues["s"+str(i)] = []
            self.liquidsoap_mock_queues["s" + str(i)] = []

    @ls_timeout
    def queue_push(self, queue_id, media_item):

@ -313,6 +314,7 @@ class DummyTelnetLiquidsoap:

        self.logger.info("Pushing %s to queue %s" % (media_item, queue_id))
        from datetime import datetime

        print("Time now: {:s}".format(datetime.utcnow()))

        annotation = create_liquidsoap_annotation(media_item)

@ -329,6 +331,7 @@ class DummyTelnetLiquidsoap:

        self.logger.info("Purging queue %s" % queue_id)
        from datetime import datetime

        print("Time now: {:s}".format(datetime.utcnow()))

        except Exception:

@ -336,5 +339,6 @@ class DummyTelnetLiquidsoap:
        finally:
            self.telnet_lock.release()


class QueueNotEmptyException(Exception):
    pass
@ -13,14 +13,17 @@ import logging
from datetime import datetime
from datetime import timedelta


def keyboardInterruptHandler(signum, frame):
    logger = logging.getLogger()
    logger.info('\nKeyboard Interrupt\n')
    logger.info("\nKeyboard Interrupt\n")
    sys.exit(0)


signal.signal(signal.SIGINT, keyboardInterruptHandler)

# configure logging
format = '%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s'
format = "%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=format)
logging.captureWarnings(True)

@ -30,19 +33,18 @@ pypoPush_q = Queue()

pypoLiq_q = Queue()
liq_queue_tracker = {
    "s0": None,
    "s1": None,
    "s2": None,
    "s3": None,
}
    "s0": None,
    "s1": None,
    "s2": None,
    "s3": None,
}

#dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging)
dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, \
    "localhost", \
    1234)
# dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging)
dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, "localhost", 1234)

plq = PypoLiqQueue(pypoLiq_q, telnet_lock, logging, liq_queue_tracker, \
    dummy_telnet_liquidsoap)
plq = PypoLiqQueue(
    pypoLiq_q, telnet_lock, logging, liq_queue_tracker, dummy_telnet_liquidsoap
)
plq.daemon = True
plq.start()
@ -54,47 +56,43 @@ media_schedule = {}
start_dt = datetime.utcnow() + timedelta(seconds=1)
end_dt = datetime.utcnow() + timedelta(seconds=6)

media_schedule[start_dt] = {"id": 5, \
    "type":"file", \
    "row_id":9, \
    "uri":"", \
    "dst":"/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3", \
    "fade_in":0, \
    "fade_out":0, \
    "cue_in":0, \
    "cue_out":300, \
    "start": start_dt, \
    "end": end_dt, \
    "show_name":"Untitled", \
    "replay_gain": 0, \
    "independent_event": True \
}

media_schedule[start_dt] = {
    "id": 5,
    "type": "file",
    "row_id": 9,
    "uri": "",
    "dst": "/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3",
    "fade_in": 0,
    "fade_out": 0,
    "cue_in": 0,
    "cue_out": 300,
    "start": start_dt,
    "end": end_dt,
    "show_name": "Untitled",
    "replay_gain": 0,
    "independent_event": True,
}


start_dt = datetime.utcnow() + timedelta(seconds=2)
end_dt = datetime.utcnow() + timedelta(seconds=6)

media_schedule[start_dt] = {"id": 5, \
    "type":"file", \
    "row_id":9, \
    "uri":"", \
    "dst":"/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3", \
    "fade_in":0, \
    "fade_out":0, \
    "cue_in":0, \
    "cue_out":300, \
    "start": start_dt, \
    "end": end_dt, \
    "show_name":"Untitled", \
    "replay_gain": 0, \
    "independent_event": True \
}
media_schedule[start_dt] = {
    "id": 5,
    "type": "file",
    "row_id": 9,
    "uri": "",
    "dst": "/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3",
    "fade_in": 0,
    "fade_out": 0,
    "cue_in": 0,
    "cue_out": 300,
    "start": start_dt,
    "end": end_dt,
    "show_name": "Untitled",
    "replay_gain": 0,
    "independent_event": True,
}
pypoLiq_q.put(media_schedule)

plq.join()
@ -2,12 +2,13 @@
import threading
from . import pypofetch

def __timeout(func, timeout_duration, default, args, kwargs):


def __timeout(func, timeout_duration, default, args, kwargs):
    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = default

        def run(self):
            self.result = func(*args, **kwargs)

@ -21,10 +22,10 @@ def __timeout(func, timeout_duration, default, args, kwargs):
        it.join(timeout_duration)

        if it.isAlive():
            """Restart Liquidsoap and try the command one more time. If it
            """Restart Liquidsoap and try the command one more time. If it
            fails again then there is something critically wrong..."""
            if first_attempt:
                #restart liquidsoap
                # restart liquidsoap
                pypofetch.PypoFetch.ref.restart_liquidsoap()
            else:
                raise Exception("Thread did not terminate")

@ -33,7 +34,9 @@ def __timeout(func, timeout_duration, default, args, kwargs):

    first_attempt = False


def ls_timeout(f, timeout=15, default=None):
    def new_f(*args, **kwargs):
        return __timeout(f, timeout, default, args, kwargs)

    return new_f
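ls_timeout wraps a function so it runs on a joinable thread and is abandoned if it overruns; the same pattern, stripped of the Liquidsoap restart-and-retry logic, in a self-contained sketch:

# Run func on a daemon thread; give up (here: raise) if it does not
# finish within the timeout. Simplified: no restart step.
import threading

def run_with_timeout(func, timeout_duration, default, *args, **kwargs):
    result = [default]

    def target():
        result[0] = func(*args, **kwargs)

    it = threading.Thread(target=target, daemon=True)
    it.start()
    it.join(timeout_duration)
    if it.is_alive():
        raise Exception("Thread did not terminate")
    return result[0]

print(run_with_timeout(lambda x: x * 2, 1.0, None, 21))  # -> 42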
@ -10,64 +10,63 @@ print(script_path)
os.chdir(script_path)

# Allows us to avoid installing the upstart init script when deploying on Airtime Pro:
if '--no-init-script' in sys.argv:
if "--no-init-script" in sys.argv:
    data_files = []
    sys.argv.remove('--no-init-script') # super hax
    sys.argv.remove("--no-init-script")  # super hax
else:
    pypo_files = []
    for root, dirnames, filenames in os.walk('pypo'):
    for root, dirnames, filenames in os.walk("pypo"):
        for filename in filenames:
            pypo_files.append(os.path.join(root, filename))

    data_files = [
        ('/etc/init', ['install/upstart/airtime-playout.conf.template']),
        ('/etc/init', ['install/upstart/airtime-liquidsoap.conf.template']),
        ('/etc/init.d', ['install/sysvinit/airtime-playout']),
        ('/etc/init.d', ['install/sysvinit/airtime-liquidsoap']),
        ('/var/log/airtime/pypo', []),
        ('/var/log/airtime/pypo-liquidsoap', []),
        ('/var/tmp/airtime/pypo', []),
        ('/var/tmp/airtime/pypo/cache', []),
        ('/var/tmp/airtime/pypo/files', []),
        ('/var/tmp/airtime/pypo/tmp', []),
    ]
        ("/etc/init", ["install/upstart/airtime-playout.conf.template"]),
        ("/etc/init", ["install/upstart/airtime-liquidsoap.conf.template"]),
        ("/etc/init.d", ["install/sysvinit/airtime-playout"]),
        ("/etc/init.d", ["install/sysvinit/airtime-liquidsoap"]),
        ("/var/log/airtime/pypo", []),
        ("/var/log/airtime/pypo-liquidsoap", []),
        ("/var/tmp/airtime/pypo", []),
        ("/var/tmp/airtime/pypo/cache", []),
        ("/var/tmp/airtime/pypo/files", []),
        ("/var/tmp/airtime/pypo/tmp", []),
    ]
    print(data_files)

setup(name='airtime-playout',
    version='1.0',
    description='Airtime Playout Engine',
    url='http://github.com/sourcefabric/Airtime',
    author='sourcefabric',
    license='AGPLv3',
    packages=['pypo', 'pypo.media', 'pypo.media.update',
        'liquidsoap'],
    package_data={'': ['**/*.liq', '*.cfg', '*.types']},
    scripts=[
        'bin/airtime-playout',
        'bin/airtime-liquidsoap',
        'bin/pyponotify'
    ],
    install_requires=[
        'amqplib',
        'anyjson',
        'argparse',
        'configobj',
        'docopt',
        'future',
        'kombu',
        'mutagen',
        'PyDispatcher',
        'pyinotify',
        'pytz',
        'requests',
        'defusedxml',
        'packaging',
    ],
    zip_safe=False,
    data_files=data_files)
setup(
    name="airtime-playout",
    version="1.0",
    description="Airtime Playout Engine",
    url="http://github.com/sourcefabric/Airtime",
    author="sourcefabric",
    license="AGPLv3",
    packages=["pypo", "pypo.media", "pypo.media.update", "liquidsoap"],
    package_data={"": ["**/*.liq", "*.cfg", "*.types"]},
    scripts=["bin/airtime-playout", "bin/airtime-liquidsoap", "bin/pyponotify"],
    install_requires=[
        "amqplib",
        "anyjson",
        "argparse",
        "configobj",
        "docopt",
        "future",
        "kombu",
        "mutagen",
        "PyDispatcher",
        "pyinotify",
        "pytz",
        "requests",
        "defusedxml",
        "packaging",
    ],
    zip_safe=False,
    data_files=data_files,
)

# Reload the initctl config so that playout services works
if data_files:
    print("Reloading initctl configuration")
    #call(['initctl', 'reload-configuration'])
    print("Run \"sudo service airtime-playout start\" and \"sudo service airtime-liquidsoap start\"")
    # call(['initctl', 'reload-configuration'])
    print(
        'Run "sudo service airtime-playout start" and "sudo service airtime-liquidsoap start"'
    )
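Each data_files entry in the setup() call above pairs an install directory with the files to copy there, which is how the init scripts land outside the Python package; a trimmed illustration:

# One (directory, [files]) pair per target; an empty file list is
# used here so the directory itself gets created at install time.
data_files = [
    ("/etc/init.d", ["install/sysvinit/airtime-playout"]),
    ("/var/log/airtime/pypo", []),
]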