From 0cd499eee131ffc5526695d20cd0db48b4611548 Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 15:10:58 +0200 Subject: [PATCH 01/28] Add pre-commit setup - Add pre-commit hooks - Add github action to enforce pre-commit setup For any hooks required for a 'sub project', for instance the UI. It will be possible to create custom hooks, and call some package scripts in the package.json file. Fixes #1208 --- .codespellignore | 2 ++ .github/workflows/lint.yml | 14 +++++++++++ .pre-commit-config.yaml | 50 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 .codespellignore create mode 100644 .github/workflows/lint.yml create mode 100644 .pre-commit-config.yaml diff --git a/.codespellignore b/.codespellignore new file mode 100644 index 000000000..a04dbeef4 --- /dev/null +++ b/.codespellignore @@ -0,0 +1,2 @@ +hda +HDA diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..b943e5ae8 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,14 @@ +name: lint + +on: + pull_request: + push: + branches: [master] + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - uses: pre-commit/action@v2 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..cbea7daa4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,50 @@ +--- +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +exclude: ^(airtime_mvc.*)$ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-executables-have-shebangs + - id: check-shebang-scripts-are-executable + - id: check-symlinks + - id: destroyed-symlinks + + - id: check-json + - id: check-yaml + - id: check-yaml + - id: check-toml + + - id: check-merge-conflict + - id: end-of-file-fixer + - 
id: mixed-line-ending + - id: trailing-whitespace + + - id: fix-encoding-pragma + - id: requirements-txt-fixer + - id: name-tests-test + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.3.0 + hooks: + - id: prettier + files: \.(md|yml|yaml|json)$ + + - repo: https://github.com/psf/black + rev: 21.5b1 + hooks: + - id: black + + - repo: https://github.com/pre-commit/mirrors-pylint + rev: v3.0.0a3 + hooks: + - id: pylint + + - repo: https://github.com/codespell-project/codespell + rev: v2.0.0 + hooks: + - id: codespell + args: [--ignore-words=.codespellignore] From ac17db97fc1827bd5d310a4b4aef9e75bb8b31a1 Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 15:21:02 +0200 Subject: [PATCH 02/28] Fix missing encoding pragma on python files --- api/libretimeapi/apps.py | 1 + api/libretimeapi/managers.py | 1 + api/libretimeapi/models/__init__.py | 1 + api/libretimeapi/models/authentication.py | 1 + api/libretimeapi/models/celery.py | 1 + api/libretimeapi/models/countries.py | 1 + api/libretimeapi/models/files.py | 1 + api/libretimeapi/models/playlists.py | 1 + api/libretimeapi/models/playout.py | 1 + api/libretimeapi/models/podcasts.py | 1 + api/libretimeapi/models/preferences.py | 1 + api/libretimeapi/models/schedule.py | 1 + api/libretimeapi/models/services.py | 1 + api/libretimeapi/models/shows.py | 1 + api/libretimeapi/models/smart_blocks.py | 1 + api/libretimeapi/models/tracks.py | 1 + api/libretimeapi/models/user_constants.py | 1 + api/libretimeapi/models/webstreams.py | 1 + api/libretimeapi/permission_constants.py | 1 + api/libretimeapi/permissions.py | 1 + api/libretimeapi/serializers.py | 1 + api/libretimeapi/settings.py | 1 + api/libretimeapi/tests/runners.py | 1 + api/libretimeapi/tests/test_models.py | 1 + api/libretimeapi/tests/test_permissions.py | 1 + api/libretimeapi/tests/test_views.py | 1 + api/libretimeapi/urls.py | 1 + api/libretimeapi/utils.py | 1 + api/libretimeapi/views.py | 1 + api/libretimeapi/wsgi.py | 1 + api/setup.py | 1 + 
dev_tools/compare_cc_files_to_fs.py | 1 + python_apps/airtime-celery/airtime-celery/__init__.py | 1 + python_apps/airtime-celery/airtime-celery/celeryconfig.py | 1 + python_apps/airtime-celery/airtime-celery/tasks.py | 1 + python_apps/airtime-celery/setup.py | 1 + .../airtime_analyzer/airtime_analyzer/airtime_analyzer.py | 1 + python_apps/airtime_analyzer/airtime_analyzer/analyzer.py | 1 + .../airtime_analyzer/airtime_analyzer/analyzer_pipeline.py | 1 + python_apps/airtime_analyzer/airtime_analyzer/cli.py | 1 + python_apps/airtime_analyzer/airtime_analyzer/config_file.py | 1 + .../airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py | 1 + .../airtime_analyzer/airtime_analyzer/filemover_analyzer.py | 1 + .../airtime_analyzer/airtime_analyzer/message_listener.py | 1 + .../airtime_analyzer/airtime_analyzer/metadata_analyzer.py | 1 + .../airtime_analyzer/airtime_analyzer/playability_analyzer.py | 1 + .../airtime_analyzer/airtime_analyzer/replaygain_analyzer.py | 1 + python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py | 1 + python_apps/airtime_analyzer/setup.py | 1 + python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py | 1 + python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py | 1 + python_apps/airtime_analyzer/tests/analyzer_tests.py | 1 + python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py | 1 + python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py | 1 + python_apps/airtime_analyzer/tests/playability_analyzer_tests.py | 1 + python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py | 1 + python_apps/api_clients/api_clients/utils.py | 1 + python_apps/api_clients/api_clients/version1.py | 1 + python_apps/api_clients/api_clients/version2.py | 1 + python_apps/api_clients/setup.py | 1 + python_apps/api_clients/tests/test_apcurl.py | 1 + python_apps/api_clients/tests/test_apirequest.py | 1 + python_apps/api_clients/tests/test_requestprovider.py | 1 + python_apps/api_clients/tests/test_utils.py | 1 + 
python_apps/pypo/bin/airtime-liquidsoap | 1 + python_apps/pypo/bin/airtime-playout | 1 + python_apps/pypo/liquidsoap/__main__.py | 1 + python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py | 1 + python_apps/pypo/liquidsoap/liquidsoap_auth.py | 1 + python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py | 1 + python_apps/pypo/pypo/__main__.py | 1 + python_apps/pypo/pypo/eventtypes.py | 1 + python_apps/pypo/pypo/listenerstat.py | 1 + python_apps/pypo/pypo/pure.py | 1 + python_apps/pypo/pypo/pypoliqqueue.py | 1 + python_apps/pypo/pypo/pypoliquidsoap.py | 1 + python_apps/pypo/pypo/telnetliquidsoap.py | 1 + python_apps/pypo/pypo/testpypoliqqueue.py | 1 + python_apps/pypo/pypo/timeout.py | 1 + python_apps/pypo/setup.py | 1 + utils/airtime-import/airtime-import | 1 + utils/airtime-silan | 1 + utils/airtime-test-soundcard.py | 1 + utils/airtime-test-stream.py | 1 + utils/upgrade.py | 1 + 85 files changed, 85 insertions(+) diff --git a/api/libretimeapi/apps.py b/api/libretimeapi/apps.py index ea28bb2ad..d02db415c 100644 --- a/api/libretimeapi/apps.py +++ b/api/libretimeapi/apps.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.apps import AppConfig from django.db.models.signals import pre_save diff --git a/api/libretimeapi/managers.py b/api/libretimeapi/managers.py index ea537970c..f5950acb9 100644 --- a/api/libretimeapi/managers.py +++ b/api/libretimeapi/managers.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.contrib.auth.models import BaseUserManager class UserManager(BaseUserManager): diff --git a/api/libretimeapi/models/__init__.py b/api/libretimeapi/models/__init__.py index a6c479d8b..3a69c2101 100644 --- a/api/libretimeapi/models/__init__.py +++ b/api/libretimeapi/models/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from .authentication import * from .celery import * from .countries import * diff --git a/api/libretimeapi/models/authentication.py b/api/libretimeapi/models/authentication.py index 7d794bab3..f75023dac 100644 --- 
a/api/libretimeapi/models/authentication.py +++ b/api/libretimeapi/models/authentication.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import hashlib from django.contrib import auth from django.contrib.auth.models import AbstractBaseUser, Permission diff --git a/api/libretimeapi/models/celery.py b/api/libretimeapi/models/celery.py index 3a3ac8dc3..1527f3b35 100644 --- a/api/libretimeapi/models/celery.py +++ b/api/libretimeapi/models/celery.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models diff --git a/api/libretimeapi/models/countries.py b/api/libretimeapi/models/countries.py index 9c56a594e..e0fcf96d9 100644 --- a/api/libretimeapi/models/countries.py +++ b/api/libretimeapi/models/countries.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models diff --git a/api/libretimeapi/models/files.py b/api/libretimeapi/models/files.py index 77d75d8e1..fb154b11f 100644 --- a/api/libretimeapi/models/files.py +++ b/api/libretimeapi/models/files.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models diff --git a/api/libretimeapi/models/playlists.py b/api/libretimeapi/models/playlists.py index d783a0a59..f3f955517 100644 --- a/api/libretimeapi/models/playlists.py +++ b/api/libretimeapi/models/playlists.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from .files import File from .smart_blocks import SmartBlock diff --git a/api/libretimeapi/models/playout.py b/api/libretimeapi/models/playout.py index 32a6f3bff..808cc5d08 100644 --- a/api/libretimeapi/models/playout.py +++ b/api/libretimeapi/models/playout.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from .files import File diff --git a/api/libretimeapi/models/podcasts.py b/api/libretimeapi/models/podcasts.py index f4a0dbd9e..2e7ba817a 100644 --- a/api/libretimeapi/models/podcasts.py +++ b/api/libretimeapi/models/podcasts.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from .authentication import User 
from .files import File diff --git a/api/libretimeapi/models/preferences.py b/api/libretimeapi/models/preferences.py index 8dbc564e5..2fd2e066b 100644 --- a/api/libretimeapi/models/preferences.py +++ b/api/libretimeapi/models/preferences.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models diff --git a/api/libretimeapi/models/schedule.py b/api/libretimeapi/models/schedule.py index 3c4f2b297..5f51a6344 100644 --- a/api/libretimeapi/models/schedule.py +++ b/api/libretimeapi/models/schedule.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from .files import File diff --git a/api/libretimeapi/models/services.py b/api/libretimeapi/models/services.py index 29872a78f..8106b5449 100644 --- a/api/libretimeapi/models/services.py +++ b/api/libretimeapi/models/services.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models diff --git a/api/libretimeapi/models/shows.py b/api/libretimeapi/models/shows.py index bae877cf8..8acc09d6b 100644 --- a/api/libretimeapi/models/shows.py +++ b/api/libretimeapi/models/shows.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from .playlists import Playlist from .files import File diff --git a/api/libretimeapi/models/smart_blocks.py b/api/libretimeapi/models/smart_blocks.py index 766a8bdf4..5b3c23f85 100644 --- a/api/libretimeapi/models/smart_blocks.py +++ b/api/libretimeapi/models/smart_blocks.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models diff --git a/api/libretimeapi/models/tracks.py b/api/libretimeapi/models/tracks.py index fcbf89af6..192480bef 100644 --- a/api/libretimeapi/models/tracks.py +++ b/api/libretimeapi/models/tracks.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from .files import File diff --git a/api/libretimeapi/models/user_constants.py b/api/libretimeapi/models/user_constants.py index 8944fbd5a..56afd552d 100644 --- a/api/libretimeapi/models/user_constants.py +++ 
b/api/libretimeapi/models/user_constants.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- GUEST = 'G' DJ = 'H' PROGRAM_MANAGER = 'P' diff --git a/api/libretimeapi/models/webstreams.py b/api/libretimeapi/models/webstreams.py index bb2bc448a..ed940df2e 100644 --- a/api/libretimeapi/models/webstreams.py +++ b/api/libretimeapi/models/webstreams.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.db import models from django.contrib.auth import get_user_model from .schedule import Schedule diff --git a/api/libretimeapi/permission_constants.py b/api/libretimeapi/permission_constants.py index df7a0de2f..323fdd873 100644 --- a/api/libretimeapi/permission_constants.py +++ b/api/libretimeapi/permission_constants.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import logging from django.contrib.auth.models import Group, Permission from .models.user_constants import GUEST, DJ, PROGRAM_MANAGER, USER_TYPES diff --git a/api/libretimeapi/permissions.py b/api/libretimeapi/permissions.py index 724fd8475..943ec562d 100644 --- a/api/libretimeapi/permissions.py +++ b/api/libretimeapi/permissions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from rest_framework.permissions import BasePermission from django.conf import settings from .models.user_constants import DJ diff --git a/api/libretimeapi/serializers.py b/api/libretimeapi/serializers.py index 87c1f5c46..2b9466125 100644 --- a/api/libretimeapi/serializers.py +++ b/api/libretimeapi/serializers.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.contrib.auth import get_user_model from rest_framework import serializers from .models import * diff --git a/api/libretimeapi/settings.py b/api/libretimeapi/settings.py index 4a545fca1..807d97187 100644 --- a/api/libretimeapi/settings.py +++ b/api/libretimeapi/settings.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import configparser import os from .utils import read_config_file, get_random_string diff --git a/api/libretimeapi/tests/runners.py b/api/libretimeapi/tests/runners.py index 
b2b6dcbc2..888e59c79 100644 --- a/api/libretimeapi/tests/runners.py +++ b/api/libretimeapi/tests/runners.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.test.runner import DiscoverRunner diff --git a/api/libretimeapi/tests/test_models.py b/api/libretimeapi/tests/test_models.py index aa1058806..89e0368f7 100644 --- a/api/libretimeapi/tests/test_models.py +++ b/api/libretimeapi/tests/test_models.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from rest_framework.test import APITestCase from django.contrib.auth.models import Group from django.apps import apps diff --git a/api/libretimeapi/tests/test_permissions.py b/api/libretimeapi/tests/test_permissions.py index ab0cb09b8..4e557fcdd 100644 --- a/api/libretimeapi/tests/test_permissions.py +++ b/api/libretimeapi/tests/test_permissions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os from django.contrib.auth import get_user_model from django.contrib.auth.models import AnonymousUser diff --git a/api/libretimeapi/tests/test_views.py b/api/libretimeapi/tests/test_views.py index d65e863fc..0a16a20f0 100644 --- a/api/libretimeapi/tests/test_views.py +++ b/api/libretimeapi/tests/test_views.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os from django.contrib.auth.models import AnonymousUser from django.conf import settings diff --git a/api/libretimeapi/urls.py b/api/libretimeapi/urls.py index 2a9187a0f..8dfca3cd3 100644 --- a/api/libretimeapi/urls.py +++ b/api/libretimeapi/urls.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from django.urls import include, path from rest_framework import routers diff --git a/api/libretimeapi/utils.py b/api/libretimeapi/utils.py index 4fa3510a7..f67af5f5c 100644 --- a/api/libretimeapi/utils.py +++ b/api/libretimeapi/utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import configparser import sys import string diff --git a/api/libretimeapi/views.py b/api/libretimeapi/views.py index 61bb9a1b9..cc499f059 100644 --- a/api/libretimeapi/views.py +++ b/api/libretimeapi/views.py @@ -1,3 
+1,4 @@ +# -*- coding: utf-8 -*- import os from django.conf import settings from django.http import FileResponse diff --git a/api/libretimeapi/wsgi.py b/api/libretimeapi/wsgi.py index 479327069..11e2a4c75 100644 --- a/api/libretimeapi/wsgi.py +++ b/api/libretimeapi/wsgi.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ WSGI config for api project. diff --git a/api/setup.py b/api/setup.py index 4906620d3..533cb8b56 100644 --- a/api/setup.py +++ b/api/setup.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os import shutil from setuptools import setup, find_packages diff --git a/dev_tools/compare_cc_files_to_fs.py b/dev_tools/compare_cc_files_to_fs.py index fe842332b..bdc6a6124 100644 --- a/dev_tools/compare_cc_files_to_fs.py +++ b/dev_tools/compare_cc_files_to_fs.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os import time import shutil diff --git a/python_apps/airtime-celery/airtime-celery/__init__.py b/python_apps/airtime-celery/airtime-celery/__init__.py index 81a44f04f..31d871585 100644 --- a/python_apps/airtime-celery/airtime-celery/__init__.py +++ b/python_apps/airtime-celery/airtime-celery/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os # Make the celeryconfig module visible to celery diff --git a/python_apps/airtime-celery/airtime-celery/celeryconfig.py b/python_apps/airtime-celery/airtime-celery/celeryconfig.py index 3de503d92..3fce374d0 100644 --- a/python_apps/airtime-celery/airtime-celery/celeryconfig.py +++ b/python_apps/airtime-celery/airtime-celery/celeryconfig.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os from configobj import ConfigObj from kombu import Exchange, Queue diff --git a/python_apps/airtime-celery/airtime-celery/tasks.py b/python_apps/airtime-celery/airtime-celery/tasks.py index 198b59772..74e34a9e9 100644 --- a/python_apps/airtime-celery/airtime-celery/tasks.py +++ b/python_apps/airtime-celery/airtime-celery/tasks.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from future.standard_library import install_aliases 
install_aliases() diff --git a/python_apps/airtime-celery/setup.py b/python_apps/airtime-celery/setup.py index 48f62b295..c889be027 100644 --- a/python_apps/airtime-celery/setup.py +++ b/python_apps/airtime-celery/setup.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from pathlib import Path from setuptools import setup from subprocess import call diff --git a/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py index 325b2df3d..19f3c555d 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Contains the main application class for airtime_analyzer. """ import logging diff --git a/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py index eae40d1fd..f476c6f9d 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # TODO: use an abstract base class (ie. import from abc ...) once we have python >=3.3 that supports @staticmethod with @abstractmethod diff --git a/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py b/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py index c6b383127..e0b66618e 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ Analyzes and imports an audio file into the Airtime library. 
""" import logging diff --git a/python_apps/airtime_analyzer/airtime_analyzer/cli.py b/python_apps/airtime_analyzer/airtime_analyzer/cli.py index 4de4e260f..3161b6aa2 100755 --- a/python_apps/airtime_analyzer/airtime_analyzer/cli.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/cli.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ Main CLI entrypoint for the libretime-analyzer app. """ diff --git a/python_apps/airtime_analyzer/airtime_analyzer/config_file.py b/python_apps/airtime_analyzer/airtime_analyzer/config_file.py index e98bd529d..fc0e41af7 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/config_file.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/config_file.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import configparser diff --git a/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py index 739df2478..bc69b49c8 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import subprocess import logging import traceback diff --git a/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py index 8bc8bb94d..37a0672a8 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import logging import os import time diff --git a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py index 5d28cc3f2..423be2e8d 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import sys import pika import json diff 
--git a/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py index d590d95a9..91b10a6b9 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import time import datetime import mutagen diff --git a/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py index d7b2d546e..0f4b030f4 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- __author__ = 'asantoni' import subprocess diff --git a/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py index 0c2c4d963..60360a7cd 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import subprocess import logging from .analyzer import Analyzer diff --git a/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py b/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py index ee3c78fac..e8d7dd2fa 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import requests import json import logging diff --git a/python_apps/airtime_analyzer/setup.py b/python_apps/airtime_analyzer/setup.py index 8ca460894..1eebc7cc6 100644 --- a/python_apps/airtime_analyzer/setup.py +++ b/python_apps/airtime_analyzer/setup.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from setuptools import setup import os diff --git 
a/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py b/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py index 928e02b31..f37fc6004 100644 --- a/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from nose.tools import * import airtime_analyzer diff --git a/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py b/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py index 57ae8bcf1..82879f554 100644 --- a/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py +++ b/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from nose.tools import * import os import shutil diff --git a/python_apps/airtime_analyzer/tests/analyzer_tests.py b/python_apps/airtime_analyzer/tests/analyzer_tests.py index fc6fbc684..6d34b6a9f 100644 --- a/python_apps/airtime_analyzer/tests/analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/analyzer_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from nose.tools import * from airtime_analyzer.analyzer import Analyzer diff --git a/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py b/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py index f084a404b..7dced3618 100644 --- a/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from nose.tools import * from airtime_analyzer.cuepoint_analyzer import CuePointAnalyzer diff --git a/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py b/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py index 4e7f9e304..c14e91e27 100644 --- a/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from nose.tools import * import os 
import shutil diff --git a/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py b/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py index 3864d6b40..2aa311ece 100644 --- a/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from nose.tools import * from airtime_analyzer.playability_analyzer import * diff --git a/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py b/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py index 3d1269cb7..af25b01c0 100644 --- a/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import print_function from nose.tools import * from airtime_analyzer.replaygain_analyzer import ReplayGainAnalyzer diff --git a/python_apps/api_clients/api_clients/utils.py b/python_apps/api_clients/api_clients/utils.py index a2f62686b..c2f236d03 100644 --- a/python_apps/api_clients/api_clients/utils.py +++ b/python_apps/api_clients/api_clients/utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import json import datetime import logging diff --git a/python_apps/api_clients/api_clients/version1.py b/python_apps/api_clients/api_clients/version1.py index 8317e5af6..a3da27637 100644 --- a/python_apps/api_clients/api_clients/version1.py +++ b/python_apps/api_clients/api_clients/version1.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- ############################################################################### # This file holds the implementations for all the API clients. 
# diff --git a/python_apps/api_clients/api_clients/version2.py b/python_apps/api_clients/api_clients/version2.py index 5e066b69e..c927f1f10 100644 --- a/python_apps/api_clients/api_clients/version2.py +++ b/python_apps/api_clients/api_clients/version2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- ############################################################################### # This file holds the implementations for all the API clients. # diff --git a/python_apps/api_clients/setup.py b/python_apps/api_clients/setup.py index f8f1367e9..cfcefd462 100644 --- a/python_apps/api_clients/setup.py +++ b/python_apps/api_clients/setup.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import print_function from setuptools import setup from subprocess import call diff --git a/python_apps/api_clients/tests/test_apcurl.py b/python_apps/api_clients/tests/test_apcurl.py index b6b553af4..a6b2a3366 100644 --- a/python_apps/api_clients/tests/test_apcurl.py +++ b/python_apps/api_clients/tests/test_apcurl.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import unittest from api_clients.utils import ApcUrl, UrlBadParam, IncompleteUrl diff --git a/python_apps/api_clients/tests/test_apirequest.py b/python_apps/api_clients/tests/test_apirequest.py index dedf40933..4f37766c7 100644 --- a/python_apps/api_clients/tests/test_apirequest.py +++ b/python_apps/api_clients/tests/test_apirequest.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import unittest import json from mock import MagicMock, patch diff --git a/python_apps/api_clients/tests/test_requestprovider.py b/python_apps/api_clients/tests/test_requestprovider.py index 7a475b6f4..c3592d549 100644 --- a/python_apps/api_clients/tests/test_requestprovider.py +++ b/python_apps/api_clients/tests/test_requestprovider.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import unittest import json from mock import patch, MagicMock diff --git a/python_apps/api_clients/tests/test_utils.py b/python_apps/api_clients/tests/test_utils.py index 
6808a172c..525b1210c 100644 --- a/python_apps/api_clients/tests/test_utils.py +++ b/python_apps/api_clients/tests/test_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import datetime import configparser import unittest diff --git a/python_apps/pypo/bin/airtime-liquidsoap b/python_apps/pypo/bin/airtime-liquidsoap index 7863ba692..7d5864022 100755 --- a/python_apps/pypo/bin/airtime-liquidsoap +++ b/python_apps/pypo/bin/airtime-liquidsoap @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- import runpy # Run the liquidsoap python module diff --git a/python_apps/pypo/bin/airtime-playout b/python_apps/pypo/bin/airtime-playout index 6e847a072..9fe0ae5e8 100755 --- a/python_apps/pypo/bin/airtime-playout +++ b/python_apps/pypo/bin/airtime-playout @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- import runpy runpy.run_module("pypo", run_name="__main__") diff --git a/python_apps/pypo/liquidsoap/__main__.py b/python_apps/pypo/liquidsoap/__main__.py index af8145a79..65db9bf8c 100644 --- a/python_apps/pypo/liquidsoap/__main__.py +++ b/python_apps/pypo/liquidsoap/__main__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ Runs Airtime liquidsoap """ import argparse diff --git a/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py b/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py index 7eea9c7a1..ca5dd764f 100644 --- a/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py +++ b/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import logging import os diff --git a/python_apps/pypo/liquidsoap/liquidsoap_auth.py b/python_apps/pypo/liquidsoap/liquidsoap_auth.py index fd6a2913e..efa087e98 100644 --- a/python_apps/pypo/liquidsoap/liquidsoap_auth.py +++ b/python_apps/pypo/liquidsoap/liquidsoap_auth.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from api_clients import version1 as api_client import sys diff --git a/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py 
b/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py index d40655fc9..18a3a5dac 100644 --- a/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py +++ b/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from configobj import ConfigObj import telnetlib import sys diff --git a/python_apps/pypo/pypo/__main__.py b/python_apps/pypo/pypo/__main__.py index fd5f4e5d1..f8c40a7bf 100644 --- a/python_apps/pypo/pypo/__main__.py +++ b/python_apps/pypo/pypo/__main__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ Python part of radio playout (pypo) """ diff --git a/python_apps/pypo/pypo/eventtypes.py b/python_apps/pypo/pypo/eventtypes.py index 5f9c871db..39e38472d 100644 --- a/python_apps/pypo/pypo/eventtypes.py +++ b/python_apps/pypo/pypo/eventtypes.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- FILE = "file" EVENT = "event" STREAM_BUFFER_START = "stream_buffer_start" diff --git a/python_apps/pypo/pypo/listenerstat.py b/python_apps/pypo/pypo/listenerstat.py index 57a75281d..2df8dc031 100644 --- a/python_apps/pypo/pypo/listenerstat.py +++ b/python_apps/pypo/pypo/listenerstat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from threading import Thread import urllib.request, urllib.error, urllib.parse import defusedxml.minidom diff --git a/python_apps/pypo/pypo/pure.py b/python_apps/pypo/pypo/pure.py index 24ef48c35..2f9d62a44 100644 --- a/python_apps/pypo/pypo/pure.py +++ b/python_apps/pypo/pypo/pure.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import re from packaging.version import Version, parse diff --git a/python_apps/pypo/pypo/pypoliqqueue.py b/python_apps/pypo/pypo/pypoliqqueue.py index 709b176a6..e41266316 100644 --- a/python_apps/pypo/pypo/pypoliqqueue.py +++ b/python_apps/pypo/pypo/pypoliqqueue.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from threading import Thread from collections import deque from datetime import datetime diff --git a/python_apps/pypo/pypo/pypoliquidsoap.py 
b/python_apps/pypo/pypo/pypoliquidsoap.py index 76887cd42..7d7854a87 100644 --- a/python_apps/pypo/pypo/pypoliquidsoap.py +++ b/python_apps/pypo/pypo/pypoliquidsoap.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from .pypofetch import PypoFetch from .telnetliquidsoap import TelnetLiquidsoap diff --git a/python_apps/pypo/pypo/telnetliquidsoap.py b/python_apps/pypo/pypo/telnetliquidsoap.py index 8566c7001..65d6bb168 100644 --- a/python_apps/pypo/pypo/telnetliquidsoap.py +++ b/python_apps/pypo/pypo/telnetliquidsoap.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import telnetlib from .timeout import ls_timeout diff --git a/python_apps/pypo/pypo/testpypoliqqueue.py b/python_apps/pypo/pypo/testpypoliqqueue.py index b3918a0bd..15897737e 100644 --- a/python_apps/pypo/pypo/testpypoliqqueue.py +++ b/python_apps/pypo/pypo/testpypoliqqueue.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from .pypoliqqueue import PypoLiqQueue from .telnetliquidsoap import DummyTelnetLiquidsoap, TelnetLiquidsoap diff --git a/python_apps/pypo/pypo/timeout.py b/python_apps/pypo/pypo/timeout.py index 0e409021a..2520b8e21 100644 --- a/python_apps/pypo/pypo/timeout.py +++ b/python_apps/pypo/pypo/timeout.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import threading from . 
import pypofetch diff --git a/python_apps/pypo/setup.py b/python_apps/pypo/setup.py index 75d0e4e94..e2b9873c4 100644 --- a/python_apps/pypo/setup.py +++ b/python_apps/pypo/setup.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import print_function from setuptools import setup from subprocess import call diff --git a/utils/airtime-import/airtime-import b/utils/airtime-import/airtime-import index a878d7194..baf20bcd3 100755 --- a/utils/airtime-import/airtime-import +++ b/utils/airtime-import/airtime-import @@ -1,4 +1,5 @@ #!/usr/bin/python2 import sys +# -*- coding: utf-8 -*- import os import logging from configobj import ConfigObj diff --git a/utils/airtime-silan b/utils/airtime-silan index 3bffb0ee2..626777c63 100755 --- a/utils/airtime-silan +++ b/utils/airtime-silan @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- from configobj import ConfigObj from api_clients import api_client as apc diff --git a/utils/airtime-test-soundcard.py b/utils/airtime-test-soundcard.py index 5e23c6292..834999966 100644 --- a/utils/airtime-test-soundcard.py +++ b/utils/airtime-test-soundcard.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import subprocess import os import pwd diff --git a/utils/airtime-test-stream.py b/utils/airtime-test-stream.py index 5a5bce9c2..675f934ac 100644 --- a/utils/airtime-test-stream.py +++ b/utils/airtime-test-stream.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import subprocess import os import pwd diff --git a/utils/upgrade.py b/utils/upgrade.py index 3b03547e9..6b196555e 100755 --- a/utils/upgrade.py +++ b/utils/upgrade.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- import ConfigParser import argparse From 65f7b414870b58d6624d1b2d47643bfaa87f561b Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 15:59:00 +0200 Subject: [PATCH 03/28] Fix spelling --- .codespellignore | 8 ++++++++ .github/RELEASE.md | 4 ++-- .github/stale.yml | 2 +- .../public/js/timepicker/jquery-ui-timepicker-addon.js | 4 ++-- changelog | 8 
++++---- dev_tools/compare_cc_files_to_fs.py | 2 +- dev_tools/scripts/git-merge-po | 4 ++-- docs/_docs/backing-up-the-server.md | 2 +- docs/_docs/contribute.md | 6 +++--- docs/_docs/scheduling-shows.md | 2 +- docs/_docs/ssl.md | 2 +- docs/_docs/troubleshooting.md | 2 +- docs/_docs/users.md | 2 +- docs/api/openapi.yaml | 6 +++--- installer/vagrant/centos.sh | 2 +- .../airtime_analyzer/airtime_analyzer/airtime_analyzer.py | 2 +- .../airtime_analyzer/airtime_analyzer/message_listener.py | 2 +- python_apps/pypo/liquidsoap/1.1/ls_lib.liq | 2 +- python_apps/pypo/liquidsoap/1.3/ls_lib.liq | 2 +- python_apps/pypo/liquidsoap/1.4/ls_lib.liq | 2 +- python_apps/pypo/pypo/pypomessagehandler.py | 4 ++-- python_apps/pypo/pypo/recorder.py | 2 +- utils/airtime-import/airtime-import | 8 ++++---- 23 files changed, 44 insertions(+), 36 deletions(-) diff --git a/.codespellignore b/.codespellignore index a04dbeef4..85b169f1d 100644 --- a/.codespellignore +++ b/.codespellignore @@ -1,2 +1,10 @@ hda HDA +conexant + +# TODO: See https://github.com/savonet/liquidsoap/issues/1654 +prefered + +# TODO: Remove once docs/lunr.js is shipped using a package manager +ment +enviroments \ No newline at end of file diff --git a/.github/RELEASE.md b/.github/RELEASE.md index a1b8959a2..091a43713 100644 --- a/.github/RELEASE.md +++ b/.github/RELEASE.md @@ -21,7 +21,7 @@ Please report new issues and/or feature requests in the issue tracker. Join our Liquidsoap support No watched folder support No Line In recording support - Playout wont work if locale is missing + Playout won't work if locale is missing Lack of i18n toolchain is disturbing ## Features @@ -155,7 +155,7 @@ Currently LibreTime does not support watching folders. Uploading files through t ### No line in support This feature went missing from LibreTime due to the fact that we based our code off of the saas-dev branch of legacy upstream and support for recording hasn't been ported to the new airtime analyzer ingest system. 
#42 currently tracks the progress being made on line in recording. -Playout wont work if locale is missing +Playout won't work if locale is missing Some minimal OS installs do not have a default locale configured. This only seems to affect some VPS installs as they often do not have a locale setup in the default images provided. diff --git a/.github/stale.yml b/.github/stale.yml index 046c0fc33..9055cfe61 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ markComment: > # Comment to post when closing a stale Issue or Pull Request. closeComment: > - This issue has been autmatically closed after is was marked as stale and + This issue has been automatically closed after is was marked as stale and did not receive any further inputs. Feel free to let us know on [discourse](https://discourse.libretime.org/) or diff --git a/airtime_mvc/public/js/timepicker/jquery-ui-timepicker-addon.js b/airtime_mvc/public/js/timepicker/jquery-ui-timepicker-addon.js index 6f35ee532..0a609a382 100644 --- a/airtime_mvc/public/js/timepicker/jquery-ui-timepicker-addon.js +++ b/airtime_mvc/public/js/timepicker/jquery-ui-timepicker-addon.js @@ -1832,11 +1832,11 @@ */ var splitDateTime = function(dateFormat, dateTimeString, dateSettings, timeSettings) { try { - // The idea is to get the number separator occurances in datetime and the time format requested (since time has + // The idea is to get the number separator occurrences in datetime and the time format requested (since time has // fewer unknowns, mostly numbers and am/pm). We will use the time pattern to split. var separator = timeSettings && timeSettings.separator ? timeSettings.separator : $.datetimepicker._defaults.separator, format = timeSettings && timeSettings.timeFormat ? timeSettings.timeFormat : $.datetimepicker._defaults.timeFormat, - timeParts = format.split(separator), // how many occurances of separator may be in our format? 
+ timeParts = format.split(separator), // how many occurrences of separator may be in our format? timePartsLen = timeParts.length, allParts = dateTimeString.split(separator), allPartsLen = allParts.length; diff --git a/changelog b/changelog index ef718995a..c2b3edfb9 100644 --- a/changelog +++ b/changelog @@ -110,7 +110,7 @@ * fixed master/source override URL being reverted to original setting after clicking 'Save' in stream settings. * Add several helpful tips in the Stream Settings page and some UI cleanup * DJ user type cannot delete Playlists that aren't their own or delete tracks - * Playlist Builder should remember your position instead of reseting to the first page everytime an operation was performed + * Playlist Builder should remember your position instead of resetting to the first page every time an operation was performed * If Master or Live input source is disconnected, Airtime will no longer automatically switch off that source. This should allow the source to reconnect and continue playback. @@ -157,7 +157,7 @@ * "Listen" preview player no longer falls behind the broadcast (you can only mute the stream now, not stop it) * Tracks that cannot be played will be rejected on upload and put in to the directory "/srv/airtime/stor/problem_files" (but currently it will not tell you that it rejected them - sorry\!) * Library is automatically refreshed when media import is finished - * Show "Disk Full" message when trying to upload a file that wont fit on the disk + * Show "Disk Full" message when trying to upload a file that won't fit on the disk * Reduced CPU utilization for OGG streams * New command line utilities: * airtime-test-soundcard - verify that the soundcard is working @@ -237,7 +237,7 @@ * Better error checking in cases where two users alter the same data at the same time (for example, in playlists and shows) * Playlists: Removed intermediate "Add Playlist" screen where it asked you - to fill in the name and description of the playlist. 
This wasnt + to fill in the name and description of the playlist. This wasn't necessary since everything could be changed from the playlist editor itself. * Added "airtime-log" command to display, dump, and view all of Airtime's @@ -324,7 +324,7 @@ - Prevent users from doing a manual install of Airtime if they already have the Debian package version installed * Changes - - Support Settings moved to a seperate page accessible by Admin user only. + - Support Settings moved to a separate page accessible by Admin user only. 1.9.0 - August 9, 2011 diff --git a/dev_tools/compare_cc_files_to_fs.py b/dev_tools/compare_cc_files_to_fs.py index bdc6a6124..80b727222 100644 --- a/dev_tools/compare_cc_files_to_fs.py +++ b/dev_tools/compare_cc_files_to_fs.py @@ -94,7 +94,7 @@ class AirtimeMediaMonitorBootstrap(): and reads the list of files in the local file system. Its purpose is to discover which files exist on the file system but not in the database and vice versa, as well as which files have been modified since the database was last updated. In each case, this method will call an - appropiate method to ensure that the database actually represents the filesystem. + appropriate method to ensure that the database actually represents the filesystem. 
dir_id -- row id of the directory in the cc_watched_dirs database table dir -- pathname of the directory """ diff --git a/dev_tools/scripts/git-merge-po b/dev_tools/scripts/git-merge-po index 2aedde19c..820904236 100644 --- a/dev_tools/scripts/git-merge-po +++ b/dev_tools/scripts/git-merge-po @@ -20,8 +20,8 @@ # *.po merge=pofile # *.pot merge=pofile # -# - When merging branches, conflicts in PO files will be maked with "#-#-#-#" -# +# - When merging branches, conflicts in PO files will be marked with "#-#-#-#" +# O=$1 A=$2 B=$3 diff --git a/docs/_docs/backing-up-the-server.md b/docs/_docs/backing-up-the-server.md index 1a0a2f1d7..0f019e478 100644 --- a/docs/_docs/backing-up-the-server.md +++ b/docs/_docs/backing-up-the-server.md @@ -34,7 +34,7 @@ be backed up. /srv /airtime /stor - /imported - Sucessfully imported media + /imported - Successfully imported media /organize - A temporary holding place for uploaded media as the importer works /etc /airtime diff --git a/docs/_docs/contribute.md b/docs/_docs/contribute.md index df805eef5..358edfe27 100644 --- a/docs/_docs/contribute.md +++ b/docs/_docs/contribute.md @@ -37,7 +37,7 @@ jekyll serve ## Code -Are you familar with coding in PHP? Have you made projects in Liquidsoap and some of the other services we use? +Are you familiar with coding in PHP? Have you made projects in Liquidsoap and some of the other services we use? Take a look at the [list of bugs and feature requests](https://github.com/LibreTime/libretime/issues), and then fork our repo and have a go! Just use the **Fork** button at the top of our **Code** page, clone the forked repo to your desktop, open up a favorite editor and make some changes, and then commit, push, and open a pull request. @@ -47,12 +47,12 @@ will suit you well, use the links for a quick 101. ## Testing and CI/CD Before submitting code to the project, it's a good idea to test it first. 
To do this, it's easiest to install -LibreTime in a virtural machine on your local system or in a cloud VM. We have instructions for setting up a virtural +LibreTime in a virtual machine on your local system or in a cloud VM. We have instructions for setting up a virtual instance of LibreTime with [Vagrant](/docs/vagrant) and [Multipass](/docs/multipass). If you would like to try LibreTime in a Docker image, Odclive has instructions [here](https://github.com/kessibi/libretime-docker) for setting up a test image -and a more persistant install. +and a more persistent install. ## Modifying the Database LibreTime is designed to work with a [PostgreSQL](https://www.postgresql.org/) database server running locally. diff --git a/docs/_docs/scheduling-shows.md b/docs/_docs/scheduling-shows.md index c0527fbde..48995fafb 100644 --- a/docs/_docs/scheduling-shows.md +++ b/docs/_docs/scheduling-shows.md @@ -43,7 +43,7 @@ of the pane to add your show to the calendar. | _When_ | | | Start Time (Required) | The time and date the show starts. Note that the time element is in 24 hour time. If the **Now** option is selected, the show will be created to immediately start. | | End Time (Required) | The time and date the show ends. Defaults to a time one hour after the start time, which can be seen in the **Duration** field, which is uneditable. | -| Repeats? | If checked, allows for options to schedule a repeated show. Shows can repeat weekly up to monthly in increments of one week and can be scheduled on multiple days of the same week. An end date can be set, otherwise the show can be deleted by clicking on its entry in the calendar and clicking Delete > Future Occurances. If **Linked ?** is checked, the playlist scheduled for the next show will also play for all future shows. | +| Repeats? | If checked, allows for options to schedule a repeated show. Shows can repeat weekly up to monthly in increments of one week and can be scheduled on multiple days of the same week. 
An end date can be set, otherwise the show can be deleted by clicking on its entry in the calendar and clicking Delete > Future Occurrences. If **Linked ?** is checked, the playlist scheduled for the next show will also play for all future shows. | | _Autoloading Playlist_ | | | Add Autoloading Playlist? | If checked, allows for the following options | | Select Playlist | Select the playlist the show will autofill from (shows autofill exactly one hour before air). If you wish to use a smartblock you must add it to a playlist and then select that playlist. This can be used to auto-schedule new podcast episodes to air. | diff --git a/docs/_docs/ssl.md b/docs/_docs/ssl.md index 1111e2071..255a371e4 100644 --- a/docs/_docs/ssl.md +++ b/docs/_docs/ssl.md @@ -20,7 +20,7 @@ These instructions come from Certbot's website and assume that you are using an running on Ubuntu 18.04 LTS (the Apache web server is installed with LibreTime by default). Instructions for other Debian-based OSes are similar, but check with Certbot for clarification. -Note: all instructions require you to have sudo priveledges +Note: all instructions require you to have sudo privileges First, add Certbot's PPA using: diff --git a/docs/_docs/troubleshooting.md b/docs/_docs/troubleshooting.md index 800fb202c..fc70e1199 100644 --- a/docs/_docs/troubleshooting.md +++ b/docs/_docs/troubleshooting.md @@ -45,7 +45,7 @@ troubleshooting checklist. - **File not importing successfully?** Libretime has been known to work with MP3 and WAV files, encoded using 41,100 Hz. Variable Bit Rate (VBR) files are currently hit or miss with the importer. Please convert your file to an MP3 or WAV at 41,100 Hz. and try uploading again. - **Podcast hosted by Anchor.fm not importing?** There is no known work-around at this time. Ask your producers to provide their show files manually or check with the show's distributer. - **Tracks won't publish?** We know the Publish screen is broken and we're working on it. 
A potential work-around is to use an external podcast host like [Anchor.fm](https://www.anchor.fm) or [Blubrry](https://blubrry.com/). -- **Can't hear any sound coming from your soundcard (for analog audio output)?** If you are using ALSA as your audio driver, use `alsamixer` to see the current volume your system is set to. If stil nothing, go to **Settings** > **Streams** and make sure **Hardware Audio Output** is checked. If you need to play a tone to help you troubleshoot, you can use `speaker-test` (does not come installed with Libretime). +- **Can't hear any sound coming from your soundcard (for analog audio output)?** If you are using ALSA as your audio driver, use `alsamixer` to see the current volume your system is set to. If still nothing, go to **Settings** > **Streams** and make sure **Hardware Audio Output** is checked. If you need to play a tone to help you troubleshoot, you can use `speaker-test` (does not come installed with Libretime). ## 4. Read the docs diff --git a/docs/_docs/users.md b/docs/_docs/users.md index 1f17051cd..f198e3f7b 100644 --- a/docs/_docs/users.md +++ b/docs/_docs/users.md @@ -29,7 +29,7 @@ To add further user accounts to the system, one for each of your station staff t - Everything DJs can do, plus - Manage other users' libraries in addition to their own -- Create, edit, and delete color-coded shows on the Calender and assign them to DJs (if needed) +- Create, edit, and delete color-coded shows on the Calendar and assign them to DJs (if needed) - Shows can be scheduled to repeat, with the option of linking content between the shows (helpful if a DJ livestreams in each week) - View listener statistics - Export playout logs for analysis or reporting for music royalties diff --git a/docs/api/openapi.yaml b/docs/api/openapi.yaml index ee5819a41..6e49f49db 100644 --- a/docs/api/openapi.yaml +++ b/docs/api/openapi.yaml @@ -596,7 +596,7 @@ paths: required: false /show-history-feed: get: - summary: BROKEN - Retrieve the show shedules 
for a given time range and show + summary: BROKEN - Retrieve the show schedules for a given time range and show parameters: - name: api_key in: path @@ -1074,7 +1074,7 @@ paths: /handle-watched-dir-missing: post: summary: |- - BROKEN (LibreTime does not currentl handle watched directories) - + BROKEN (LibreTime does not currently handle watched directories) - Handles missing watched directories. parameters: - name: dir @@ -1290,7 +1290,7 @@ paths: required: true /get-usability-hint: get: - summary: Returns the usibility hint tool-tip for a UI item + summary: Returns the usability hint tool-tip for a UI item parameters: - name: api_key in: path diff --git a/installer/vagrant/centos.sh b/installer/vagrant/centos.sh index 3311a09a3..5789e0d59 100644 --- a/installer/vagrant/centos.sh +++ b/installer/vagrant/centos.sh @@ -144,7 +144,7 @@ sed -i \ # celery will not run unless we install a specific version (https://github.com/pypa/setuptools/issues/942) -# this will need to be figured out later on and will get overriden by the docs installer anyhow :( +# this will need to be figured out later on and will get overridden by the docs installer anyhow :( pip3 install setuptools==33.1.1 pip3 freeze setuptools==33.1.1 diff --git a/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py index 19f3c555d..b8e52d5c4 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py @@ -39,7 +39,7 @@ class AirtimeAnalyzerServer: StatusReporter.start_thread(http_retry_queue_path) # Start listening for RabbitMQ messages telling us about newly - # uploaded files. This blocks until we recieve a shutdown signal. + # uploaded files. This blocks until we receive a shutdown signal. 
self._msg_listener = MessageListener(rmq_config) StatusReporter.stop_thread() diff --git a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py index 423be2e8d..7c3cc29ab 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py @@ -189,7 +189,7 @@ class MessageListener: (NACK is a negative acknowledgement. We could use ACK instead, but this might come in handy in the future.) Exceptions in this context are unexpected, unhandled errors. We try to recover - from as many errors as possble in AnalyzerPipeline, but we're safeguarding ourselves + from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves here from any catastrophic or genuinely unexpected errors: ''' channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False, diff --git a/python_apps/pypo/liquidsoap/1.1/ls_lib.liq b/python_apps/pypo/liquidsoap/1.1/ls_lib.liq index b2822d596..a244c7124 100644 --- a/python_apps/pypo/liquidsoap/1.1/ls_lib.liq +++ b/python_apps/pypo/liquidsoap/1.1/ls_lib.liq @@ -65,7 +65,7 @@ end # we need this function for special transition case(from default to queue) # we don't want the trasition fade to have effect on the first song that would -# be played siwtching out of the default(silent) source +# be played switching out of the default(silent) source def transition_default(a,b) = log("transition called...") if !just_switched then diff --git a/python_apps/pypo/liquidsoap/1.3/ls_lib.liq b/python_apps/pypo/liquidsoap/1.3/ls_lib.liq index 1ed7495ea..3009c28ef 100644 --- a/python_apps/pypo/liquidsoap/1.3/ls_lib.liq +++ b/python_apps/pypo/liquidsoap/1.3/ls_lib.liq @@ -65,7 +65,7 @@ end # we need this function for special transition case(from default to queue) # we don't want the trasition fade to have effect on the first song that would -# be played siwtching out of the 
default(silent) source +# be played switching out of the default(silent) source def transition_default(a,b) = log("transition called...") if !just_switched then diff --git a/python_apps/pypo/liquidsoap/1.4/ls_lib.liq b/python_apps/pypo/liquidsoap/1.4/ls_lib.liq index ee32f4871..ccd2c03bc 100644 --- a/python_apps/pypo/liquidsoap/1.4/ls_lib.liq +++ b/python_apps/pypo/liquidsoap/1.4/ls_lib.liq @@ -56,7 +56,7 @@ end # we need this function for special transition case(from default to queue) # we don't want the trasition fade to have effect on the first song that would -# be played siwtching out of the default(silent) source +# be played switching out of the default(silent) source def transition_default(a,b) = log("transition called...") if !just_switched then diff --git a/python_apps/pypo/pypo/pypomessagehandler.py b/python_apps/pypo/pypo/pypomessagehandler.py index 3d4b37589..9c8b8e28a 100644 --- a/python_apps/pypo/pypo/pypomessagehandler.py +++ b/python_apps/pypo/pypo/pypomessagehandler.py @@ -73,7 +73,7 @@ class PypoMessageHandler(Thread): self.logger.info("Handling command: " + command) if command == 'update_schedule': - self.logger.info("Updating schdule...") + self.logger.info("Updating schedule...") self.pypo_queue.put(message) elif command == 'reset_liquidsoap_bootstrap': self.logger.info("Resetting bootstrap vars...") @@ -116,7 +116,7 @@ class PypoMessageHandler(Thread): """ Main loop of the thread: - Wait for schedule updates from RabbitMQ, but in case there arent any, + Wait for schedule updates from RabbitMQ, but in case there aren't any, poll the server to get the upcoming schedule. 
""" def run(self): diff --git a/python_apps/pypo/pypo/recorder.py b/python_apps/pypo/pypo/recorder.py index 4c8582dc2..3e22d6443 100644 --- a/python_apps/pypo/pypo/recorder.py +++ b/python_apps/pypo/pypo/recorder.py @@ -305,7 +305,7 @@ class Recorder(Thread): def run(self): """ Main loop of the thread: - Wait for schedule updates from RabbitMQ, but in case there arent any, + Wait for schedule updates from RabbitMQ, but in case there aren't any, poll the server to get the upcoming schedule. """ try: diff --git a/utils/airtime-import/airtime-import b/utils/airtime-import/airtime-import index baf20bcd3..c3c2500d1 100755 --- a/utils/airtime-import/airtime-import +++ b/utils/airtime-import/airtime-import @@ -168,7 +168,7 @@ def WatchAddAction(option, opt, value, parser): res = api_client.add_watched_dir(path) except Exception, e: exit("Unable to connect to the server.") - # sucess + # success if(res['msg']['code'] == 0): print "%s added to watched folder list successfully" % path else: @@ -212,7 +212,7 @@ def WatchRemoveAction(option, opt, value, parser): res = api_client.remove_watched_dir(path) except Exception, e: exit("Unable to connect to the Airtime server.") - # sucess + # success if(res['msg']['code'] == 0): print "%s removed from watch folder list successfully." % path else: @@ -233,11 +233,11 @@ def StorageSetAction(option, opt, value, parser): if(not bypass): errorIfMultipleOption(parser.rargs, "Only [-f] and [--force] option is allowed with this option.") possibleInput = ['y','Y','n','N'] - confirm = raw_input("Are you sure you want to change the storage direcory? (y/N)") + confirm = raw_input("Are you sure you want to change the storage directory? (y/N)") confirm = confirm or 'N' while(confirm not in possibleInput): print "Not an acceptable input: %s\n" % confirm - confirm = raw_input("Are you sure you want to change the storage direcory? (y/N) ") + confirm = raw_input("Are you sure you want to change the storage directory? 
(y/N) ") confirm = confirm or 'N' if(confirm == 'n' or confirm =='N'): sys.exit(1) From efe4fa027ed40717cbd2b88ef0051547f510a2cc Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 16:20:34 +0200 Subject: [PATCH 04/28] Format code using prettier --- .github/ISSUE_TEMPLATE/bug_report.md | 22 +- .github/ISSUE_TEMPLATE/feature_request.md | 5 +- .github/RELEASE.md | 6 +- .github/lock.yml | 1 - .github/workflows/test.yml | 23 +- CONTRIBUTING.md | 30 +- README.md | 12 +- api/README.md | 3 + cloud-init.yml | 8 +- composer.json | 91 +- docs/404.md | 2 +- docs/_config.yml | 5 +- docs/_docs/api.md | 24 +- docs/_docs/backing-up-the-server.md | 19 +- docs/_docs/calendar.md | 6 +- docs/_docs/contribute.md | 7 +- docs/_docs/dashboard.md | 4 +- docs/_docs/default-passwords.md | 18 +- docs/_docs/freeipa.md | 4 +- docs/_docs/hd-audio-modules.md | 6 +- docs/_docs/host-configuration.md | 29 +- docs/_docs/icecast.md | 34 +- docs/_docs/install.md | 3 +- docs/_docs/interface-customization.md | 11 +- docs/_docs/interface-localization.md | 10 +- docs/_docs/listener-stats.md | 4 +- docs/_docs/live-broadcast.md | 36 +- docs/_docs/microsite.md | 2 +- docs/_docs/multipass.md | 3 +- docs/_docs/playlists.md | 10 +- docs/_docs/playout-history.md | 37 +- docs/_docs/podcasts.md | 6 +- docs/_docs/preparing-media.md | 32 +- docs/_docs/reverse-proxy.md | 1 - docs/_docs/scheduling-shows.md | 47 +- docs/_docs/set-server-time.md | 8 +- docs/_docs/settings.md | 26 +- docs/_docs/ssl.md | 13 +- docs/_docs/status.md | 2 +- docs/_docs/troubleshooting.md | 18 +- docs/_docs/upgrading.md | 19 +- docs/_docs/users.md | 7 +- docs/_docs/vagrant.md | 14 +- docs/_docs/webstreams.md | 6 +- docs/_docs/widgets.md | 4 +- docs/api/openapi.yaml | 1319 +++++++++++---------- docs/index.md | 18 +- 47 files changed, 1023 insertions(+), 992 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 98a8f4524..4fd6996b7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ 
b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,10 +1,9 @@ --- name: Bug report about: Create a report to help us improve -title: '' +title: "" labels: bug -assignees: '' - +assignees: "" --- **Describe the bug** @@ -12,6 +11,7 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: + 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' @@ -24,18 +24,20 @@ A clear and concise description of what you expected to happen. Version from the upgrade popup if you can reach it. **Installation method:** - - OS: [e.g. Ubuntu] - - OS Version [e.g. 16.04.5 LTS (Xenial Xerus)] - - Method: [e.g. `./install` script or packages] - - Details: [how did you call the install script, where did you get packages from] + +- OS: [e.g. Ubuntu] +- OS Version [e.g. 16.04.5 LTS (Xenial Xerus)] +- Method: [e.g. `./install` script or packages] +- Details: [how did you call the install script, where did you get packages from] **Screenshots** If applicable, add screenshots to help explain your problem. **Client (please complete the following information if applicable):** - - OS: [e.g. Fedora] - - Browser [e.g. chrome, safari] - - Version [e.g. 22] + +- OS: [e.g. Fedora] +- Browser [e.g. chrome, safari] +- Version [e.g. 22] **Additional context** Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 982a4dc0d..df0588520 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,10 +1,9 @@ --- name: Feature request about: Suggest an idea for this project -title: '' +title: "" labels: feature-request -assignees: '' - +assignees: "" --- **Is your feature request related to a problem? 
Please describe.** diff --git a/.github/RELEASE.md b/.github/RELEASE.md index 091a43713..eb0aa11ff 100644 --- a/.github/RELEASE.md +++ b/.github/RELEASE.md @@ -86,8 +86,8 @@ storage_backend=file You can then remove the files and the symlink. rm /etc/airtime/cloud_storage.conf \ - /etc/airtime/rabbitmq-analyzer.ini \ - /etc/airtime/production + /etc/airtime/rabbitmq-analyzer.ini \ + /etc/airtime/production While you're at you may also want to remove the amazon_s3 section if it was in any of the files. @@ -174,4 +174,4 @@ sudo update-locale LANGUAGE="en_US.UTF-8" ### Lack of i18n toolchain is disturbing -Some translations might miss the tarball. They didn't get lost, but the build chain needs fixing. Work is in #301 and additional work is needed as it has become clear that we probably want to support bidirectional translation syncing with zanata. \ No newline at end of file +Some translations might miss the tarball. They didn't get lost, but the build chain needs fixing. Work is in #301 and additional work is needed as it has become clear that we probably want to support bidirectional translation syncing with zanata. diff --git a/.github/lock.yml b/.github/lock.yml index a3922cd92..5bae4efbe 100644 --- a/.github/lock.yml +++ b/.github/lock.yml @@ -25,6 +25,5 @@ lockComment: > # Assign `resolved` as the reason for locking. 
Set to `false` to disable setLockReason: true - # Limit to only `issues` or `pulls` # only: issues diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f4bdf89b7..7ddd6d795 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,11 +2,19 @@ name: Python and PHP Tests on: push: paths-ignore: - - 'docs/**' + - "docs/**" pull_request: - types: [opened, ready_for_review, review_requested, edited, reopened, synchronize] + types: + [ + opened, + ready_for_review, + review_requested, + edited, + reopened, + synchronize, + ] paths-ignore: - - 'docs/**' + - "docs/**" workflow_dispatch: jobs: @@ -19,7 +27,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: '3.6' + python-version: "3.6" - name: Setup PostgreSQL run: | sudo systemctl start postgresql.service @@ -31,10 +39,9 @@ jobs: - name: Setup PHP with specific version uses: shivammathur/setup-php@v2 with: - php-version: '7.2' + php-version: "7.2" - name: Install prerequisites - run: - sudo -E ./.github/scripts/install-bionic.sh + run: sudo -E ./.github/scripts/install-bionic.sh - name: Run Python tests run: | sudo ./.github/scripts/python-pkg-install.sh @@ -62,7 +69,7 @@ jobs: - name: Setup PHP with specific version uses: shivammathur/setup-php@v2 with: - php-version: '7.0' + php-version: "7.0" - name: Install prerequisites run: sudo -E ./.github/scripts/install-xenial.sh - name: Run PHP tests diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3f6dcdbef..9796754dd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,33 +1,33 @@ # Contributing to LibreTime First and foremost, thank you! We appreciate that you want to -contribute to LibreTime, your time is valuable, and your +contribute to LibreTime, your time is valuable, and your contributions mean a lot to us. **What does "contributing" mean?** Creating an issue is the simplest form of contributing to a -project. But there are many ways to contribute, including +project. 
But there are many ways to contribute, including the following: -* Updating or correcting documentation -* Feature requests -* Bug reports +- Updating or correcting documentation +- Feature requests +- Bug reports Before opening an issue, please: -* read and be prepared to adhere to our [code of conduct](https://github.com/LibreTime/code-of-conduct/blob/master/CODE_OF_CONDUCT.md) -* understand that we follow the standardized [C4 development process](https://rfc.zeromq.org/spec:42/C4/) -* [search for existing duplicate or closed issues](https://github.com/LibreTime/libretime/issues?utf8=%E2%9C%93&q=is%3Aissue) -* clearly state the problem you would like to solve in a meaningful way -* be prepared to follow up on issues by providing additional information as requested by a maintainer or contributor helping you out +- read and be prepared to adhere to our [code of conduct](https://github.com/LibreTime/code-of-conduct/blob/master/CODE_OF_CONDUCT.md) +- understand that we follow the standardized [C4 development process](https://rfc.zeromq.org/spec:42/C4/) +- [search for existing duplicate or closed issues](https://github.com/LibreTime/libretime/issues?utf8=%E2%9C%93&q=is%3Aissue) +- clearly state the problem you would like to solve in a meaningful way +- be prepared to follow up on issues by providing additional information as requested by a maintainer or contributor helping you out For bug reports, please provide the following details: -* **version**: what version of LibreTime you were using when you experienced the bug? -* **distro**: what distribution is your install on and which distro version are you using (ie. Ubuntu Trusty) -* **reduced test case**: the minimum amount of detail needed to reproduce the bug -* **error messages**: please paste any error reports into the issue or a gist +- **version**: what version of LibreTime you were using when you experienced the bug? +- **distro**: what distribution is your install on and which distro version are you using (ie. 
Ubuntu Trusty) +- **reduced test case**: the minimum amount of detail needed to reproduce the bug +- **error messages**: please paste any error reports into the issue or a gist -Please wrap all code and error messages in [markdown code +Please wrap all code and error messages in [markdown code fences](https://help.github.com/articles/creating-and-highlighting-code-blocks/). diff --git a/README.md b/README.md index 9a914bf78..64036fffb 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ We are currently ramping up development on this repository. Check out the [documentation](http://libretime.org) for more information and start broadcasting! -Please note that LibreTime is released with a [Contributor Code +Please note that LibreTime is released with a [Contributor Code of Conduct](https://github.com/LibreTime/code-of-conduct/blob/master/CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. @@ -23,12 +23,12 @@ Please submit enhancements, bugfixes or comments via GitHub. ## Development Process -The LibreTime follows the standardized [Collective Code Construction +The LibreTime follows the standardized [Collective Code Construction Contract (C4)](https://rfc.zeromq.org/spec:42/C4/). Its abstract is provided here. > C4 provides a standard process for contributing, evaluating and -> discussing improvements on software projects. It defines specific +> discussing improvements on software projects. It defines specific > technical requirements for projects like a style guide, unit tests, > git and similar platforms. It also establishes different personas > for projects, with clear and distinct duties. C4 specifies a process @@ -100,7 +100,7 @@ your organization. Your logo will show up here with a link to your website. 
LibreTime is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public -License as published by the Free Software Foundation, +License as published by the Free Software Foundation, version 3 of the License. ## Copyright @@ -109,6 +109,6 @@ Copyright (c) 2011-2017 Sourcefabric z.ú. Copyright (c) 2017 LibreTime Community -Please refer to the original [README](README), -[CREDITS](CREDITS) and [LICENSE_3RD_PARTY](LICENSE_3RD_PARTY) +Please refer to the original [README](README), +[CREDITS](CREDITS) and [LICENSE_3RD_PARTY](LICENSE_3RD_PARTY) for more information. diff --git a/api/README.md b/api/README.md index a686581d8..d3fcc84c1 100644 --- a/api/README.md +++ b/api/README.md @@ -4,6 +4,7 @@ This API provides access to LibreTime's database via a Django application. This API supersedes the [PHP API](../airtime_mvc/application/controllers/ApiController.php). ## Deploying + Deploying in a production environment is done in the [`install`](../install) script which installs LibreTime. This is how the API is installed in the Vagrant development images too. This method does not automatically reflect changes to @@ -18,6 +19,7 @@ Endpoint exploration and documentation is available from instance. ### Development + For a live reloading version within Vagrant: ``` @@ -38,6 +40,7 @@ sudo -u www-data LIBRETIME_DEBUG=True python3 bin/libretime-api test libretimeap ``` ## 3rd Party Licences + `libretimeapi/tests/resources/song.mp3`: Steps - Tears On The Dancefloor (Album Teaser) by mceyedol. 
Downloaded from https://soundcloud.com/mceyedol/steps-tears-on-the-dancefloor-album-teaser diff --git a/cloud-init.yml b/cloud-init.yml index 99286dd35..fc697d86a 100644 --- a/cloud-init.yml +++ b/cloud-init.yml @@ -1,10 +1,10 @@ # Maintainer: Zachary Klosko (kloskoz@vcu.edu) hostname: libretimeTest -timezone: America/New York # change as needed +timezone: America/New York # change as needed ntp: - pools: ['north-america.pool.ntp.org'] - servers: ['0.north-america.pool.ntp.org', '0.pool.ntp.org'] + pools: ["north-america.pool.ntp.org"] + servers: ["0.north-america.pool.ntp.org", "0.pool.ntp.org"] password: hackme chpasswd: { expire: False } @@ -17,4 +17,4 @@ apt_upgrade: true # Clone repo on init (Change repo url if needed) # If you need to clone a branch, use git clone --branch branchname repourl runcmd: - - git clone https://github.com/LibreTime/libretime.git \ No newline at end of file + - git clone https://github.com/LibreTime/libretime.git diff --git a/composer.json b/composer.json index 228c9ed84..8b53c29cc 100644 --- a/composer.json +++ b/composer.json @@ -1,45 +1,50 @@ { - "autoload": { - "classmap": ["airtime_mvc/application/"] - }, - "autoload-dev": { - "classmap": ["airtime_mvc/tests/application/", "vendor/phpunit/dbunit/src/"] - }, - "require": { - "james-heinrich/getid3": "dev-master", - "propel/propel1": "1.7.0-stable", - "aws/aws-sdk-php": "2.7.9", - "raven/raven": "0.12.0", - "massivescale/celery-php": "2.0.*@dev", - "simplepie/simplepie": "dev-master", - "zf1s/zend-application": "^1.12", - "composer/semver": "^1.4", - "php-amqplib/php-amqplib": "^2.6", - "zf1s/zend-acl": "^1.12", - "zf1s/zend-session": "^1.12", - "zf1s/zend-navigation": "^1.12", - "zf1s/zend-controller": "^1.12", - "zf1s/zend-log": "^1.12", - "zf1s/zend-version": "^1.12", - "zf1s/zend-rest": "dev-master@dev", - "zf1s/zend-layout": "^1.12", - "zf1s/zend-loader": "dev-master@dev", - "zf1s/zend-auth": "^1.12", - "zf1s/zend-filter": "^1.12", - "zf1s/zend-json": "^1.12", - 
"zf1s/zend-form": "^1.12", - "zf1s/zend-db": "^1.12", - "zf1s/zend-file": "^1.12", - "zf1s/zend-file-transfer": "^1.12", - "zf1s/zend-http": "^1.12", - "zf1s/zend-date": "^1.12", - "zf1s/zend-view": "dev-master@dev", - "zf1s/zend-validate": "dev-master@dev", - "zf1s/zend-cache": "dev-master@dev" - }, - "require-dev": { - "phpunit/phpunit": "^4.3", - "phpunit/dbunit": "^2.0", - "zf1s/zend-test": "^1.12" - } + "autoload": { + "classmap": [ + "airtime_mvc/application/" + ] + }, + "autoload-dev": { + "classmap": [ + "airtime_mvc/tests/application/", + "vendor/phpunit/dbunit/src/" + ] + }, + "require": { + "james-heinrich/getid3": "dev-master", + "propel/propel1": "1.7.0-stable", + "aws/aws-sdk-php": "2.7.9", + "raven/raven": "0.12.0", + "massivescale/celery-php": "2.0.*@dev", + "simplepie/simplepie": "dev-master", + "zf1s/zend-application": "^1.12", + "composer/semver": "^1.4", + "php-amqplib/php-amqplib": "^2.6", + "zf1s/zend-acl": "^1.12", + "zf1s/zend-session": "^1.12", + "zf1s/zend-navigation": "^1.12", + "zf1s/zend-controller": "^1.12", + "zf1s/zend-log": "^1.12", + "zf1s/zend-version": "^1.12", + "zf1s/zend-rest": "dev-master@dev", + "zf1s/zend-layout": "^1.12", + "zf1s/zend-loader": "dev-master@dev", + "zf1s/zend-auth": "^1.12", + "zf1s/zend-filter": "^1.12", + "zf1s/zend-json": "^1.12", + "zf1s/zend-form": "^1.12", + "zf1s/zend-db": "^1.12", + "zf1s/zend-file": "^1.12", + "zf1s/zend-file-transfer": "^1.12", + "zf1s/zend-http": "^1.12", + "zf1s/zend-date": "^1.12", + "zf1s/zend-view": "dev-master@dev", + "zf1s/zend-validate": "dev-master@dev", + "zf1s/zend-cache": "dev-master@dev" + }, + "require-dev": { + "phpunit/phpunit": "^4.3", + "phpunit/dbunit": "^2.0", + "zf1s/zend-test": "^1.12" + } } diff --git a/docs/404.md b/docs/404.md index 0be97108d..beb85b04b 100644 --- a/docs/404.md +++ b/docs/404.md @@ -8,4 +8,4 @@ photocredit: Top photo by At the moment, there is not a way to automatically restore a Libretime backup. 
-> To restore a failed Libretime instance, install a fresh copy, go through the -> standard setup process, and reupload the backed-up media files. A *Watched Folders* +> To restore a failed Libretime instance, install a fresh copy, go through the +> standard setup process, and reupload the backed-up media files. A _Watched Folders_ > feature is [currently in development](https://github.com/LibreTime/libretime/issues/70). -A backup script is supplied for your convenience in the *utils/* folder of the Libretime repo. +A backup script is supplied for your convenience in the _utils/_ folder of the Libretime repo. Run it using: ``` @@ -27,7 +27,7 @@ crontab with `sudo crontab -e`: > For more information on how Cron works, check out [this Redhat guide](https://www.redhat.com/sysadmin/automate-linux-tasks-cron). -If you wish to deploy your own backup solution, the following files and folders need to +If you wish to deploy your own backup solution, the following files and folders need to be backed up. ``` @@ -43,11 +43,11 @@ be backed up. liquidsoap.cfg - The main configuration file for Liquidsoap ``` -In addition, you should keep a copy of the database current to the backup. The below code +In addition, you should keep a copy of the database current to the backup. The below code can be used to export the Libretime database to a file. ``` -sudo -u postgres pg_dumpall filename +sudo -u postgres pg_dumpall filename # or to a zipped archive sudo -u postgres pg_dumpall | gzip -c > archivename.gz ``` @@ -58,9 +58,8 @@ the backup server also contains an LibreTime installation, it should be possible to switch playout to this second machine relatively quickly, in case of a hardware failure or other emergency on the production server.) -Two notible backup tools are [rsync](http://rsync.samba.org/) (without version control) and -[rdiff-backup](http://www.nongnu.org/rdiff-backup/) (with version control). 
*rsync* comes +Two notable backup tools are [rsync](http://rsync.samba.org/) (without version control) and +[rdiff-backup](http://www.nongnu.org/rdiff-backup/) (with version control). _rsync_ comes preinstalled with Ubuntu Server. -> **Note:** Standard *rsync* backups, which are used by the backup script, cannot restore files deleted in the backup itself - +> **Note:** Standard _rsync_ backups, which are used by the backup script, cannot restore files deleted in the backup itself diff --git a/docs/_docs/calendar.md b/docs/_docs/calendar.md index 56575c2dd..f277667f3 100644 --- a/docs/_docs/calendar.md +++ b/docs/_docs/calendar.md @@ -30,7 +30,7 @@ To add content to a show, click the show in any view on the Calendar, and select ![](/img/Screenshot488-Add_remove_content.png) -The **Schedule Tracks** action opens a window with the name of the show. Like when using the **Now Playing** page, you can search for content items and add them to the show schedule on the right side of the page. Refer to the *Now Playing* chapter for details. +The **Schedule Tracks** action opens a window with the name of the show. Like when using the **Now Playing** page, you can search for content items and add them to the show schedule on the right side of the page. Refer to the _Now Playing_ chapter for details. When your show has all the required content, click the **OK** button in the bottom right corner to close the window. Back in the **Calendar**, click the show and select **View** from the pop-up menu to view a list of content now included in the show. @@ -42,7 +42,7 @@ The **Contents of Show** window is a read-only interface featuring an orange bar ### Removing content from a show -To remove an individual item from a show, click on the show in the **Calendar**, and select **Schedule Tracks** from the pop-up menu. 
In the window which opens, click any item you wish to remove from the show, then click **Delete** on the pop-up menu, or check the box in the item's row then click the **Remove** icon at the top of the table. To remove all files and playlists from a show, click on the show in the **Calendar**, and select **Clear Show** from the pop-up menu.  +To remove an individual item from a show, click on the show in the **Calendar**, and select **Schedule Tracks** from the pop-up menu. In the window which opens, click any item you wish to remove from the show, then click **Delete** on the pop-up menu, or check the box in the item's row then click the **Remove** icon at the top of the table. To remove all files and playlists from a show, click on the show in the **Calendar**, and select **Clear Show** from the pop-up menu. ### Deleting an upcoming show @@ -54,4 +54,4 @@ You cannot delete or remove content from shows that have already played out. The ### Cancelling playout -If you wish to cancel playout of a show while it is running, click on the show in the **Calendar** and select **Cancel Show** from the pop-up menu. This action cannot be undone. \ No newline at end of file +If you wish to cancel playout of a show while it is running, click on the show in the **Calendar** and select **Cancel Show** from the pop-up menu. This action cannot be undone. diff --git a/docs/_docs/contribute.md b/docs/_docs/contribute.md index 358edfe27..9fcd9127c 100644 --- a/docs/_docs/contribute.md +++ b/docs/_docs/contribute.md @@ -6,7 +6,7 @@ permalink: /contribute --- > LibreTime is a fork of AirTime due to stalled development of the open source version. For background on this, -see this [open letter to the Airtime community](https://gist.github.com/hairmare/8c03b69c9accc90cfe31fd7e77c3b07d). +> see this [open letter to the Airtime community](https://gist.github.com/hairmare/8c03b69c9accc90cfe31fd7e77c3b07d). ## Bug reporting @@ -28,7 +28,7 @@ supported? 
Follow [this guide](/docs/interface-localization) to add your languag ## Write documentation Our site is now built by Jekyll, which has an installation guide [here](https://jekyllrb.com/docs/installation/) to help get you started. - After cloning our repo locally, enter the `docs/` directory and run +After cloning our repo locally, enter the `docs/` directory and run ``` bundle install @@ -55,6 +55,7 @@ Odclive has instructions [here](https://github.com/kessibi/libretime-docker) for and a more persistent install. ## Modifying the Database + LibreTime is designed to work with a [PostgreSQL](https://www.postgresql.org/) database server running locally. LibreTime uses [PropelORM](http://propelorm.org) to interact with the ZendPHP components and create the database. @@ -63,4 +64,4 @@ If you are a developer seeking to add new columns to the database here are the s 1. Modify `airtime_mvc/build/schema.xml` with any changes. 2. Run `dev_tools/propel_generate.sh` 3. Update the upgrade.sql under `airtime_mvc/application/controllers/upgrade_sql/VERSION` for example - `ALTER TABLE imported_podcast ADD COLUMN album_override boolean default 'f' NOT NULL;` \ No newline at end of file + `ALTER TABLE imported_podcast ADD COLUMN album_override boolean default 'f' NOT NULL;` diff --git a/docs/_docs/dashboard.md b/docs/_docs/dashboard.md index b3c57e977..0e30ffbf2 100644 --- a/docs/_docs/dashboard.md +++ b/docs/_docs/dashboard.md @@ -103,11 +103,11 @@ use the checkboxes on the left side of the library table to select specific items. Then drag one of the items into the show to add all of the selected items, or click the **Add to selected show** button, which has a plus icon. If you wish, you can also use the **Trashcan** button to permanently remove items -from LibreTime's library. Only *admin* users have permission to delete all +from LibreTime's library. Only _admin_ users have permission to delete all items. 
To insert checkbox selected items at a specific time in the show schedule, click -one of the songs in the schedule table. Then click the **Add to selected show** +one of the songs in the schedule table. Then click the **Add to selected show** button in the library table. This will insert the library songs after the selected scheduled song. diff --git a/docs/_docs/default-passwords.md b/docs/_docs/default-passwords.md index c3b87df8c..bc7471c7f 100644 --- a/docs/_docs/default-passwords.md +++ b/docs/_docs/default-passwords.md @@ -12,7 +12,7 @@ To change the password of the current user: 2. Click on the username in the upper right corner (next to Log Out) 3. Enter the new password twice and click **Save** -To change the password for a different user (requires *Administrator* privileges): +To change the password for a different user (requires _Administrator_ privileges): 1. Log in to Libretime 2. Go to **Settings** > **Manage Users** @@ -20,17 +20,17 @@ To change the password for a different user (requires *Administrator* privileges ### PostgreSQL -Two of the most important passwords that should be changed *immediately* after installation +Two of the most important passwords that should be changed _immediately_ after installation are the passwords used by the PostgreSQL database. It is strongly recommended that you do this before exposing your server to the internet beyond your internal network. 1. Login to PostgreSQL with `sudo -u postgres psql`. The PostgreSQL shell - `postgres=#` - means that you have logged in successfully. 2. Change the admin password with `ALTER USER postgres PASSWORD 'myPassword';`, where `myPassword` is the new password. -Make sure to include the semicolon at the end! A response of `ALTER ROLE` means that the command ran successfully. -3. Change the password for the *airtime* user with `ALTER USER airtime WITH PASSWORD 'new_password';` -A response of `ALTER ROLE` means that the command ran successfully. -4. 
If all is successful, logout of PostgreSQL with `\q`, go back to */etc/airtime/airtime.conf* to edit the password -in the config file, and restart all services mentioned in the previous section. + Make sure to include the semicolon at the end! A response of `ALTER ROLE` means that the command ran successfully. +3. Change the password for the _airtime_ user with `ALTER USER airtime WITH PASSWORD 'new_password';` + A response of `ALTER ROLE` means that the command ran successfully. +4. If all is successful, logout of PostgreSQL with `\q`, go back to _/etc/airtime/airtime.conf_ to edit the password + in the config file, and restart all services mentioned in the previous section. ### Icecast @@ -38,7 +38,7 @@ Random passwords are generated for Icecast during the installation. To look up a `/etc/icecast2/icecast.xml` -Replace the admin and *changeme* fields below. +Replace the admin and _changeme_ fields below. ``` @@ -68,4 +68,4 @@ To change the default password for Rabbitmq, run the following command sudo rabbitmqctl change_password airtime newpassword ``` -and then update the `/etc/airtime/airtime.conf` file with the new password. \ No newline at end of file +and then update the `/etc/airtime/airtime.conf` file with the new password. diff --git a/docs/_docs/freeipa.md b/docs/_docs/freeipa.md index 39bb64bc9..7a8d18da9 100644 --- a/docs/_docs/freeipa.md +++ b/docs/_docs/freeipa.md @@ -56,11 +56,11 @@ LibreTime needs direct access to LDAP so it can fetch additional information. It a [system account](https://www.freeipa.org/page/HowTo/LDAP#System_Accounts) that you need to set up beforehand. -You can configure everything pertaining to how LibreTime accesses LDAP in +You can configure everything pertaining to how LibreTime accesses LDAP in `/etc/airtime/airtime.conf`. The default file has the following values you need to change. 
```ini -# +# # ---------------------------------------------------------------------- # L D A P # ---------------------------------------------------------------------- diff --git a/docs/_docs/hd-audio-modules.md b/docs/_docs/hd-audio-modules.md index 281d00485..43c72270f 100644 --- a/docs/_docs/hd-audio-modules.md +++ b/docs/_docs/hd-audio-modules.md @@ -5,7 +5,7 @@ git: hd-audio-modules.md category: admin --- -This listing is provided to help ensure that the correct model parameter is passed to the ALSA kernel module for an Intel HDA soundcard, if one is fitted to your LibreTime server. See the chapter *Preparing the server* in this book for more details. +This listing is provided to help ensure that the correct model parameter is passed to the ALSA kernel module for an Intel HDA soundcard, if one is fitted to your LibreTime server. See the chapter _Preparing the server_ in this book for more details. ``` Model name Description @@ -155,7 +155,7 @@ Conexant 5045 Conexant 5047 ============= - laptop Basic Laptop config + laptop Basic Laptop config laptop-hp Laptop config for some HP models (subdevice 30A5) laptop-eapd Laptop config with EAPD support test for testing/debugging purpose, almost all controls @@ -316,4 +316,4 @@ Cirrus Logic CS4208 VIA VT17xx/VT18xx/VT20xx ======================== auto BIOS setup (default) -``` \ No newline at end of file +``` diff --git a/docs/_docs/host-configuration.md b/docs/_docs/host-configuration.md index b528c469c..cb69011fa 100644 --- a/docs/_docs/host-configuration.md +++ b/docs/_docs/host-configuration.md @@ -4,15 +4,15 @@ layout: article category: install --- -The streaming host configuration for LibreTime is shown in the file */etc/airtime/liquidsoap.cfg* which is automatically generated by the **Streams** page, found on the **System** menu of the LibreTime administration interface. 
For this reason, you would not normally edit the streaming configuration manually, as any changes are likely to be overwritten by the administration interface. +The streaming host configuration for LibreTime is shown in the file _/etc/airtime/liquidsoap.cfg_ which is automatically generated by the **Streams** page, found on the **System** menu of the LibreTime administration interface. For this reason, you would not normally edit the streaming configuration manually, as any changes are likely to be overwritten by the administration interface. ## Database and RabbitMQ hosts {#database} -Optionally, you may wish to edit the file */etc/airtime/airtime.conf* to set the PostgreSQL database host, and the username and password to connect to the database with: +Optionally, you may wish to edit the file _/etc/airtime/airtime.conf_ to set the PostgreSQL database host, and the username and password to connect to the database with: sudo nano /etc/airtime/airtime.conf -You can also set options for RabbitMQ messaging and the LibreTime server in this file, although you should not normally need to adjust the defaults unless you are running a large LibreTime system distributed across multiple servers. To run the LibreTime server in demo mode, which changes the greeting on the login page and prevents user accounts from being created or modified, set the value of *demo* to 1. +You can also set options for RabbitMQ messaging and the LibreTime server in this file, although you should not normally need to adjust the defaults unless you are running a large LibreTime system distributed across multiple servers. To run the LibreTime server in demo mode, which changes the greeting on the login page and prevents user accounts from being created or modified, set the value of _demo_ to 1. [database] host = localhost @@ -43,7 +43,7 @@ You can also set options for RabbitMQ messaging and the LibreTime server in this [demo] demo = 0 -Save and close the file with **Ctrl+O** and **Ctrl+X**. 
In order to update the configuration +Save and close the file with **Ctrl+O** and **Ctrl+X**. In order to update the configuration used by the various components of LibreTime, run the following commands sudo systemctl restart libretime-liquidsoap @@ -53,11 +53,11 @@ used by the various components of LibreTime, run the following commands ## API client configuration {#api} -The LibreTime API enables many types of information about the broadcast schedule and configuration to be retrieved from the LibreTime server. Other than the live-info and week-info data fetched by website widgets (see the chapter *Exporting the schedule*), all API requests must be authenticated using the secret API key stored in the file */etc/airtime/api\_client.cfg* on the LibreTime server. This key is autogenerated during LibreTime installation and should be unique for each server. +The LibreTime API enables many types of information about the broadcast schedule and configuration to be retrieved from the LibreTime server. Other than the live-info and week-info data fetched by website widgets (see the chapter _Exporting the schedule_), all API requests must be authenticated using the secret API key stored in the file _/etc/airtime/api_client.cfg_ on the LibreTime server. This key is autogenerated during LibreTime installation and should be unique for each server. If you intend to use the LibreTime API across a public network, for security reasons it is highly recommended that all API requests are sent over encrypted https: and that the web server is configured to accept requests to the api/ directory from specific host names or IP addresses only. 
-If you have changed the *base\_url*, *base\_port* or *base\_dir* setting in */etc/airtime/airtime.conf* from the defaults, you will probably also have to update the *Hostname* settings in the file */etc/airtime/api\_client.cfg* accordingly.** +If you have changed the _base_url_, _base_port_ or _base_dir_ setting in _/etc/airtime/airtime.conf_ from the defaults, you will probably also have to update the _Hostname_ settings in the file _/etc/airtime/api_client.cfg_ accordingly.\*\* bin_dir = /usr/lib/airtime/api_clients api_key = 'XXXXXXXXXXXXXXXXXXXX' @@ -68,7 +68,7 @@ If you have changed the *base\_url*, *base\_port* or *base\_dir* setting in */et ## Apache max file size configuration {#apache} -By default, the maximum upload file size is 40 MB, which may not be large enough for some stations, especially if they are uploading prerecorded shows. The setting for this is located in */etc/apache2/sites-available/airtime.config*. Search for and update the following in megabytes: +By default, the maximum upload file size is 40 MB, which may not be large enough for some stations, especially if they are uploading prerecorded shows. The setting for this is located in _/etc/apache2/sites-available/airtime.config_. Search for and update the following in megabytes: ``` ; Maximum allowed size for uploaded files. @@ -78,7 +78,7 @@ upload_max_filesize = 40M post_max_size = 40M ``` -For quick reference, 1024 MB = 1 GB and 2048 MB = 2 GB, but most will be okay with rounding to the nearest thousand. After updating the config file, restart Apache. +For quick reference, 1024 MB = 1 GB and 2048 MB = 2 GB, but most will be okay with rounding to the nearest thousand. After updating the config file, restart Apache. ``` sudo systemctl restart apache2 @@ -86,7 +86,7 @@ sudo systemctl restart apache2 ## Playout settings {#playout} -Settings for pypo, the playout engine used by LibreTime, are found in the file */etc/airtime/airtime.conf*. 
After making changes to this file, you will have to issue the command: +Settings for pypo, the playout engine used by LibreTime, are found in the file _/etc/airtime/airtime.conf_. After making changes to this file, you will have to issue the command: sudo systemctl restart libretime-playout @@ -160,7 +160,7 @@ for the changes to take effect. # while 'otf' (on the fly) cues while loading into ls # (needs the post_processor patch) cue_style = pre - + ## RabbitMQ hostname changes If the Airtime logs indicate failures to connect to the RabbitMQ server, such as: @@ -171,9 +171,10 @@ If the Airtime logs indicate failures to connect to the RabbitMQ server, such as 2013-10-31 08:21:11,255 ERROR - \[pypomessagehandler.py : main() : line 99\] - Error connecting to RabbitMQ Server. Trying again in few seconds - See more at: http://forum.sourcefabric.org/discussion/16050/\#sthash.W8OJrNFm.dpuf ``` -but the RabbitMQ server is running normally, this error might be due to a change in the server's hostname since Libretime installation. Directory names under */var/lib/rabbitmq/mnesia/* indicate that RabbitMQ's database files are organised according to the hostname of the server (ex. `rabbit@airtime`) where the hostname is *airtime.example.com*. If the hostname has changed, it may be necessary to reconfigure RabbitMQ manually, as follows: -1. Delete the files in */var/lib/rabbitmq/mnesia/* +but the RabbitMQ server is running normally, this error might be due to a change in the server's hostname since Libretime installation. Directory names under _/var/lib/rabbitmq/mnesia/_ indicate that RabbitMQ's database files are organised according to the hostname of the server (ex. `rabbit@airtime`) where the hostname is _airtime.example.com_. If the hostname has changed, it may be necessary to reconfigure RabbitMQ manually, as follows: + +1. 
Delete the files in _/var/lib/rabbitmq/mnesia/_ ``` sudo rm -r /var/lib/rabbitmq/mnesia/* @@ -185,7 +186,7 @@ sudo rm -r /var/lib/rabbitmq/mnesia/* sudo systemctl restart rabbitmq-server ``` -3. Enter the following commands to set up authentication and grant permissions. The *rabbitmqctl add\_user* command requires the RabbitMQ password from the /etc/airtime/airtime.conf file as an argument. The *rabbitmqctl set\_permissions* command should be entered on one line, with the list of Airtime services repeated three times: +3. Enter the following commands to set up authentication and grant permissions. The _rabbitmqctl add_user_ command requires the RabbitMQ password from the /etc/airtime/airtime.conf file as an argument. The _rabbitmqctl set_permissions_ command should be entered on one line, with the list of Airtime services repeated three times: ``` rabbitmqctl add_vhost /airtime @@ -194,4 +195,4 @@ rabbitmqctl set_permissions -p /airtime airtime "airtime-pypo|pypo-fetch|airtime-analyzer|media-monitor"   "airtime-pypo|pypo-fetch|airtime-analyzer|media-monitor"  "airtime-pypo|pypo-fetch|airtime-analyzer|media-monitor" -``` \ No newline at end of file +``` diff --git a/docs/_docs/icecast.md b/docs/_docs/icecast.md index 70c59d3bb..46c3c5895 100644 --- a/docs/_docs/icecast.md +++ b/docs/_docs/icecast.md @@ -8,7 +8,7 @@ category: admin LibreTime supports direct connection to two popular streaming media servers, the open source **Icecast** () and the proprietary **SHOUTcast** (). Apart from the software license, the main difference between these two servers is that Icecast supports simultaneous MP3, AAC, Ogg Vorbis or Ogg Opus streaming from LibreTime, whereas SHOUTcast supports MP3 and AAC streams but not Ogg Vorbis or Opus. The royalty-free Ogg Vorbis format has the advantage of better sound quality than MP3 at lower bitrates, which has a direct impact on the amount of bandwidth that your station will require to serve the same number of listeners. 
Ogg Opus also benefits from good sound quality at low bitrates, with the added advantage of lower latency than other streaming formats. Opus is now an IETF standard () and requires Icecast 2.4 or later to be installed on the streaming server. -Ogg Vorbis playback is supported in **Mozilla Firefox**, **Google Chrome** and **Opera** browsers, via **jPlayer** (), and is also supported in several popular media players, including VideoLAN Client, also known as VLC (). (See the chapter *Stream player for your website* on how to deliver **jPlayer** to your audience). Ogg Opus is relatively new and is supported natively in the very latest browsers, such as Mozilla Firefox 25.0, and media players including VLC 2.0.4 or later. +Ogg Vorbis playback is supported in **Mozilla Firefox**, **Google Chrome** and **Opera** browsers, via **jPlayer** (), and is also supported in several popular media players, including VideoLAN Client, also known as VLC (). (See the chapter _Stream player for your website_ on how to deliver **jPlayer** to your audience). Ogg Opus is relatively new and is supported natively in the very latest browsers, such as Mozilla Firefox 25.0, and media players including VLC 2.0.4 or later. Streaming MP3 below a bitrate of 128kbps is not recommended for music, because of a perceptible loss of high audio frequencies in the broadcast playout. A 96kbps or 64kbps MP3 stream may be acceptable for voice broadcasts if there is a requirement for compatibility with legacy hardware playback devices which do not support Ogg Vorbis or Opus streams. @@ -18,18 +18,18 @@ Conversely, you may have a music station which wants to stream at 160kbps or 192 ## UTF-8 metadata in Icecast MP3 streams -When sending metadata about your stream to an Icecast server in non-Latin alphabets, you may find that Icecast does not display the characters correctly for an MP3 stream, even though they are displayed correctly for an Ogg Vorbis stream. 
In the following screenshot, Russian characters are being displayed incorrectly in the *Current Song* field for the MP3 stream: +When sending metadata about your stream to an Icecast server in non-Latin alphabets, you may find that Icecast does not display the characters correctly for an MP3 stream, even though they are displayed correctly for an Ogg Vorbis stream. In the following screenshot, Russian characters are being displayed incorrectly in the _Current Song_ field for the MP3 stream: ![](/img/Screenshot223-Icecast_UTF-8_metadata.png) -The solution is to specify that the metadata for the MP3 mount point you are using should be interpreted using UTF-8 encoding. You can do this by adding the following stanza to the */etc/icecast2/icecast.xml* file, where *libretime.mp3* is the name of your mount point: +The solution is to specify that the metadata for the MP3 mount point you are using should be interpreted using UTF-8 encoding. You can do this by adding the following stanza to the _/etc/icecast2/icecast.xml_ file, where _libretime.mp3_ is the name of your mount point:           /libretime.mp3        UTF-8   -After saving the */etc/icecast2/icecast.xml* file, you should restart the Icecast server: +After saving the _/etc/icecast2/icecast.xml_ file, you should restart the Icecast server: sudo invoke-rc.d icecast2 restart Restarting icecast2: Starting icecast2 @@ -38,17 +38,17 @@ After saving the */etc/icecast2/icecast.xml* file, you should restart the Icecas ## Icecast handover configuration -In a typical radio station configuration, the live output from the broadcast studio and the scheduled output from LibreTime are mixed together before being sent further along the broadcast chain, to a transmitter or streaming media server on the Internet. (This may not be the case if your LibreTime server is remote from the studio, and you are using the **Show Source Mount Point** or **Master Source Mount Point** to mix live and scheduled content. 
See the *Stream Settings* chapter for details). +In a typical radio station configuration, the live output from the broadcast studio and the scheduled output from LibreTime are mixed together before being sent further along the broadcast chain, to a transmitter or streaming media server on the Internet. (This may not be the case if your LibreTime server is remote from the studio, and you are using the **Show Source Mount Point** or **Master Source Mount Point** to mix live and scheduled content. See the _Stream Settings_ chapter for details). If your **Icecast** server is hosted in a remote data centre, you may not have the option to handover the streaming media source manually, because you have no physical access to connect a broadcast mixer to the server. Disconnecting the stream and beginning another is less than ideal, because the audience's media players will also be disconnected when that happens. -The **Icecast** server has a *fallback-mount* feature which can be used to move clients (media players used by listeners or viewers) from one source to another, as new sources become available. This makes it possible to handover from LibreTime output to a show from another source, and handover to LibreTime again once the other show has ended. +The **Icecast** server has a _fallback-mount_ feature which can be used to move clients (media players used by listeners or viewers) from one source to another, as new sources become available. This makes it possible to handover from LibreTime output to a show from another source, and handover to LibreTime again once the other show has ended. To enable fallback mounts, edit the main Icecast configuration file to define the mount points you will use, and the relationship between them. sudo nano /etc/icecast2/icecast.xml -The example ** section provided in the *icecast.xml* file is commented out by default. Before or after the commented section, add three mount point definitions. 
The default mount point used by LibreTime is */airtime\_128* which is shown in the */etc/airtime/liquidsoap.cfg* file. You must also define a mount point for the live source (called */live.ogg* in this example) and a mount point for the public to connect to (called */stream.ogg* in this example). +The example __ section provided in the _icecast.xml_ file is commented out by default. Before or after the commented section, add three mount point definitions. The default mount point used by LibreTime is _/airtime_128_ which is shown in the _/etc/airtime/liquidsoap.cfg_ file. You must also define a mount point for the live source (called _/live.ogg_ in this example) and a mount point for the public to connect to (called _/stream.ogg_ in this example). /airtime_128 @@ -69,25 +69,25 @@ The example ** section provided in the *icecast.xml* file is commented ou 0 -These mount point definitions mean that a client connecting to a URL such as *http://icecast.example.com:8000/stream.ogg* will first fall back to the */live.ogg* mount point if it is available. If not, the client will fall back in turn to the */airtime\_128* mount point for LibreTime playout. +These mount point definitions mean that a client connecting to a URL such as *http://icecast.example.com:8000/stream.ogg* will first fall back to the _/live.ogg_ mount point if it is available. If not, the client will fall back in turn to the _/airtime_128_ mount point for LibreTime playout. -Setting the value of ** to 1 (enabled) means that when the */live.ogg* mount point becomes available again, the client will be re-connected to it.  If you wish to hide the */airtime\_128* and */live.ogg* mount points from the public Icecast web interface, set the value of ** in each of these definitions to 1. +Setting the value of __ to 1 (enabled) means that when the _/live.ogg_ mount point becomes available again, the client will be re-connected to it.  
If you wish to hide the _/airtime_128_ and _/live.ogg_ mount points from the public Icecast web interface, set the value of __ in each of these definitions to 1.

## Source configuration

-Connect the other source to the Icecast server with the same parameters defined in the */etc/airtime/liquidsoap.cfg* file, except for the mount point. This should one of the mount points you have defined in the */etc/icecast2/icecast.xml* file, such as */live.ogg* in the example above.
+Connect the other source to the Icecast server with the same parameters defined in the _/etc/airtime/liquidsoap.cfg_ file, except for the mount point. This should be one of the mount points you have defined in the _/etc/icecast2/icecast.xml_ file, such as _/live.ogg_ in the example above.

-To configure **Mixxx** for streaming to Icecast, click *Options*, *Preferences*, then *Live Broadcasting*. For server *Type*, select the default of *Icecast 2* when streaming to Debian or Ubuntu servers, as this is the current version of Icecast supplied with those GNU/Linux distributions.
+To configure **Mixxx** for streaming to Icecast, click _Options_, _Preferences_, then _Live Broadcasting_. For server _Type_, select the default of _Icecast 2_ when streaming to Debian or Ubuntu servers, as this is the current version of Icecast supplied with those GNU/Linux distributions.

-![](/img/Screenshot123-Mixxx_Preferences.png) 
+![](/img/Screenshot123-Mixxx_Preferences.png)

By default, Icecast streams are buffered to guard against network problems, which causes latency for remote listeners. When monitoring the stream from a remote location, you may have to begin the live stream a few seconds before the previous stream ends to enable a smooth transition.

## Promoting your station through Icecast

If you have an Icecast server, you can put a link to the Icecast status page (by default at port 8000) on your station's homepage,
-to provide an overview of available streams. 
See the chapter *Interface customization* for tips on theming the -Icecast status page. You can also use Now Playing widgets (see the chapter *Exporting the schedule*) or HTML5 stream players (see the chapter *Stream player for your website*) to help grow your audience. +to provide an overview of available streams. See the chapter _Interface customization_ for tips on theming the +Icecast status page. You can also use Now Playing widgets (see the chapter _Exporting the schedule_) or HTML5 stream players (see the chapter _Stream player for your website_) to help grow your audience. On an Icecast server, you can uncomment the `` section in the _/etc/icecast2/icecast.xml_ file to have your station automatically listed on the Icecast directory website which could help you pick @@ -100,14 +100,14 @@ up more listeners. http://dir.xiph.org/cgi-bin/yp-cgi -The Indymedia stream directory at links to grassroots independent radio projects around the world. You can add your station to their list with an additional ** section, as follows: +The Indymedia stream directory at links to grassroots independent radio projects around the world. You can add your station to their list with an additional __ section, as follows: 15 http://radio.indymedia.org/cgi-bin/yp-cgi -Another stream directory service is provided by the Liquidsoap Flows! site . The following section can be added to the file */usr/lib/airtime/pypo/bin/liquidsoap\_scripts/ls\_script.liq* after *add\_skip\_command(s)* on line 174, for a stream named '*ourstation*': +Another stream directory service is provided by the Liquidsoap Flows! site . The following section can be added to the file _/usr/lib/airtime/pypo/bin/liquidsoap_scripts/ls_script.liq_ after _add_skip_command(s)_ on line 174, for a stream named '_ourstation_': ourstation = register_flow( radio="Rock 'n Roll Radio", @@ -119,4 +119,4 @@ Another stream directory service is provided by the Liquidsoap Flows! 
site **Note:** For the time being, a stream can be registered on the Liquidsoap Flows! site with any username and password. Authenticated services may be offered in future. \ No newline at end of file +> **Note:** For the time being, a stream can be registered on the Liquidsoap Flows! site with any username and password. Authenticated services may be offered in future. diff --git a/docs/_docs/install.md b/docs/_docs/install.md index 8bdd6a32c..26ec5ca3e 100644 --- a/docs/_docs/install.md +++ b/docs/_docs/install.md @@ -18,7 +18,7 @@ permalink: /install - Wired internet connection and static IP address for on-prem install [DigitalOcean](https://www.digitalocean.com/pricing/#Compute) and [Linode](https://www.linode.com/pricing/#row--compute) - have similar plans that meet Cloud Install requirements. Both plans cost $10/month. +have similar plans that meet Cloud Install requirements. Both plans cost $10/month. ## Preparing the server @@ -68,7 +68,6 @@ sudo ufw allow 8001,8002/tcp - Installing LibreTime consists of running the following commands in the terminal: ``` diff --git a/docs/_docs/interface-customization.md b/docs/_docs/interface-customization.md index 6c09a4f76..ca08d8048 100644 --- a/docs/_docs/interface-customization.md +++ b/docs/_docs/interface-customization.md @@ -5,12 +5,11 @@ git: interface-customization.md category: admin --- - -The LibreTime administration interface, as a web application, is fully customizable using the same methods that you might use to modify a website. For instance, you may wish to increase certain font sizes or change the colours in the LibreTime interface to better suit staff users with impaired vision. To do so, open one of the CSS files in the */public/css/* directory under the LibreTime *DocumentRoot* directory in an editor such as **nano**: +The LibreTime administration interface, as a web application, is fully customizable using the same methods that you might use to modify a website. 
For instance, you may wish to increase certain font sizes or change the colours in the LibreTime interface to better suit staff users with impaired vision. To do so, open one of the CSS files in the _/public/css/_ directory under the LibreTime _DocumentRoot_ directory in an editor such as **nano**: sudo nano /usr/share/airtime/public/css/styles.css -To change the background colour of the administration interface from dark gray to white, the *background:* property of the body tag could be changed to *\#ffffff* as follows: +To change the background colour of the administration interface from dark gray to white, the _background:_ property of the body tag could be changed to _\#ffffff_ as follows: body { font-size: 62.5%; @@ -26,13 +25,13 @@ Any custom changes that you make to the administration interface should be backe # Modifying the Icecast interface -If you have installed Icecast, in the directory */etc/icecast2/web/* you will find several XSLT and other files which are used to generate the Icecast web interface. If you are familiar with HTML you should be able to modify these pages, as they are well commented. You do have to be careful with syntax, because something as simple as a missing bracket can cause the Icecast web interface to break down. +If you have installed Icecast, in the directory _/etc/icecast2/web/_ you will find several XSLT and other files which are used to generate the Icecast web interface. If you are familiar with HTML you should be able to modify these pages, as they are well commented. You do have to be careful with syntax, because something as simple as a missing bracket can cause the Icecast web interface to break down. -For example, you could change the *status.xsl* page: +For example, you could change the _status.xsl_ page: sudo nano /etc/icecast2/web/status.xsl -Modifying the *status.xsl* page is a good place to start, because this is the default page that site visitors see when they browse port 8000 on your Icecast server. 
The most obvious change to make in the XSLT pages is the content of the *<title>* and *<h2>* tags, to announce the name of your station. You can also modify the *style.css* file in this directory to change colour and layout options. +Modifying the _status.xsl_ page is a good place to start, because this is the default page that site visitors see when they browse port 8000 on your Icecast server. The most obvious change to make in the XSLT pages is the content of the _<title>_ and _<h2>_ tags, to announce the name of your station. You can also modify the _style.css_ file in this directory to change colour and layout options. After saving the file with Ctrl+O, refresh your web browser, and the new look should now be visible. diff --git a/docs/_docs/interface-localization.md b/docs/_docs/interface-localization.md index 17000612e..cd68f4186 100644 --- a/docs/_docs/interface-localization.md +++ b/docs/_docs/interface-localization.md @@ -18,13 +18,13 @@ GNU **gettext** means using a .po file for each language or dialect, a specially The first of these three lines starts with the hash symbol, and references where this string of text is found in the source code by its file name and line number. If this string is found more than once in the source code, you will see other reference lines here. The second line contains the **msgid**, which is the original version of the string. The third line contains the **msgstr**, which is the translation of that string for the localization that this particular .po file relates to. -If you use the cross-platform program **Poedit** () to edit the .po file, this formatting of the text is hidden by an easy-to-use GUI. The *poedit* package can be installed on most GNU/Linux distributions using the standard software installer. Versions of Poedit for Mac and Windows are available for free download from the project's homepage. 
+If you use the cross-platform program **Poedit** () to edit the .po file, this formatting of the text is hidden by an easy-to-use GUI. The _poedit_ package can be installed on most GNU/Linux distributions using the standard software installer. Versions of Poedit for Mac and Windows are available for free download from the project's homepage. Before manually translating strings in Poedit from scratch, you should take a look at the online translation services available, such as Lingohub () or Google's Translator Toolkit (), which both support gettext .po files. If using automatic translation, you can then use Poedit to fine-tune the localization and fix any formatting errors. If you don't already have a GitHub account, you can sign up at . Once you have a GitHub account, you can fork a copy () of the LibreTime project. Work for the next major version of the software is done in the **master** branch of each project, so that's the branch to **checkout** after you have made the initial **git clone**. -In the locale code *de\_CH*, for example, *de* represents the German language and the suffix *\_CH* indicates the dialect spoken in Switzerland. Some languages have a wide variety of dialect localizations, which can be differentiated with a suffix in this way. You should update the header information in the .po file, which includes the language code and a country code, using one of the existing .po files as a guide. +In the locale code _de_CH_, for example, _de_ represents the German language and the suffix _\_CH_ indicates the dialect spoken in Switzerland. Some languages have a wide variety of dialect localizations, which can be differentiated with a suffix in this way. You should update the header information in the .po file, which includes the language code and a country code, using one of the existing .po files as a guide. 
After forking the LibreTime git repository, make sure you're in the **master** branch: @@ -32,15 +32,15 @@ After forking the LibreTime git repository, make sure you're in the **master** b devel * master -Create a new locale directory (e.g. *airtime\_mvc/locale/de\_CH/LC\_MESSAGES/* for German as spoken in Switzerland): +Create a new locale directory (e.g. _airtime_mvc/locale/de_CH/LC_MESSAGES/_ for German as spoken in Switzerland): mkdir -p airtime_mvc/locale/de_CH/LC_MESSAGES/ -Copy the template *airtime.po* file into the directory you just created: +Copy the template _airtime.po_ file into the directory you just created: cp airtime_mvc_locale/template/airtime.po airtime_mvc/locale/de_CH/LC_MESSAGES -and update the header information in the new copy of the *airtime.po* file using the **nano** editor: +and update the header information in the new copy of the _airtime.po_ file using the **nano** editor: nano airtime_mvc/locale/de_CH/LC_MESSAGES/airtime.po diff --git a/docs/_docs/listener-stats.md b/docs/_docs/listener-stats.md index 539316e41..6f4b59d6b 100644 --- a/docs/_docs/listener-stats.md +++ b/docs/_docs/listener-stats.md @@ -8,8 +8,8 @@ The Listener Stats page on the Analytics menu shows graphs of listener connectio ![](/img/portfolio/stream-stats.jpg) -If the status indicator is red, check that the **Admin User** and **Admin Password** settings are correct under **Additional Options** for the named mount point, such as *libretime\_128*, on the **Streams** page of the **Settings** menu. +If the status indicator is red, check that the **Admin User** and **Admin Password** settings are correct under **Additional Options** for the named mount point, such as _libretime_128_, on the **Streams** page of the **Settings** menu. To choose which particular streams should have statistics displayed, click the check boxes for the individual colour-coded mount points, just below the graph. -By default, statistics for the last 24 hours of streaming are shown. 
To change this date and time range, click the calendar and clock icons in the lower left corner of the page, then click the magnifying glass icon. \ No newline at end of file +By default, statistics for the last 24 hours of streaming are shown. To change this date and time range, click the calendar and clock icons in the lower left corner of the page, then click the magnifying glass icon. diff --git a/docs/_docs/live-broadcast.md b/docs/_docs/live-broadcast.md index f5b05a62f..9fe277118 100644 --- a/docs/_docs/live-broadcast.md +++ b/docs/_docs/live-broadcast.md @@ -39,36 +39,36 @@ for remote input connection details. **Setup** 1. Download and install [BUTT](https://danielnoethen.de/) for your OS. -*Note: be sure you have butt version 0.1.17 or newer installed* + _Note: be sure you have butt version 0.1.17 or newer installed_ 2. Open up BUTT 3. Click **settings** 4. Under **Main** > **Server** click **ADD** - * Type LibreTime (or your station) under Name - * Click the radio button next to **IceCast** under Type - * Type your stations URL (webpage address) under **Address**: - * Type **8002** under **Port**: - * Type your DJ login password under **Password** - * Type **/show** under IceCast mountpoint: - * Type your dj login under **IceCast user:** + - Type LibreTime (or your station) under Name + - Click the radio button next to **IceCast** under Type + - Type your stations URL (webpage address) under **Address**: + - Type **8002** under **Port**: + - Type your DJ login password under **Password** + - Type **/show** under IceCast mountpoint: + - Type your dj login under **IceCast user:** 5. Click **ADD** 6. Still in settings click, **Audio** and select your external sound card under -**Audio Device** *Note: if you only have an internal sound card you maybe able -to use it but that is OS specific and outside of this tutorial. 
We are assuming
-you have a mic and mixer or a USB mixer hooked up to or as an external soundcard*
+   **Audio Device** _Note: if you only have an internal sound card you may be able
+   to use it but that is OS specific and outside of this tutorial. We are assuming
+   you have a mic and mixer or a USB mixer hooked up to or as an external soundcard_

**Show Time**

1. When its almost your show time go to your LibreTime page and look at the time
-in the top right when your show starts go to Butt.
+   in the top right when your show starts go to Butt.
2. Click the white Play button (third button in the middle).
3. If it says connecting… and then stream time with a counter– congratulations,
-your are connected!
-4. Go to the LibreTime page and at the top right under Source Streams the
-tab besides Show Source is to the left and Orange – if it is and Current
-shows Live Show you are connected.
+   you are connected!
+4. Go to the LibreTime page and at the top right under Source Streams the
+   tab beside Show Source is to the left and Orange – if it is and Current
+   shows Live Show you are connected.
5. If it is gray, click on the **Show Source** switch to the right of it and it
-will toggle your show on and you will be broadcasting. 
_Note: whether auto + connect is turned on is a station specific setting so it could work either way_ ### Recording your show diff --git a/docs/_docs/microsite.md b/docs/_docs/microsite.md index 8592ed9ff..7acbabbc8 100644 --- a/docs/_docs/microsite.md +++ b/docs/_docs/microsite.md @@ -12,7 +12,7 @@ podcast tabs, and a live feed of your station with information on the the curren ## Modifying the LibreTime Radio Page -The background of the mini-site that appears when you visit the server's domain in your web browser can be changed by modifying the page's CSS file, located at */usr/share/airtime/php/airtime_mvc/public/css/radio-page/radio-page.css*. +The background of the mini-site that appears when you visit the server's domain in your web browser can be changed by modifying the page's CSS file, located at _/usr/share/airtime/php/airtime_mvc/public/css/radio-page/radio-page.css_. ``` html { diff --git a/docs/_docs/multipass.md b/docs/_docs/multipass.md index efd05168b..8843911ee 100644 --- a/docs/_docs/multipass.md +++ b/docs/_docs/multipass.md @@ -30,6 +30,7 @@ If you want to delete the image and start again, run `multipass delete ltTEST && ### Cloud-init options in cloud-init.yaml You may wish to change the below fields as per your location. + ``` timezone: America/New York # change as needed ntp: @@ -42,4 +43,4 @@ modify the URL on this line: ``` - cd / && git clone https://github.com/LibreTime/libretime.git -``` \ No newline at end of file +``` diff --git a/docs/_docs/playlists.md b/docs/_docs/playlists.md index d93ca3611..dd46f16e0 100644 --- a/docs/_docs/playlists.md +++ b/docs/_docs/playlists.md @@ -6,7 +6,7 @@ category: interface > **About Autoloading Playlists** > -> Libretime will schedule tracks from a selected playlist an hour before a show is +> Libretime will schedule tracks from a selected playlist an hour before a show is > scheduled to air. This is a great way to automatically schedule weekly shows which are received > via. podcasts. 
@@ -40,20 +40,20 @@ Smart blocks are automatically filled with media files from the LibreTime librar To create a smart block, click the **Smartblocks** button on the left sidebar, and select **New** from the toolbar. Like a playlist, smart blocks can have a title and description, which you can edit. This helps you find relevant smart blocks in searches. -Fill out the smart block's **Name**, **Search Criteria**, and **Limit to** sections. The search criteria can be any one of LibreTime's metadata categories, such as **Title**, **Creator** or **Genre**. The modifier depends on whether the metadata in question contains letters or numbers. For example, **Title** has modifiers including *contains* and *starts with*, whereas the modifiers for **BPM** include *is greater than* and *is in the range*. +Fill out the smart block's **Name**, **Search Criteria**, and **Limit to** sections. The search criteria can be any one of LibreTime's metadata categories, such as **Title**, **Creator** or **Genre**. The modifier depends on whether the metadata in question contains letters or numbers. For example, **Title** has modifiers including _contains_ and _starts with_, whereas the modifiers for **BPM** include _is greater than_ and _is in the range_. If you have a large number of files which meet the criteria that you specify, you may wish to limit the duration of the smart block using the **Limit to** field, so that it fits within the show you have in mind. Select **hours**, **minutes** or **items** from the drop-down menu, and click the **Generate** button again, if it is a static smart block. Then click the **Save** button. > **Note:** Smart Blocks by default will not overflow the length of a scheduled show. -> This is to prevent tracks from being cut-off because they exceed the time limit of a show. +> This is to prevent tracks from being cut-off because they exceed the time limit of a show. 
> If you want a smartblock to schedule tracks until it is longer than the Time Limit you can check **"Allow last track to exceed time limit"** > (helpful for avoiding dead air on autoscheduled shows). ![](/img/Smartblock-advanced.png) -You can also set the **smart block type**. A **Static** smart block will save the criteria and generate the block content immediately. This enables you to edit the contents of the block in the **Library** page before adding it to a show. A **Dynamic** smart block will only save the criteria, and the specific content will be generated at the time the block is added to a show. After that, the content of the show can be changed or re-ordered in the **Now Playing** page.  +You can also set the **smart block type**. A **Static** smart block will save the criteria and generate the block content immediately. This enables you to edit the contents of the block in the **Library** page before adding it to a show. A **Dynamic** smart block will only save the criteria, and the specific content will be generated at the time the block is added to a show. After that, the content of the show can be changed or re-ordered in the **Now Playing** page. -Click the **plus button** on the left to add OR criteria, such as **Creator** containing *beck* OR *jimi*. To add AND criteria, such as **Creator** containing *jimi* AND BPM in the range *120* to *130*, click the **plus button** on the right. (The criteria are not case sensitive). Click **Preview** to see the results. +Click the **plus button** on the left to add OR criteria, such as **Creator** containing _beck_ OR _jimi_. To add AND criteria, such as **Creator** containing _jimi_ AND BPM in the range _120_ to _130_, click the **plus button** on the right. (The criteria are not case sensitive). Click **Preview** to see the results. > If you see the message **0 files meet the criteria**, it might mean that the files in the Library have not been tagged with the correct metadata. 
See the chapter [Preparing media](/docs/preparing-media) for tips on tagging content. diff --git a/docs/_docs/playout-history.md b/docs/_docs/playout-history.md index bde7c662b..9d8605d5f 100644 --- a/docs/_docs/playout-history.md +++ b/docs/_docs/playout-history.md @@ -32,14 +32,13 @@ The **History Templates** page on the History menu enables you to prepare report ![](/img/new-hist-temp.png) -Either of these actions opens a page in which you can name the new template, and add or remove elements from the list on the left. To add a new element from the list on the right, click the plus icon for the item you require. If the element you require is not listed, you can use the **Add New Field** box at the lower end of the right side column. Select *string*, *boolean*, *integer*, or *float*, depending on the type of data that you wish to log, and then click the **+ Add** button. +Either of these actions opens a page in which you can name the new template, and add or remove elements from the list on the left. To add a new element from the list on the right, click the plus icon for the item you require. If the element you require is not listed, you can use the **Add New Field** box at the lower end of the right side column. Select _string_, _boolean_, *integer*, or _float_, depending on the type of data that you wish to log, and then click the **+ Add** button. When the template is in the format you require, click the **Save** button, and **Set Default Template** if you wish. The new template will now be listed on the History Templates page. If you have set a new default template, any changes will be visible on the tabs of the Playout History page. - ## Exporting the schedule {#exporting} -LibreTime has a feature which enables your station's show and schedule information to be displayed on remote websites. This feature is included in LibreTime because you would not usually invite the general public to access your LibreTime server directly. 
If you had very large numbers of people requesting data from the LibreTime server at once, the burst of network traffic might overload the server, potentially disrupting your broadcasts. If carried out maliciously, this network overload is known as a *denial of service attack*. +LibreTime has a feature which enables your station's show and schedule information to be displayed on remote websites. This feature is included in LibreTime because you would not usually invite the general public to access your LibreTime server directly. If you had very large numbers of people requesting data from the LibreTime server at once, the burst of network traffic might overload the server, potentially disrupting your broadcasts. If carried out maliciously, this network overload is known as a _denial of service attack_. Instead, your public-facing web server can retrieve the schedule information from the LibreTime API. It can be presented using Javascript widgets and styled with CSS, in any format that you require. @@ -167,17 +166,17 @@ In this case, the metadata returned would be in a different format from the abov "sunday":[], "AIRTIME_API_VERSION":"1.1"}) -If you see the message *You are not allowed to access this resource* when attempting to display schedule information in your web browser, log in to the LibreTime administration interface, click *System* in the main menu, then *Preferences*. Set **Allow Remote Websites To Access "Schedule" Info?** to **Enabled**, click the **Save** button, then refresh the browser window opened on the schedule export URL. If you do not wish to make schedule information available to the public, set this option to **Disabled** instead. +If you see the message _You are not allowed to access this resource_ when attempting to display schedule information in your web browser, log in to the LibreTime administration interface, click _System_ in the main menu, then _Preferences_. 
Set **Allow Remote Websites To Access "Schedule" Info?** to **Enabled**, click the **Save** button, then refresh the browser window opened on the schedule export URL. If you do not wish to make schedule information available to the public, set this option to **Disabled** instead. ### Caching schedule information If the LibreTime server is behind a firewall, or you want to protect the LibreTime server from large numbers of schedule requests, you may wish to cache the schedule information on a public-facing or intermediate server. You can then create a firewall rule that only allows the schedule server to connect to the LibreTime server, in addition to any remote users of the LibreTime web interface. -Your system administrator can set up schedule caching on a standard Apache and PHP enabled web server with the *curl* program installed, using the following steps: +Your system administrator can set up schedule caching on a standard Apache and PHP enabled web server with the _curl_ program installed, using the following steps: 1. Create a shell script on the schedule server (schedule.example.com) that polls the remote LibreTime server (libretime.example.com), and writes the metadata returned into a pair of local temporary files: - sudo nano /usr/local/bin/libretime-schedule.sh + sudo nano /usr/local/bin/libretime-schedule.sh The content of this file should be like the following script, replacing libretime.example.com with the name of your LibreTime server: @@ -189,27 +188,27 @@ The content of this file should be like the following script, replacing libretim 2. Make the script executable: - sudo chmod +x /usr/local/bin/libretime-schedule.sh + sudo chmod +x /usr/local/bin/libretime-schedule.sh 3. 
Create an Apache VirtualHost configuration for the schedule server: - sudo nano /etc/apache2/sites-available/schedule + sudo nano /etc/apache2/sites-available/schedule -containing a definition like the following, replacing *schedule.example.com* with the name of your schedule server: +containing a definition like the following, replacing _schedule.example.com_ with the name of your schedule server: ServerName schedule.example.com DocumentRoot /var/www/schedule/ -4. In the schedule server's DocumentRoot folder, create the folders *api/live-info/* and *api/week-info/* +4. In the schedule server's DocumentRoot folder, create the folders _api/live-info/_ and _api/week-info/_ - sudo mkdir -p /var/www/schedule/api/live-info/ - sudo mkdir -p /var/www/schedule/api/week-info/ + sudo mkdir -p /var/www/schedule/api/live-info/ + sudo mkdir -p /var/www/schedule/api/week-info/ -5. Create an index.php file in the *api/live-info/* folder: +5. Create an index.php file in the _api/live-info/_ folder: - sudo nano /var/www/schedule/api/live-info/index.php + sudo nano /var/www/schedule/api/live-info/index.php containing the following code: @@ -226,9 +225,9 @@ containing the following code: echo $content; ?> -6. Create an index.php file in the *api/week-info/* folder: +6. Create an index.php file in the _api/week-info/_ folder: - sudo nano /var/www/schedule/api/week-info/index.php + sudo nano /var/www/schedule/api/week-info/index.php containing the following code: @@ -247,12 +246,12 @@ containing the following code: 7. Enable the new configuration and reload the Apache web server: - sudo a2ensite schedule - sudo /etc/init.d/apache2 reload + sudo a2ensite schedule + sudo /etc/init.d/apache2 reload 8. 
Create a cron job to run the shell script each minute: - sudo nano /etc/cron.d/libretime-schedule + sudo nano /etc/cron.d/libretime-schedule containing the line: diff --git a/docs/_docs/podcasts.md b/docs/_docs/podcasts.md index 80a574ef4..b5c2c7a26 100644 --- a/docs/_docs/podcasts.md +++ b/docs/_docs/podcasts.md @@ -6,7 +6,7 @@ category: interface The Podcasts page allows you add subscriptions to podcasts which are often used to syndicated audio files using a URL called a RSS feed. This allows your LibreTime instance to automatically download new shows from the web. -In order to add a podcast you need to get the RSS feed. All podcasts available on iTunes have a RSS feed but it is sometimes hidden. See this issue on our github page [#510](https://github.com/LibreTime/libretime/issues/510) for more information. RSS feeds that do not end in *.xml* may be accepted by LibreTime but might fail to download episodes; in that case, download the episode using a podcast client such as [gpodder](https://gpodder.github.io/) and then manually upload and schedule the episode. Podcast feeds coming from Anchor.fm have been known to have this issue. +In order to add a podcast you need to get the RSS feed. All podcasts available on iTunes have a RSS feed but it is sometimes hidden. See this issue on our github page [#510](https://github.com/LibreTime/libretime/issues/510) for more information. RSS feeds that do not end in _.xml_ may be accepted by LibreTime but might fail to download episodes; in that case, download the episode using a podcast client such as [gpodder](https://gpodder.github.io/) and then manually upload and schedule the episode. Podcast feeds coming from Anchor.fm have been known to have this issue. The podcast interfaces provides you with the ability to generate [Smartblocks](/docs/playlists) that can be used in conjunction with autoloading playlists to schedule the newest episode of a podcast without human intervention. 
@@ -21,7 +21,7 @@ The podcast interfaces provides you with the ability to generate [Smartblocks](/ The podcasts dashboard is similar to the tracks view, allowing you to add, edit, and remove podcasts by the toolbar, in addition to sorting by columns. -To add a podcast, click on the **+ Add** button on the toolbar and provide the podcast's RSS feed, which usually ends in *.xml*. +To add a podcast, click on the **+ Add** button on the toolbar and provide the podcast's RSS feed, which usually ends in _.xml_. Once the podcast's feed is recognized, the editor pane opens for the podcast. ### Editor @@ -33,4 +33,4 @@ A search box is available to search for episodes within the feed. - To import an episode directly into LibreTime, double-click on an episode or select it and click **+ Import**. The podcast will appear under tracks with the Podcast Name as the Album. - To delete an episode from LibreTime, select the episode and click on the red trash can on the toolbar. -- If you would like LibreTime to automatically download the latest episodes of a podcast, make sure *Download latest episodes* is checked. This can be used in conjunction with Smartblocks and Playlists to automate downloading and scheduling shows that are received via podcast feed. +- If you would like LibreTime to automatically download the latest episodes of a podcast, make sure _Download latest episodes_ is checked. This can be used in conjunction with Smartblocks and Playlists to automate downloading and scheduling shows that are received via podcast feed. diff --git a/docs/_docs/preparing-media.md b/docs/_docs/preparing-media.md index aa53cf824..1478842c9 100644 --- a/docs/_docs/preparing-media.md +++ b/docs/_docs/preparing-media.md @@ -10,20 +10,20 @@ Before uploading media to an LibreTime server, there are a number of factors whi LibreTime automatically imports any metadata that is in the files' ID3 tags. 
If these tags are incorrect or are missing information, you will have to either edit the metadata manually, or suffer the consequences. For example, if the files have creator or genre metadata missing, it will be impossible to search for, create playlists or generate smart blocks according to these criteria until you add it. -There are a number of programs available which can be used to correct mistakes or incomplete information in ID3 tags. You can use a music library manager (like Apple Music, Rhythmbox, or Windows Media Player) to edit ID3 tags as well, but you may be required to import the files into your library, which may not always be convenient. +There are a number of programs available which can be used to correct mistakes or incomplete information in ID3 tags. You can use a music library manager (like Apple Music, Rhythmbox, or Windows Media Player) to edit ID3 tags as well, but you may be required to import the files into your library, which may not always be convenient. - [TagScanner](https://www.xdlab.ru/en/) (Windows) - [Mp3tag](https://www.mp3tag.de/en/index.html) (Windows) - [MusicBrainz Picard](https://picard.musicbrainz.org/) (Mac, Windows, Linux) - [Ex Falso](http://code.google.com/p/quodlibet/) (Linux) -The *Tags From Path* feature of Ex Falso is a particularly useful time saver if you have a large archive of untagged files. Sometimes there is useful creator or title information in the file name or directory path structure, which can be converted into an ID3 tag automatically. +The _Tags From Path_ feature of Ex Falso is a particularly useful time saver if you have a large archive of untagged files. Sometimes there is useful creator or title information in the file name or directory path structure, which can be converted into an ID3 tag automatically. ![](/img/Screenshot175-Ex_Falso.png) ## Metadata in legacy character sets -LibreTime expects file tag metadata to be stored in the international *UTF-8* character set. 
Programs such as **Ex Falso** (described above) encode metadata in UTF-8 by default. If you have an archive of files encoded with metadata in a legacy character set, such as the Cyrillic encoding *Windows-1251*, you should convert these files before import. +LibreTime expects file tag metadata to be stored in the international _UTF-8_ character set. Programs such as **Ex Falso** (described above) encode metadata in UTF-8 by default. If you have an archive of files encoded with metadata in a legacy character set, such as the Cyrillic encoding _Windows-1251_, you should convert these files before import. The program **mid3iconv** (part of the **python-mutagen** package in Debian and Ubuntu) can be used to batch convert the metadata character set of files on the command line. You can install **python-mutagen** with the command: @@ -41,27 +41,27 @@ To actually convert all of the tags and strip any legacy ID3v1 tag present from The name of the original character set follows the **-e** option. Other legacy character sets that mid3iconv can convert to UTF-8 include: - KOI8-R: Russian - KOI8-U: Ukrainian + KOI8-R: Russian + KOI8-U: Ukrainian - GBK: Traditional Chinese - GB2312: Simplified Chinese + GBK: Traditional Chinese + GB2312: Simplified Chinese - EUC-KR: Korean - EUC-JP: Japanese + EUC-KR: Korean + EUC-JP: Japanese - CP1253: Greek - CP1254: Turkish - CP1255: Hebrew - CP1256: Arabic + CP1253: Greek + CP1254: Turkish + CP1255: Hebrew + CP1256: Arabic ## Audio loudness -On file ingest, LibreTime analyzes each Ogg Vorbis, MP3, AAC or FLAC file's loudness, and stores a *ReplayGain* value for that file in its database. At playout time, the ReplayGain value is provided to Liquidsoap so that gain can be automatically adjusted to provide an average output of -14 dBFS loudness (14 decibels below full scale). See for more details of ReplayGain. 
+On file ingest, LibreTime analyzes each Ogg Vorbis, MP3, AAC or FLAC file's loudness, and stores a _ReplayGain_ value for that file in its database. At playout time, the ReplayGain value is provided to Liquidsoap so that gain can be automatically adjusted to provide an average output of -14 dBFS loudness (14 decibels below full scale). See for more details of ReplayGain. Because of this automatic gain adjustment, any files with average loudness higher than -14 dBFS will not sound louder than quieter files at playout time, but the lower crest factor in the louder files (their relatively low peak-to-average ratio) may be apparent in the output, making those files sound less dynamic. This may be an issue for contemporary popular music, which can average at -9 dBFS or louder before ReplayGain adjustment. (See for a detailed analysis of the problem). -Your station's producers should therefore aim for 14dB between peak and average loudness to maintain the crest factor of their prepared material (also known as *DR14* on some dynamic range meters, such as the command-line DR14 T.meter available from ). If the producers are working to a different loudness standard, the ReplayGain modifier in LibreTime's Stream Settings page can be adjusted to suit their material. +Your station's producers should therefore aim for 14dB between peak and average loudness to maintain the crest factor of their prepared material (also known as _DR14_ on some dynamic range meters, such as the command-line DR14 T.meter available from ). If the producers are working to a different loudness standard, the ReplayGain modifier in LibreTime's Stream Settings page can be adjusted to suit their material. Large transient peaks in otherwise quiet files should be avoided, to guard against the need for peak limiting when ReplayGain is applied to those quieter files. 
@@ -85,7 +85,7 @@ And here is an example of a very loud file, with lower crest factor, where the o ----------+-------+-------+----------+------  -7.86 dB | 36592 |  0.40 |    14804 | Snoop_Dogg-Doggfather.ogg -In the output from vorbisgain, *Peak* is the maximum sample value of the file before any ReplayGain has been applied, where a value of 32,767 represents full scale when decoding to signed 16 bit samples. Note that lossy compressed files can have peaks greater than full scale, due to encoding artifacts. The *New Peak* value for the Snoop Dogg file may be relatively low due to the hard limiting used in the mastering of that piece of music. +In the output from vorbisgain, _Peak_ is the maximum sample value of the file before any ReplayGain has been applied, where a value of 32,767 represents full scale when decoding to signed 16 bit samples. Note that lossy compressed files can have peaks greater than full scale, due to encoding artifacts. The _New Peak_ value for the Snoop Dogg file may be relatively low due to the hard limiting used in the mastering of that piece of music. ## Silence in media files diff --git a/docs/_docs/reverse-proxy.md b/docs/_docs/reverse-proxy.md index 0104efb13..25dba180a 100644 --- a/docs/_docs/reverse-proxy.md +++ b/docs/_docs/reverse-proxy.md @@ -4,7 +4,6 @@ layout: article category: install --- - In some deployments, the LibreTime server is deployed behind a reverse proxy, for example in containerization use-cases such as Docker and LXC. LibreTime makes extensive use of its API for some site functionality, which causes diff --git a/docs/_docs/scheduling-shows.md b/docs/_docs/scheduling-shows.md index 48995fafb..bf988f72b 100644 --- a/docs/_docs/scheduling-shows.md +++ b/docs/_docs/scheduling-shows.md @@ -33,29 +33,29 @@ check the **Repeats?** box and fill out the repeat information. A description of are in the table below. Finially, click on the grey **+ Add this show** button at the top of the pane to add your show to the calendar. 
-| Field | Description | -|-------|-------| -| _What_ | | -| Name (Required) | The name of your show | -| URL | The URL of your show. Not used on the public page. | -| Genre | The genre of your show. Not used on the public page. | -| Description | Description of your show. Not used on the public page. | -| _When_ | | -| Start Time (Required) | The time and date the show starts. Note that the time element is in 24 hour time. If the **Now** option is selected, the show will be created to immediately start. | -| End Time (Required) | The time and date the show ends. Defaults to a time one hour after the start time, which can be seen in the **Duration** field, which is uneditable. | -| Repeats? | If checked, allows for options to schedule a repeated show. Shows can repeat weekly up to monthly in increments of one week and can be scheduled on multiple days of the same week. An end date can be set, otherwise the show can be deleted by clicking on its entry in the calendar and clicking Delete > Future Occurrences. If **Linked ?** is checked, the playlist scheduled for the next show will also play for all future shows. | -| _Autoloading Playlist_ | | -| Add Autoloading Playlist? | If checked, allows for the following options | -| Select Playlist | Select the playlist the show will autofill from (shows autofill exactly one hour before air). If you wish to use a smartblock you must add it to a playlist and then select that playlist. This can be used to auto-schedule new podcast episodes to air. | -| Repeat Playlist Until Show Is Full | If checked, the playlist will be added to the show multiple times until the slot is full. Useful for applying a one-hour music playlist made up of smartblocks to a two-hour show. | -| _Live Stream Input_ | | -| Use LibreTime/Custom Authentication | | -| Show Source | | -| _Who_ | | -| Search Users, DJs | Program Managers and Admins may assign DJs to a show, giving DJs access to schedule tracks for said show. 
DJs cannot create shows on their own. | -| _Style_ | | -| Background/Text Color | Set the color of the background and text of entries on the calendar. If not set, LibreTime will select contrasting colors for easy readability. | -| Show Logo | If desired, you can upload a show logo here. The logo does not appear on the public page. | +| Field | Description | +| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| _What_ | | +| Name (Required) | The name of your show | +| URL | The URL of your show. Not used on the public page. | +| Genre | The genre of your show. Not used on the public page. | +| Description | Description of your show. Not used on the public page. | +| _When_ | | +| Start Time (Required) | The time and date the show starts. Note that the time element is in 24 hour time. If the **Now** option is selected, the show will be created to immediately start. | +| End Time (Required) | The time and date the show ends. Defaults to a time one hour after the start time, which can be seen in the **Duration** field, which is uneditable. | +| Repeats? | If checked, allows for options to schedule a repeated show. Shows can repeat weekly up to monthly in increments of one week and can be scheduled on multiple days of the same week. An end date can be set, otherwise the show can be deleted by clicking on its entry in the calendar and clicking Delete > Future Occurrences. If **Linked ?** is checked, the playlist scheduled for the next show will also play for all future shows. | +| _Autoloading Playlist_ | | +| Add Autoloading Playlist? 
| If checked, allows for the following options | +| Select Playlist | Select the playlist the show will autofill from (shows autofill exactly one hour before air). If you wish to use a smartblock you must add it to a playlist and then select that playlist. This can be used to auto-schedule new podcast episodes to air. | +| Repeat Playlist Until Show Is Full | If checked, the playlist will be added to the show multiple times until the slot is full. Useful for applying a one-hour music playlist made up of smartblocks to a two-hour show. | +| _Live Stream Input_ | | +| Use LibreTime/Custom Authentication | | +| Show Source | | +| _Who_ | | +| Search Users, DJs | Program Managers and Admins may assign DJs to a show, giving DJs access to schedule tracks for said show. DJs cannot create shows on their own. | +| _Style_ | | +| Background/Text Color | Set the color of the background and text of entries on the calendar. If not set, LibreTime will select contrasting colors for easy readability. | +| Show Logo | If desired, you can upload a show logo here. The logo does not appear on the public page. | Once your show is created, click on it to open its context menu. Select **Schedule Tracks** to open the track scheduler. @@ -80,4 +80,3 @@ When media is playing, the **On Air** indicator at the top will turn red. You can listen to your stream by going to `yourserverIP:8000` or by clicking the **Listen** button under the On Air indicator. - diff --git a/docs/_docs/set-server-time.md b/docs/_docs/set-server-time.md index b96c45e63..0ef1379f8 100644 --- a/docs/_docs/set-server-time.md +++ b/docs/_docs/set-server-time.md @@ -7,7 +7,7 @@ category: install Accurate time keeping on your server is vital for LibreTime performance. You can confirm that the date and time of your server are set correctly with the `date` command. 
The server should respond with the date, time, time zone and year in a format similar to the following example: - Tue Jul 2 15:08:57 BST 2013 + Tue Jul 2 15:08:57 BST 2013 If the time on your server is wrong, it is strongly recommended that you take LibreTime off-air until the problem is fixed. @@ -21,7 +21,7 @@ Optionally, open the **ntp** configuration file in the **nano** editor to add fu sudo nano /etc/ntp.conf -On Ubuntu GNU/Linux, the default time server is *ntp.ubuntu.com*, but there are many other time servers available on the public Internet, including the group of servers listed at for each country. Using a variety of NTP servers located closely to your LibreTime server should produce the most accurate results. For example, for a server in the United Kingdom you could use the following list: +On Ubuntu GNU/Linux, the default time server is _ntp.ubuntu.com_, but there are many other time servers available on the public Internet, including the group of servers listed at for each country. Using a variety of NTP servers located closely to your LibreTime server should produce the most accurate results. For example, for a server in the United Kingdom you could use the following list: # You do need to talk to an NTP server or two (or three). server ntp.ubuntu.com @@ -30,7 +30,7 @@ On Ubuntu GNU/Linux, the default time server is *ntp.ubuntu.com*, but there are server 2.uk.pool.ntp.org server 3.uk.pool.ntp.org -Enter the server names you require, press **Ctrl+O** to write out the */etc/ntp.conf* file, then **Ctrl+X** to exit **nano**. Restart the **ntp** service with: +Enter the server names you require, press **Ctrl+O** to write out the _/etc/ntp.conf_ file, then **Ctrl+X** to exit **nano**. Restart the **ntp** service with: sudo invoke-rc.d ntp restart @@ -52,7 +52,7 @@ Then use the **ntpq -p** command to confirm that **ntp** is working. 
This comman ### Adjusting the server time zone -The data centre which hosts your LibreTime server could be located anywhere in the world. Some servers are set to *Coordinated Universal Time* or UTC (similar to *Greenwich Mean Time* or GMT), regardless of their location. LibreTime uses UTC time in its database for scheduling purposes, independent of the server time zone. +The data centre which hosts your LibreTime server could be located anywhere in the world. Some servers are set to _Coordinated Universal Time_ or UTC (similar to _Greenwich Mean Time_ or GMT), regardless of their location. LibreTime uses UTC time in its database for scheduling purposes, independent of the server time zone. If the server time zone is not appropriate for integration with your station's other systems, on a Debian or Ubuntu server you can reconfigure the **tzdata** (time zone data) package with the command: diff --git a/docs/_docs/settings.md b/docs/_docs/settings.md index 0cdc2af94..e1ad692bc 100644 --- a/docs/_docs/settings.md +++ b/docs/_docs/settings.md @@ -4,7 +4,6 @@ title: Settings category: admin --- - ## General Settings ![](/img/station-info-settings.png) @@ -19,7 +18,7 @@ Description** and **Station Logo** here. The **Default Interface Language** drop-down menu sets the default localization for your LibreTime instance, and the **Station Timezone** drop-down menu can be used to display local time at your station. LibreTime stores show times -internally in UTC format (similar to *Greenwich Mean Time*), but displays local +internally in UTC format (similar to _Greenwich Mean Time_), but displays local time for the convenience of your station staff. You can also set the day of the week that you wish to start your station's weekly schedule on, which defaults to Sunday. 
@@ -29,7 +28,7 @@ The **Track Type Default** enables you to select a track type default for upload Initially, the **Default Fade In** and **Default Fade Out** times for automated fades are set to half a second, and the **Default Crossfade Duration** is set to zero seconds. Custom fade and crossfade times can be set for adjacent items in a -playlist or static smart block. See the chapter *Library* for details. +playlist or static smart block. See the chapter _Library_ for details. The **Intro Autoloading Playlist** enables you to select a playlist that will be scheduled at the beginning of every show that has enabled an autoloading @@ -60,8 +59,8 @@ refactors. You can switch back at any time. You can enable live, read-only access to the LibreTime schedule calendar for your station's public website with the **Public LibreTime API** option, if you wish. (There is more about this feature in the -[*Exporting the schedule*](/docs/playout-history) chapter, in the -*Advanced Configuration* section of this book). +[_Exporting the schedule_](/docs/playout-history) chapter, in the +_Advanced Configuration_ section of this book). The **Allowed CORS URLs** is intended to deal with situations where you want a remote site with a different domain to access the API. This is relevant when @@ -90,7 +89,7 @@ Individual LibreTime users can choose another interface localization when they log in, or set personal preferences for localization and time zone by clicking their username on the right side of the menu bar. ----- +--- ## Track Types {#types} @@ -105,7 +104,7 @@ their username on the right side of the menu bar. 1. On the "Visibility" drop down menu, choose to enable or disable the track type. By default, it is enabled. If disabled, it won't be shown across Libretime or in the API for developers. 1. Click **Save**. ----- +--- ## Stream Settings @@ -115,11 +114,11 @@ their username on the right side of the menu bar. 
You can configure direct Icecast and SHOUTcast streams and sound card output by clicking **Streams** on the **System** menu. -At the top left of the **Stream Settings** page are global settings including **Hardware Audio Output**, which enables playout from the default sound card on the server, if one is fitted. The default **Output Type** of *ALSA* on the drop-down menu will be suitable for most servers with a sound card. If not, you have the option to choose from other Liquidsoap interfaces available, such as *OSS* or *PortAudio*. +At the top left of the **Stream Settings** page are global settings including **Hardware Audio Output**, which enables playout from the default sound card on the server, if one is fitted. The default **Output Type** of _ALSA_ on the drop-down menu will be suitable for most servers with a sound card. If not, you have the option to choose from other Liquidsoap interfaces available, such as _OSS_ or _PortAudio_. The second checkbox under Global Settings enables the sending of **Icecast Vorbis Metadata** with direct streams. This setting is optional, because some media players have a bug which makes them disconnect from Ogg Vorbis streams when an Icecast server notifies the player that a new track is starting. -The **Stream Label** radio button allows you to set the metadata that will be sent with direct streams; *Artist* and *Title*, *Show*, *Artist* and *Title*, or *Station name* and *Show name*. +The **Stream Label** radio button allows you to set the metadata that will be sent with direct streams; _Artist_ and _Title_, _Show_, _Artist_ and _Title_, or _Station name_ and _Show name_. The **Off Air Metadata** field configures the text that will be sent to any configured streaming servers, and from there on to media players, when Airtime is not streaming any output. 
@@ -137,7 +136,7 @@ Airtime supports two types of live input stream; the **Show Source**, which enab The **Auto Switch Off** and **Auto Switch On** checkboxes enable playout to be switched automatically to the highest priority source whenever an authenticated input source disconnects from or connects to Airtime, respectively. The field **Switch Transition Fade** sets the length of the audio fade as scheduled playout is switched to a remote input source, and back. -Each type of input stream requires a username and password before the remote broadcaster can connect to Airtime. The **Master Username** and **Master Password** can be set in the Input Stream Settings box, while the authentication for individual Show Sources is set up in Airtime's schedule calendar. See the *Calendar* chapter for details. +Each type of input stream requires a username and password before the remote broadcaster can connect to Airtime. The **Master Username** and **Master Password** can be set in the Input Stream Settings box, while the authentication for individual Show Sources is set up in Airtime's schedule calendar. See the _Calendar_ chapter for details. Input streams must have a **Port** for the remote broadcaster to connect to, which should be a number in the range from 1024 to 49151. If you have the Icecast or SHOUTcast streaming server running on the same machine as Airtime, you should avoid using port 8000 or 8001 for either type of Airtime input stream. This is because both Icecast and SHOUTcast use port 8000, and SHOUTcast also uses port 8001. If the usernames and passwords were similar, remote broadcasters might accidentally connect to the streaming server directly, bypassing Airtime. @@ -155,7 +154,7 @@ If you have checked the **Auto Switch On** box in the Stream Settings page, the ![](/img/libretime-show-source-stream.png) -If you have the **Auto Switch Off** box checked LibreTime will resume scheduled playback whenever a stream disconnects. 
Otherwise you will need to slide to disable a source after a DJ stops streaming. +If you have the **Auto Switch Off** box checked LibreTime will resume scheduled playback whenever a stream disconnects. Otherwise you will need to slide to disable a source after a DJ stops streaming. You can also force disconnection of a live remote source, for example when the remote input source has crashed and is no longer sending audio data, click the **X** icon to the left of the source name. @@ -167,11 +166,10 @@ On the right side of the page, you can configure up to three independent output To configure another stream, click the bar with the stream number to expand its box, and make sure **Enabled** is checked. Enter at least the streaming **Server** IP address or domain name, and **Port** details. The default port for Icecast and SHOUTcast servers is 8000. -Click **Additional Options** to expand a box in which you can enter the usernames, passwords and metadata to send to the streaming server. The default **Username** for Icecast servers is *source*, and if this the name in use on your streaming server, you can leave this field empty. The **Admin User** and **Admin Password** settings are optional, and are used to query the streaming server for audience numbers by the **Listener Stats** page on the **System** menu. +Click **Additional Options** to expand a box in which you can enter the usernames, passwords and metadata to send to the streaming server. The default **Username** for Icecast servers is _source_, and if this is the name in use on your streaming server, you can leave this field empty. The **Admin User** and **Admin Password** settings are optional, and are used to query the streaming server for audience numbers by the **Listener Stats** page on the **System** menu. You can also set the specific **Mount Point** that listeners will connect to here.
Then click one of the **Save** buttons in the upper or lower right corner of the page to update the Airtime server's settings. Airtime supports output to Icecast in Ogg Vorbis, Ogg Opus, MP3 and AAC formats. When selecting a SHOUTcast server from the **Service Type** drop-down menu, you are restricted to using MP3 or AAC formats only, so the choice of Ogg Vorbis and Opus formats is greyed out in the **Stream Type** drop-down menu. The SHOUTcast username for stream sources is fixed, so you do not need to enter this value under **Additional Options**, but you will usually have to enter a password. -Any connection problems between Liquidsoap and Icecast or SHOUTcast are shown on the Stream Settings page. For example, if you enter the wrong password, you will see an *Authentication Required* error message. To fix this, enter the correct password in the **Additional Options** box, and click the **Save** button. If the streaming server is down for any reason, or you have entered an incorrect **Server** name or **Port** number, you will see the message *Can not connect to the streaming server*. - +Any connection problems between Liquidsoap and Icecast or SHOUTcast are shown on the Stream Settings page. For example, if you enter the wrong password, you will see an _Authentication Required_ error message. To fix this, enter the correct password in the **Additional Options** box, and click the **Save** button. If the streaming server is down for any reason, or you have entered an incorrect **Server** name or **Port** number, you will see the message _Can not connect to the streaming server_. 
diff --git a/docs/_docs/ssl.md b/docs/_docs/ssl.md index 255a371e4..a0e336547 100644 --- a/docs/_docs/ssl.md +++ b/docs/_docs/ssl.md @@ -10,6 +10,7 @@ To increase the security of your server, you can enable encrypted access to the One of the fastest, easiest, and cheapest ways to get an SSL certificate is through [Certbot](https://certbot.eff.org/), as created by the Electronic Frontier Foundation. There are some requirements for this process: + - you have an HTTP website (already installed and configured by default by the LibreTime installer) and - this website is open to the public internet (likely via. port forwarding if your computer is behind a firewall) and - the server is accessible to the public via. port 80 @@ -45,11 +46,11 @@ Head to your server's IP address to check to see that the installation worked. ### Deploying a self-signed certificate -The Debian/Ubuntu package *ssl-cert* creates a *snakeoil* certificate and key based on your server's hostname. This gratis certificate and key pair created under the */etc/ssl/certs*/ and */etc/ssl/private/* directories will not be recognised by users' browsers without manual intervention. You can install the *ssl-cert* package with the command: +The Debian/Ubuntu package _ssl-cert_ creates a _snakeoil_ certificate and key based on your server's hostname. This gratis certificate and key pair created under the _/etc/ssl/certs_/ and _/etc/ssl/private/_ directories will not be recognised by users' browsers without manual intervention. You can install the _ssl-cert_ package with the command: sudo apt-get install ssl-cert -If the hostname of your server does not match the domain name you intend to use with the LibreTime virtual host, the user's browser will present an additional security warning. 
You can set the domain name of the certificate by editing the file */usr/share/ssl-cert/ssleay.cnf* to replace the *@HostName@* variable: +If the hostname of your server does not match the domain name you intend to use with the LibreTime virtual host, the user's browser will present an additional security warning. You can set the domain name of the certificate by editing the file _/usr/share/ssl-cert/ssleay.cnf_ to replace the _@HostName@_ variable: commonName = @HostName@ @@ -69,9 +70,9 @@ Next, edit the virtual host configuration for your LibreTime server to include a sudo nano /etc/apache2/sites-available/airtime-vhost.conf -Using the following configuration for Apache 2.2 as a guide, replace *airtime.example.com* with the name of your server and *admin@example.com* with your email address. The older SSLv2 and SSLv3 protocols and SSL compression should be disabled, as they are generally believed to be insecure. You may wish to create a *ServerAlias* for users to access the administration interface over https:// if required. +Using the following configuration for Apache 2.2 as a guide, replace _airtime.example.com_ with the name of your server and *admin@example.com* with your email address. The older SSLv2 and SSLv3 protocols and SSL compression should be disabled, as they are generally believed to be insecure. You may wish to create a _ServerAlias_ for users to access the administration interface over https:// if required. -On port 80, Apache's *alias* module is used to set a *Redirect permanent* for the login page. Optionally, access could be denied to all sites except *localhost* and any other LibreTime servers on your network, so that unencrypted communication between LibreTime components can continue. +On port 80, Apache's _alias_ module is used to set a _Redirect permanent_ for the login page. 
Optionally, access could be denied to all sites except _localhost_ and any other LibreTime servers on your network, so that unencrypted communication between LibreTime components can continue. ``` @@ -114,7 +115,7 @@ On port 80, Apache's *alias* module is used to set a *Redirect permanent* for th Order allow,deny Allow from all - + ``` Save the file with **Ctrl+O** and exit the **nano** editor with **Ctrl+X**. Then restart Apache with the command: @@ -129,7 +130,7 @@ The first time you access an LibreTime server with a self-signed certificate ove ![](/img/Screenshot547-connection_untrusted.png) -On the next page in Firefox, click the **Get Certificate** button to inspect the details of the self-signed certificate. If all is well, click the **Confirm Security Exception** button. You should now be able to proceed to the https:// login page.   +On the next page in Firefox, click the **Get Certificate** button to inspect the details of the self-signed certificate. If all is well, click the **Confirm Security Exception** button. You should now be able to proceed to the https:// login page. ![](/img/Screenshot548-confirm_exception.png) diff --git a/docs/_docs/status.md b/docs/_docs/status.md index 9b35d669b..5ebc5b57e 100644 --- a/docs/_docs/status.md +++ b/docs/_docs/status.md @@ -14,5 +14,5 @@ If any of the check mark icons in the **Status** column have changed to a red wa administrator for assistance. (The chapter [Troubleshooting](/docs/troubleshooting) contains some tips). LibreTime will do its best to restart any failing services, but sometimes manual intervention may be required; for example, in the case of hardware failure. -If you have run out of storage space, a LibreTime user with *admin* privileges could log in and delete media files +If you have run out of storage space, a LibreTime user with _admin_ privileges could log in and delete media files that are no longer required from the **Library**. 
Alternatively, you could ask your system administrator to install additional storage capacity. diff --git a/docs/_docs/troubleshooting.md b/docs/_docs/troubleshooting.md index fc70e1199..4f98809c3 100644 --- a/docs/_docs/troubleshooting.md +++ b/docs/_docs/troubleshooting.md @@ -4,16 +4,16 @@ title: Troubleshooting category: admin --- -Is something not working for your Libretime installation? Here's a quick guide to help you +Is something not working for your Libretime installation? Here's a quick guide to help you troubleshoot most issues you'll run into. ## 1. Let's check the basics Is your server on? (We hate to ask.) Is it connected to the internet? Is it connected to your -broadcast console or mixer if being used for soundcard output? If you're using a cloud host, +broadcast console or mixer if being used for soundcard output? If you're using a cloud host, does your cloud provider's status page indicate any system outages? -Once you know your physical (or virtual) system is functional, was a show scheduled for the +Once you know your physical (or virtual) system is functional, was a show scheduled for the current time with tracks or an autoplaylist scheduled? ## 2. Are all services working? @@ -23,8 +23,8 @@ A fully working server should have green checkmarks next to all services. ![](/img/Screenshot521-System_status_240.png) -If one of the services isn't working, text will display with a terminal command to restart the service -or get status information for a particular service. For example (for Ubuntu 18.04), the following +If one of the services isn't working, text will display with a terminal command to restart the service +or get status information for a particular service. For example (for Ubuntu 18.04), the following commands would restart or check the status of Libretime's Liquidsoap instance, respectively. ``` @@ -38,7 +38,7 @@ If the service isn't wanting to restart, look at its status for clues as to why ## 3. 
Known problems -If you have one of these issues, please try to resolve it with the instructions below before moving on in the +If you have one of these issues, please try to resolve it with the instructions below before moving on in the troubleshooting checklist. - **Streaming player on Microsite and Listen player on Dashboard not working?** The problem could be caused by a bug in writing to the database during the setup wizard. This can be fixed by going to **Settings** -> **Stream Settings** and toggling the **Default Streaming** and **Custom/ 3rd Party Streaming** option, accepting the popup dialogues, and clicking **Save** at the top of the settings page. @@ -53,7 +53,7 @@ Our main documentation listing is [here](/docs) and can be searched [here](/sear ## 5. Reach out to the developers -Libretime is still in active development, meaning bugs and issues are expected to pop up every so often. -See if an issue is still open by looking at our [Issues page](https://github.com/LibreTime/libretime/issues). -If you don't get the help you need, please [open an issue](https://github.com/LibreTime/libretime/issues/new/choose) +Libretime is still in active development, meaning bugs and issues are expected to pop up every so often. +See if an issue is still open by looking at our [Issues page](https://github.com/LibreTime/libretime/issues). +If you don't get the help you need, please [open an issue](https://github.com/LibreTime/libretime/issues/new/choose) so we can take a look at it. diff --git a/docs/_docs/upgrading.md b/docs/_docs/upgrading.md index bf80c1d36..d5dd8304d 100644 --- a/docs/_docs/upgrading.md +++ b/docs/_docs/upgrading.md @@ -19,14 +19,13 @@ of dot separated identifiers immediately following the patch version. This pre-r that the version is unstable in a sense that it might contain incomplete features or not satisfy the intended compatibility requirements as per semver. 
-## Upgrading +## Upgrading > After your LibreTime server has been deployed for a few years, you may need to -upgrade the GNU/Linux distribution that it runs in order to maintain security -update support. If the upgrade does not go smoothly, it may cause significant -downtime, so you should always have a fallback system available during the -upgrade to ensure broadcast continuity. - +> upgrade the GNU/Linux distribution that it runs in order to maintain security +> update support. If the upgrade does not go smoothly, it may cause significant +> downtime, so you should always have a fallback system available during the +> upgrade to ensure broadcast continuity. Before upgrading a production LibreTime server, you should back up both the PostgreSQL database and the storage server used by LibreTime. This is especially important if you have not already @@ -34,14 +33,14 @@ set up a regular back up routine. This extra back up is a safety measure in case during the upgrade, for example due to the wrong command being entered when moving files. See [Backing up the server](/docs/backing-up-the-server) in this manual for details of how to perform these back ups. -The LibreTime [installation script](/install) will detect an existing LibreTime or Airtime deployment and back up any configuration files that it finds. We recommend taking your own manual backups of the configuration yourself nevertheless. The install script also tries to restart the needed services during an upgrade. In any case you should monitor if this happened and also take a quick look at the logs files to be sure everything is still fine. Now might be the time to reboot the system or virtual machine LibreTime is running on since regular reboots are part of a healthy system anyway. +The LibreTime [installation script](/install) will detect an existing LibreTime or Airtime deployment and back up any configuration files that it finds. 
We recommend taking your own manual backups of the configuration yourself nevertheless. The install script also tries to restart the needed services during an upgrade. In any case you should monitor if this happened and also take a quick look at the logs files to be sure everything is still fine. Now might be the time to reboot the system or virtual machine LibreTime is running on since regular reboots are part of a healthy system anyway. -After the upgrade has completed, you may need to clear your web browser's cache before logging into the new version of the LibreTime administration interface. If the playout engine starts up and detects that a show should be playing at the current time, it will skip to the correct point in the current item and start playing. +After the upgrade has completed, you may need to clear your web browser's cache before logging into the new version of the LibreTime administration interface. If the playout engine starts up and detects that a show should be playing at the current time, it will skip to the correct point in the current item and start playing. There will be tested ways to switch from a LibreTime pre-release to a packaged version of LibreTime. Airtime 2.5.x versions support upgrading from version 2.3.0 and above. If you are running a production server with a version of Airtime prior to 2.3.0, you should -upgrade it to version 2.3.0 before continuing.  +upgrade it to version 2.3.0 before continuing. -> **Note:** Airtime's *linked files* and *watched folders* features currently do not work in Libretime. +> **Note:** Airtime's _linked files_ and _watched folders_ features currently do not work in Libretime. diff --git a/docs/_docs/users.md b/docs/_docs/users.md index f198e3f7b..043956bd4 100644 --- a/docs/_docs/users.md +++ b/docs/_docs/users.md @@ -5,12 +5,12 @@ category: interface --- > Note: if your Libretime server is accessible from the public Internet (ex. 
being hosted in a cloud VM) -it is strongly recommended to create a second administrator account with a secure password and then -delete the `admin` account. +> it is strongly recommended to create a second administrator account with a secure password and then +> delete the `admin` account. ## User Account Types -To add further user accounts to the system, one for each of your station staff that need access to Airtime, click the **New User** button with the plus icon. Enter a user name, password and contact details, and then select the **User Type** from the drop down menu, which can be *Admin*, *Program Manager*, *DJ*, or *Guest*. The difference between these user types is: +To add further user accounts to the system, one for each of your station staff that need access to Airtime, click the **New User** button with the plus icon. Enter a user name, password and contact details, and then select the **User Type** from the drop down menu, which can be _Admin_, _Program Manager_, _DJ_, or _Guest_. The difference between these user types is: **Guests** @@ -58,4 +58,3 @@ side of its row in the table. You cannot delete your own user account, and usern Users can update their own password, and their contact, language and time zone details, by clicking their username on the right side of the main menu bar, next to the **Logout** link. - diff --git a/docs/_docs/vagrant.md b/docs/_docs/vagrant.md index 0239366b9..e3c82e9b5 100644 --- a/docs/_docs/vagrant.md +++ b/docs/_docs/vagrant.md @@ -91,13 +91,13 @@ directory. With the above instructions LibreTime is installed on Ubuntu Xenial Xerus. The Vagrant setup offers the option to choose a different operation system according to you needs. -| OS | Command | Comment | -| ------ | ------------------- | ------- | -| Debian 10 | `vagrant up debian-buster` | Install on Debian Buster. | -| Debian 9 | `vagrant up debian-stretch` | Install on current Debian Stretch. 
| -| Ubuntu 18.04 | `vagrant up ubuntu-bionic` | Install on current Ubuntu Bionic Beaver. | -| Ubuntu 16.04 | `vagrant up ubuntu-xenial` | Install on Ubuntu Xenial Xerus. | -| CentOS | `vagrant up centos` | CentOS 8 with native systemd support and activated SELinux. | +| OS | Command | Comment | +| ------------ | --------------------------- | ----------------------------------------------------------- | +| Debian 10 | `vagrant up debian-buster` | Install on Debian Buster. | +| Debian 9 | `vagrant up debian-stretch` | Install on current Debian Stretch. | +| Ubuntu 18.04 | `vagrant up ubuntu-bionic` | Install on current Ubuntu Bionic Beaver. | +| Ubuntu 16.04 | `vagrant up ubuntu-xenial` | Install on Ubuntu Xenial Xerus. | +| CentOS | `vagrant up centos` | CentOS 8 with native systemd support and activated SELinux. | ## Troubleshooting diff --git a/docs/_docs/webstreams.md b/docs/_docs/webstreams.md index 7f21b27bc..30e28e386 100644 --- a/docs/_docs/webstreams.md +++ b/docs/_docs/webstreams.md @@ -4,18 +4,18 @@ layout: article category: interface --- - ### Adding a webstream -A web stream URL and metadata can be added to the LibreTime library, so that a remote stream can be searched for and scheduled to be *pulled* into a show. For example, at the top of the hour your station may pull a news report from journalists working in another studio. This is a different concept from **Master Source** and **Show Source** remote streams which are *pushed* into the LibreTime playout schedule. + +A web stream URL and metadata can be added to the LibreTime library, so that a remote stream can be searched for and scheduled to be _pulled_ into a show. For example, at the top of the hour your station may pull a news report from journalists working in another studio. This is a different concept from **Master Source** and **Show Source** remote streams which are _pushed_ into the LibreTime playout schedule. 
To add a web stream, click the **+ New** button on the left side of the Webstreams page. Like a playlist, web streams in the Library can have a title and **Description**, which may help you find them in searches later. ![](/img/webstream.jpg) -The **Stream URL** setting must include the *port number* (such as 8000) and *mount point* (such as remote\_stream) of the remote stream, in addition to the streaming server name. A **Default Length** for the remote stream can also be set. If the stream is added at the end of a show which becomes overbooked as a result, it will be faded out when the show ends. +The **Stream URL** setting must include the _port number_ (such as 8000) and _mount point_ (such as remote_stream) of the remote stream, in addition to the streaming server name. A **Default Length** for the remote stream can also be set. If the stream is added at the end of a show which becomes overbooked as a result, it will be faded out when the show ends. Note: LibreTime checks the remote webstream's status upon editing stream settings, so an offline stream will result in an error. There are many tools such as [BUTT](https://danielnoethen.de/butt/) and [MIXXX](https://www.mixxx.org) that can be used to send a test stream to LibreTime can save it; read more [here](/docs/live-broadcast). diff --git a/docs/_docs/widgets.md b/docs/_docs/widgets.md index 0b7be1cd8..4c3e9cf4f 100644 --- a/docs/_docs/widgets.md +++ b/docs/_docs/widgets.md @@ -12,7 +12,7 @@ Before using the widgets, make sure Libretime's Public API is enabled in **Setti ![](/img/widgets_settings.png) -> **Note:** Your Libretime instance needs to be accessible to the public *without the use of a VPN or SSH tunneling* in order for the widgets to work. +> **Note:** Your Libretime instance needs to be accessible to the public _without the use of a VPN or SSH tunneling_ in order for the widgets to work. 
## Streaming Player Widget @@ -28,4 +28,4 @@ From **Widgets** > **Player**, enter a title for your streaming widget and selec ![](/img/widgets_schedule.png) -The show schedule widget displays the upcoming shows for the next seven days. There are no customizable settings for this widget. \ No newline at end of file +The show schedule widget displays the upcoming shows for the next seven days. There are no customizable settings for this widget. diff --git a/docs/api/openapi.yaml b/docs/api/openapi.yaml index 6e49f49db..0896d12f8 100644 --- a/docs/api/openapi.yaml +++ b/docs/api/openapi.yaml @@ -20,9 +20,9 @@ paths: while interval will return shows in the next 48 hours schema: enum: - - 'endofday' - - 'interval' - default: 'interval' + - "endofday" + - "interval" + default: "interval" required: false - name: limit in: path @@ -32,269 +32,278 @@ paths: default: 5 required: false responses: - '200': + "200": description: 200 response for default request content: application/json: - example: { - "env": "production", - "schedulerTime": "2019-10-21 17:52:45", - "previous": { - "starts": "2019-10-21 17:47:25.000000", - "ends": "2019-10-21 17:52:13.000000", - "type": "track", - "name": "Disclosure - F For You (feat. Mary J. Blige)", - "metadata": { - "id": 8, - "name": "", - "mime": "audio/mp3", - "ftype": "audioclip", - "directory": 1, - "filepath": "imported/1/Disclosure/www.mmibty.com/01-F-For-You-feat.-Mary-J.-Blige.mp3", - "import_status": 0, - "currentlyaccessing": 0, - "editedby": null, - "mtime": "2019-10-21 17:19:03", - "utime": "2019-10-21 17:18:57", - "lptime": "2019-10-21 17:47:25", - "md5": "e008616551750aea49820a16d1fb1527", - "track_title": "F For You (feat. Mary J. 
Blige)", - "artist_name": "Disclosure", - "bit_rate": 251628, - "sample_rate": 44100, - "format": null, - "length": "00:04:48.026122", - "album_title": "www.mmibty.com", - "genre": "Electronic", - "comments": null, - "year": "2014", - "track_number": 1, - "channels": 2, - "url": null, - "bpm": null, - "rating": null, - "encoded_by": null, - "disc_number": null, - "mood": null, - "label": null, - "composer": null, - "encoder": null, - "checksum": null, - "lyrics": null, - "orchestra": null, - "conductor": null, - "lyricist": null, - "original_lyricist": null, - "radio_station_name": null, - "info_url": null, - "artist_url": null, - "audio_source_url": null, - "radio_station_url": null, - "buy_this_url": null, - "isrc_number": null, - "catalog_number": null, - "original_artist": null, - "copyright": null, - "report_datetime": null, - "report_location": null, - "report_organization": null, - "subject": null, - "contributor": null, - "language": null, - "replay_gain": "-5.58", - "owner_id": 1, - "cuein": "00:00:00", - "cueout": "00:04:48.026122", - "hidden": false, - "filesize": 9271626, - "description": null, - "artwork": "imported/1/artwork/01-F-For-You-feat.-Mary-J.-Blige", - "artwork_url": "http://localhost:8080/api/track?id=8&return=artwork" - } - }, - "current": { - "starts": "2019-10-21 17:52:13", - "ends": "2019-10-21 17:56:27", - "type": "track", - "name": "Armin van Buuren - Ping Pong", - "media_item_played": true, - "metadata": { - "id": 2, - "name": "", - "mime": "audio/mp3", - "ftype": "audioclip", - "directory": 1, - "filepath": "imported/1/Armin van Buuren/A State of Trance 2014/2-18 Armin van Buuren - Ping Pong.mp3", - "import_status": 0, - "currentlyaccessing": 0, - "editedby": null, - "mtime": "2019-10-21 17:18:02", - "utime": "2019-10-21 17:18:00", - "lptime": "2019-10-21 17:52:13", - "md5": "04c26823902065db0706d121d0e703a2", - "track_title": "Ping Pong", - "artist_name": "Armin van Buuren", - "bit_rate": 32000, - "sample_rate": 44100, - "format": 
null, - "length": "00:04:14.171429", - "album_title": "A State of Trance 2014", - "genre": "Trance;Electronic;Dance", - "comments": null, - "year": "2014", - "track_number": 18, - "channels": 2, - "url": null, - "bpm": null, - "rating": null, - "encoded_by": null, - "disc_number": null, - "mood": null, - "label": "Armada Music", - "composer": null, - "encoder": null, - "checksum": null, - "lyrics": null, - "orchestra": null, - "conductor": null, - "lyricist": null, - "original_lyricist": null, - "radio_station_name": null, - "info_url": null, - "artist_url": null, - "audio_source_url": null, - "radio_station_url": null, - "buy_this_url": null, - "isrc_number": null, - "catalog_number": null, - "original_artist": null, - "copyright": null, - "report_datetime": null, - "report_location": null, - "report_organization": null, - "subject": null, - "contributor": null, - "language": null, - "replay_gain": "-5.07", - "owner_id": 1, - "cuein": "00:00:00", - "cueout": "00:04:14.171429", - "hidden": false, - "filesize": 6136238, - "description": null, - "artwork": "imported/1/artwork/2-18 Armin van Buuren - Ping Pong", - "artwork_url": "http://localhost:8080/api/track?id=2&return=artwork" - }, - "record": "0" - }, - "next": { - "starts": "2019-10-21 17:56:27.000000", - "ends": "2019-10-21 18:00:28.000000", - "type": "track", - "name": "Bastille - No Angels (feat. Ella)", - "metadata": { - "id": 4, - "name": "", - "mime": "audio/mp3", - "ftype": "audioclip", - "directory": 1, - "filepath": "imported/1/Bastille/Other People's Heartache, Pt. 2/03 Bastille - No Angels (feat. Ella).mp3", - "import_status": 0, - "currentlyaccessing": 0, - "editedby": null, - "mtime": "2019-10-21 17:18:16", - "utime": "2019-10-21 17:18:14", - "lptime": "2019-10-21 17:24:46", - "md5": "87bf83451d7618eefc0141c262aead2a", - "track_title": "No Angels (feat. 
Ella)", - "artist_name": "Bastille", - "bit_rate": 128000, - "sample_rate": 44100, - "format": null, - "length": "00:04:00.752438", - "album_title": "Other People's Heartache, Pt. 2", - "genre": null, - "comments": null, - "year": "2012", - "track_number": 3, - "channels": 2, - "url": null, - "bpm": null, - "rating": null, - "encoded_by": null, - "disc_number": null, - "mood": null, - "label": "[no label]", - "composer": null, - "encoder": null, - "checksum": null, - "lyrics": null, - "orchestra": null, - "conductor": null, - "lyricist": null, - "original_lyricist": null, - "radio_station_name": null, - "info_url": null, - "artist_url": null, - "audio_source_url": null, - "radio_station_url": null, - "buy_this_url": null, - "isrc_number": null, - "catalog_number": null, - "original_artist": null, - "copyright": null, - "report_datetime": null, - "report_location": null, - "report_organization": null, - "subject": null, - "contributor": null, - "language": null, - "replay_gain": "-8.57", - "owner_id": 1, - "cuein": "00:00:00", - "cueout": "00:04:00.752438", - "hidden": false, - "filesize": 3858688, - "description": null, - "artwork": "" - } - }, - "currentShow": [ - { - "start_timestamp": "2019-10-21 17:20:00", - "end_timestamp": "2019-10-21 18:31:00", - "name": "Show 1", - "description": "A show", - "id": 1, - "instance_id": 1, - "record": 0, - "url": "https://example.com", - "image_path": "", - "starts": "2019-10-21 17:20:00", - "ends": "2019-10-21 18:31:00" - } - ], - "nextShow": [ - { - "id": 2, - "instance_id": 2, - "name": "Reading", - "description": "A reading of After the EMP by Harley Tate", - "url": "https://example.com", - "start_timestamp": "2019-10-21 18:31:00", - "end_timestamp": "2019-10-22 10:45:00", - "starts": "2019-10-21 18:31:00", - "ends": "2019-10-22 10:45:00", - "record": 0, - "image_path": "", - "type": "show" - } - ], - "source_enabled": "Scheduled", - "timezone": "UTC", - "timezoneOffset": "0", - "AIRTIME_API_VERSION": "1.1" - } + example: 
+ { + "env": "production", + "schedulerTime": "2019-10-21 17:52:45", + "previous": + { + "starts": "2019-10-21 17:47:25.000000", + "ends": "2019-10-21 17:52:13.000000", + "type": "track", + "name": "Disclosure - F For You (feat. Mary J. Blige)", + "metadata": + { + "id": 8, + "name": "", + "mime": "audio/mp3", + "ftype": "audioclip", + "directory": 1, + "filepath": "imported/1/Disclosure/www.mmibty.com/01-F-For-You-feat.-Mary-J.-Blige.mp3", + "import_status": 0, + "currentlyaccessing": 0, + "editedby": null, + "mtime": "2019-10-21 17:19:03", + "utime": "2019-10-21 17:18:57", + "lptime": "2019-10-21 17:47:25", + "md5": "e008616551750aea49820a16d1fb1527", + "track_title": "F For You (feat. Mary J. Blige)", + "artist_name": "Disclosure", + "bit_rate": 251628, + "sample_rate": 44100, + "format": null, + "length": "00:04:48.026122", + "album_title": "www.mmibty.com", + "genre": "Electronic", + "comments": null, + "year": "2014", + "track_number": 1, + "channels": 2, + "url": null, + "bpm": null, + "rating": null, + "encoded_by": null, + "disc_number": null, + "mood": null, + "label": null, + "composer": null, + "encoder": null, + "checksum": null, + "lyrics": null, + "orchestra": null, + "conductor": null, + "lyricist": null, + "original_lyricist": null, + "radio_station_name": null, + "info_url": null, + "artist_url": null, + "audio_source_url": null, + "radio_station_url": null, + "buy_this_url": null, + "isrc_number": null, + "catalog_number": null, + "original_artist": null, + "copyright": null, + "report_datetime": null, + "report_location": null, + "report_organization": null, + "subject": null, + "contributor": null, + "language": null, + "replay_gain": "-5.58", + "owner_id": 1, + "cuein": "00:00:00", + "cueout": "00:04:48.026122", + "hidden": false, + "filesize": 9271626, + "description": null, + "artwork": "imported/1/artwork/01-F-For-You-feat.-Mary-J.-Blige", + "artwork_url": "http://localhost:8080/api/track?id=8&return=artwork", + }, + }, + "current": + { + 
"starts": "2019-10-21 17:52:13", + "ends": "2019-10-21 17:56:27", + "type": "track", + "name": "Armin van Buuren - Ping Pong", + "media_item_played": true, + "metadata": + { + "id": 2, + "name": "", + "mime": "audio/mp3", + "ftype": "audioclip", + "directory": 1, + "filepath": "imported/1/Armin van Buuren/A State of Trance 2014/2-18 Armin van Buuren - Ping Pong.mp3", + "import_status": 0, + "currentlyaccessing": 0, + "editedby": null, + "mtime": "2019-10-21 17:18:02", + "utime": "2019-10-21 17:18:00", + "lptime": "2019-10-21 17:52:13", + "md5": "04c26823902065db0706d121d0e703a2", + "track_title": "Ping Pong", + "artist_name": "Armin van Buuren", + "bit_rate": 32000, + "sample_rate": 44100, + "format": null, + "length": "00:04:14.171429", + "album_title": "A State of Trance 2014", + "genre": "Trance;Electronic;Dance", + "comments": null, + "year": "2014", + "track_number": 18, + "channels": 2, + "url": null, + "bpm": null, + "rating": null, + "encoded_by": null, + "disc_number": null, + "mood": null, + "label": "Armada Music", + "composer": null, + "encoder": null, + "checksum": null, + "lyrics": null, + "orchestra": null, + "conductor": null, + "lyricist": null, + "original_lyricist": null, + "radio_station_name": null, + "info_url": null, + "artist_url": null, + "audio_source_url": null, + "radio_station_url": null, + "buy_this_url": null, + "isrc_number": null, + "catalog_number": null, + "original_artist": null, + "copyright": null, + "report_datetime": null, + "report_location": null, + "report_organization": null, + "subject": null, + "contributor": null, + "language": null, + "replay_gain": "-5.07", + "owner_id": 1, + "cuein": "00:00:00", + "cueout": "00:04:14.171429", + "hidden": false, + "filesize": 6136238, + "description": null, + "artwork": "imported/1/artwork/2-18 Armin van Buuren - Ping Pong", + "artwork_url": "http://localhost:8080/api/track?id=2&return=artwork", + }, + "record": "0", + }, + "next": + { + "starts": "2019-10-21 17:56:27.000000", + 
"ends": "2019-10-21 18:00:28.000000", + "type": "track", + "name": "Bastille - No Angels (feat. Ella)", + "metadata": + { + "id": 4, + "name": "", + "mime": "audio/mp3", + "ftype": "audioclip", + "directory": 1, + "filepath": "imported/1/Bastille/Other People's Heartache, Pt. 2/03 Bastille - No Angels (feat. Ella).mp3", + "import_status": 0, + "currentlyaccessing": 0, + "editedby": null, + "mtime": "2019-10-21 17:18:16", + "utime": "2019-10-21 17:18:14", + "lptime": "2019-10-21 17:24:46", + "md5": "87bf83451d7618eefc0141c262aead2a", + "track_title": "No Angels (feat. Ella)", + "artist_name": "Bastille", + "bit_rate": 128000, + "sample_rate": 44100, + "format": null, + "length": "00:04:00.752438", + "album_title": "Other People's Heartache, Pt. 2", + "genre": null, + "comments": null, + "year": "2012", + "track_number": 3, + "channels": 2, + "url": null, + "bpm": null, + "rating": null, + "encoded_by": null, + "disc_number": null, + "mood": null, + "label": "[no label]", + "composer": null, + "encoder": null, + "checksum": null, + "lyrics": null, + "orchestra": null, + "conductor": null, + "lyricist": null, + "original_lyricist": null, + "radio_station_name": null, + "info_url": null, + "artist_url": null, + "audio_source_url": null, + "radio_station_url": null, + "buy_this_url": null, + "isrc_number": null, + "catalog_number": null, + "original_artist": null, + "copyright": null, + "report_datetime": null, + "report_location": null, + "report_organization": null, + "subject": null, + "contributor": null, + "language": null, + "replay_gain": "-8.57", + "owner_id": 1, + "cuein": "00:00:00", + "cueout": "00:04:00.752438", + "hidden": false, + "filesize": 3858688, + "description": null, + "artwork": "", + }, + }, + "currentShow": + [ + { + "start_timestamp": "2019-10-21 17:20:00", + "end_timestamp": "2019-10-21 18:31:00", + "name": "Show 1", + "description": "A show", + "id": 1, + "instance_id": 1, + "record": 0, + "url": "https://example.com", + "image_path": "", + 
"starts": "2019-10-21 17:20:00", + "ends": "2019-10-21 18:31:00", + }, + ], + "nextShow": + [ + { + "id": 2, + "instance_id": 2, + "name": "Reading", + "description": "A reading of After the EMP by Harley Tate", + "url": "https://example.com", + "start_timestamp": "2019-10-21 18:31:00", + "end_timestamp": "2019-10-22 10:45:00", + "starts": "2019-10-21 18:31:00", + "ends": "2019-10-22 10:45:00", + "record": 0, + "image_path": "", + "type": "show", + }, + ], + "source_enabled": "Scheduled", + "timezone": "UTC", + "timezoneOffset": "0", + "AIRTIME_API_VERSION": "1.1", + } /live-info-v2: get: summary: Retrieve the currently playing and upcoming shows @@ -325,200 +334,211 @@ paths: default: "$server_timezone" required: false responses: - '200': + "200": description: 200 response for default request content: application/json: - example: { - "station": { - "env": "production", - "schedulerTime": "2019-10-21 17:29:40", - "source_enabled": "Scheduled", - "timezone": "UTC", - "AIRTIME_API_VERSION": "1.1" - }, - "tracks": { - "previous": { - "starts": "2019-10-21 17:24:45", - "ends": "2019-10-21 17:28:46", - "type": "track", - "name": "Bastille - No Angels (feat. Ella)", - "metadata": { - "id": 4, - "name": "", - "mime": "audio/mp3", - "ftype": "audioclip", - "directory": 1, - "filepath": "imported/1/Bastille/Other People's Heartache, Pt. 2/03 Bastille - No Angels (feat. Ella).mp3", - "import_status": 0, - "currentlyaccessing": 0, - "editedby": null, - "mtime": "2019-10-21 17:18:16", - "utime": "2019-10-21 17:18:14", - "lptime": "2019-10-21 17:24:46", - "md5": "87bf83451d7618eefc0141c262aead2a", - "track_title": "No Angels (feat. Ella)", - "artist_name": "Bastille", - "bit_rate": 128000, - "sample_rate": 44100, - "format": null, - "length": "00:04:00.752438", - "album_title": "Other People's Heartache, Pt. 
2", - "genre": null, - "comments": null, - "year": "2012", - "track_number": 3, - "channels": 2, - "url": null, - "bpm": null, - "rating": null, - "encoded_by": null, - "disc_number": null, - "mood": null, - "label": "[no label]", - "composer": null, - "encoder": null, - "checksum": null, - "lyrics": null, - "orchestra": null, - "conductor": null, - "lyricist": null, - "original_lyricist": null, - "radio_station_name": null, - "info_url": null, - "artist_url": null, - "audio_source_url": null, - "radio_station_url": null, - "buy_this_url": null, - "isrc_number": null, - "catalog_number": null, - "original_artist": null, - "copyright": null, - "report_datetime": null, - "report_location": null, - "report_organization": null, - "subject": null, - "contributor": null, - "language": null, - "replay_gain": "-8.57", - "owner_id": 1, - "cuein": "00:00:00", - "cueout": "00:04:00.752438", - "hidden": false, - "filesize": 3858688, - "description": null, - "artwork": "" - } - }, - "current": null, - "next": { - "starts": "2019-10-21 17:32:49", - "ends": "2019-10-21 17:36:44", - "type": "track", - "name": "Bob Marley - Could You Be Loved", - "metadata": { - "id": 14, - "name": "", - "mime": "audio/mp3", - "ftype": "audioclip", - "directory": 1, - "filepath": "imported/1/Bob Marley/Greatest Hits/02. 
Could You Be Loved.mp3", - "import_status": 0, - "currentlyaccessing": 0, - "editedby": null, - "mtime": "2019-10-21 17:19:16", - "utime": "2019-10-21 17:18:59", - "lptime": null, - "md5": "75e49569fd6af61cc8c18f5660beadc2", - "track_title": "Could You Be Loved", - "artist_name": "Bob Marley", - "bit_rate": 128000, - "sample_rate": 44100, - "format": null, - "length": "00:03:55.11", - "album_title": "Greatest Hits", - "genre": "Various", - "comments": null, - "year": null, - "track_number": 2, - "channels": 2, - "url": null, - "bpm": 103, - "rating": null, - "encoded_by": null, - "disc_number": null, - "mood": null, - "label": null, - "composer": null, - "encoder": null, - "checksum": null, - "lyrics": null, - "orchestra": null, - "conductor": null, - "lyricist": null, - "original_lyricist": null, - "radio_station_name": null, - "info_url": null, - "artist_url": null, - "audio_source_url": null, - "radio_station_url": null, - "buy_this_url": null, - "isrc_number": null, - "catalog_number": null, - "original_artist": null, - "copyright": null, - "report_datetime": null, - "report_location": null, - "report_organization": null, - "subject": null, - "contributor": null, - "language": null, - "replay_gain": "-1.2", - "owner_id": 1, - "cuein": "00:00:00", - "cueout": "00:03:55.11", - "hidden": false, - "filesize": 3773820, - "description": null, - "artwork": "" - } - } - }, - "shows": { - "previous": [], - "current": { - "name": "Show 1", - "description": "A show", - "genre": "HipHop", - "id": 1, - "instance_id": 1, - "record": 0, - "url": "https://example.com", - "image_path": "", - "starts": "2019-10-21 17:20:00", - "ends": "2019-10-21 18:31:00" - }, - "next": [ + example: + { + "station": { - "name": "Reading", - "description": "A reading of After the EMP by Harley Tate", - "genre": "Sci-fi", - "id": 2, - "instance_id": 2, - "record": 0, - "url": "https://example.com", - "image_path": "", - "starts": "2019-10-21 18:31:00", - "ends": "2019-10-22 10:45:00" - } - ] - }, 
- "sources": { - "livedj": "off", - "masterdj": "off", - "scheduledplay": "on" + "env": "production", + "schedulerTime": "2019-10-21 17:29:40", + "source_enabled": "Scheduled", + "timezone": "UTC", + "AIRTIME_API_VERSION": "1.1", + }, + "tracks": + { + "previous": + { + "starts": "2019-10-21 17:24:45", + "ends": "2019-10-21 17:28:46", + "type": "track", + "name": "Bastille - No Angels (feat. Ella)", + "metadata": + { + "id": 4, + "name": "", + "mime": "audio/mp3", + "ftype": "audioclip", + "directory": 1, + "filepath": "imported/1/Bastille/Other People's Heartache, Pt. 2/03 Bastille - No Angels (feat. Ella).mp3", + "import_status": 0, + "currentlyaccessing": 0, + "editedby": null, + "mtime": "2019-10-21 17:18:16", + "utime": "2019-10-21 17:18:14", + "lptime": "2019-10-21 17:24:46", + "md5": "87bf83451d7618eefc0141c262aead2a", + "track_title": "No Angels (feat. Ella)", + "artist_name": "Bastille", + "bit_rate": 128000, + "sample_rate": 44100, + "format": null, + "length": "00:04:00.752438", + "album_title": "Other People's Heartache, Pt. 
2", + "genre": null, + "comments": null, + "year": "2012", + "track_number": 3, + "channels": 2, + "url": null, + "bpm": null, + "rating": null, + "encoded_by": null, + "disc_number": null, + "mood": null, + "label": "[no label]", + "composer": null, + "encoder": null, + "checksum": null, + "lyrics": null, + "orchestra": null, + "conductor": null, + "lyricist": null, + "original_lyricist": null, + "radio_station_name": null, + "info_url": null, + "artist_url": null, + "audio_source_url": null, + "radio_station_url": null, + "buy_this_url": null, + "isrc_number": null, + "catalog_number": null, + "original_artist": null, + "copyright": null, + "report_datetime": null, + "report_location": null, + "report_organization": null, + "subject": null, + "contributor": null, + "language": null, + "replay_gain": "-8.57", + "owner_id": 1, + "cuein": "00:00:00", + "cueout": "00:04:00.752438", + "hidden": false, + "filesize": 3858688, + "description": null, + "artwork": "", + }, + }, + "current": null, + "next": + { + "starts": "2019-10-21 17:32:49", + "ends": "2019-10-21 17:36:44", + "type": "track", + "name": "Bob Marley - Could You Be Loved", + "metadata": + { + "id": 14, + "name": "", + "mime": "audio/mp3", + "ftype": "audioclip", + "directory": 1, + "filepath": "imported/1/Bob Marley/Greatest Hits/02. 
Could You Be Loved.mp3", + "import_status": 0, + "currentlyaccessing": 0, + "editedby": null, + "mtime": "2019-10-21 17:19:16", + "utime": "2019-10-21 17:18:59", + "lptime": null, + "md5": "75e49569fd6af61cc8c18f5660beadc2", + "track_title": "Could You Be Loved", + "artist_name": "Bob Marley", + "bit_rate": 128000, + "sample_rate": 44100, + "format": null, + "length": "00:03:55.11", + "album_title": "Greatest Hits", + "genre": "Various", + "comments": null, + "year": null, + "track_number": 2, + "channels": 2, + "url": null, + "bpm": 103, + "rating": null, + "encoded_by": null, + "disc_number": null, + "mood": null, + "label": null, + "composer": null, + "encoder": null, + "checksum": null, + "lyrics": null, + "orchestra": null, + "conductor": null, + "lyricist": null, + "original_lyricist": null, + "radio_station_name": null, + "info_url": null, + "artist_url": null, + "audio_source_url": null, + "radio_station_url": null, + "buy_this_url": null, + "isrc_number": null, + "catalog_number": null, + "original_artist": null, + "copyright": null, + "report_datetime": null, + "report_location": null, + "report_organization": null, + "subject": null, + "contributor": null, + "language": null, + "replay_gain": "-1.2", + "owner_id": 1, + "cuein": "00:00:00", + "cueout": "00:03:55.11", + "hidden": false, + "filesize": 3773820, + "description": null, + "artwork": "", + }, + }, + }, + "shows": + { + "previous": [], + "current": + { + "name": "Show 1", + "description": "A show", + "genre": "HipHop", + "id": 1, + "instance_id": 1, + "record": 0, + "url": "https://example.com", + "image_path": "", + "starts": "2019-10-21 17:20:00", + "ends": "2019-10-21 18:31:00", + }, + "next": + [ + { + "name": "Reading", + "description": "A reading of After the EMP by Harley Tate", + "genre": "Sci-fi", + "id": 2, + "instance_id": 2, + "record": 0, + "url": "https://example.com", + "image_path": "", + "starts": "2019-10-21 18:31:00", + "ends": "2019-10-22 10:45:00", + }, + ], + }, + "sources": 
+ { + "livedj": "off", + "masterdj": "off", + "scheduledplay": "on", + }, } - } /week-info: get: summary: Retrieve the schedule for the week @@ -528,56 +548,58 @@ paths: description: The API key to use for authentication required: false responses: - '200': + "200": description: 200 response for default request content: application/json: - example: { - "monday": [ - { - "start_timestamp": "2019-10-21 17:20:00", - "end_timestamp": "2019-10-21 18:31:00", - "name": "Show 1", - "description": "A show", - "id": 1, - "instance_id": 1, - "instance_description": "", - "record": 0, - "url": "https://example.com", - "image_path": "", - "starts": "2019-10-21 17:20:00", - "ends": "2019-10-21 18:31:00" - }, - { - "start_timestamp": "2019-10-21 18:31:00", - "end_timestamp": "2019-10-22 10:45:00", - "name": "Reading", - "description": "A reading of After the EMP by Harley Tate", - "id": 2, - "instance_id": 2, - "instance_description": "", - "record": 0, - "url": "https://example.com", - "image_path": "", - "starts": "2019-10-21 18:31:00", - "ends": "2019-10-22 10:45:00" - } - ], - "tuesday": [], - "wednesday": [], - "thursday": [], - "friday": [], - "saturday": [], - "sunday": [], - "nextmonday": [], - "nexttuesday": [], - "nextwednesday": [], - "nextthursday": [], - "nextfriday": [], - "nextsaturday": [], - "nextsunday": [], - "AIRTIME_API_VERSION": "1.1" - } + example: + { + "monday": + [ + { + "start_timestamp": "2019-10-21 17:20:00", + "end_timestamp": "2019-10-21 18:31:00", + "name": "Show 1", + "description": "A show", + "id": 1, + "instance_id": 1, + "instance_description": "", + "record": 0, + "url": "https://example.com", + "image_path": "", + "starts": "2019-10-21 17:20:00", + "ends": "2019-10-21 18:31:00", + }, + { + "start_timestamp": "2019-10-21 18:31:00", + "end_timestamp": "2019-10-22 10:45:00", + "name": "Reading", + "description": "A reading of After the EMP by Harley Tate", + "id": 2, + "instance_id": 2, + "instance_description": "", + "record": 0, + "url": 
"https://example.com", + "image_path": "", + "starts": "2019-10-21 18:31:00", + "ends": "2019-10-22 10:45:00", + }, + ], + "tuesday": [], + "wednesday": [], + "thursday": [], + "friday": [], + "saturday": [], + "sunday": [], + "nextmonday": [], + "nexttuesday": [], + "nextwednesday": [], + "nextthursday": [], + "nextfriday": [], + "nextsaturday": [], + "nextsunday": [], + "AIRTIME_API_VERSION": "1.1", + } /station-metadata: get: summary: BROKEN - Retrieve the schedule for the week @@ -639,30 +661,31 @@ paths: description: The show instance ID required: false responses: - '200': + "200": description: The 200 default response content: application/json: - example: [ - { - "starts": "2019-10-21 18:19:07", - "ends": "2019-10-21 18:23:55", - "history_id": 16, - "instance_id": 1, - "track_title": "F For You (feat. Mary J. Blige)", - "artist_name": "Disclosure", - "checkbox": "" - }, - { - "starts": "2019-10-21 17:20:31", - "ends": "2019-10-21 17:24:45", - "history_id": 1, - "instance_id": 1, - "track_title": "Ping Pong", - "artist_name": "Armin van Buuren", - "checkbox": "" - }, - ] + example: + [ + { + "starts": "2019-10-21 18:19:07", + "ends": "2019-10-21 18:23:55", + "history_id": 16, + "instance_id": 1, + "track_title": "F For You (feat. Mary J. 
Blige)", + "artist_name": "Disclosure", + "checkbox": "", + }, + { + "starts": "2019-10-21 17:20:31", + "ends": "2019-10-21 17:24:45", + "history_id": 1, + "instance_id": 1, + "track_title": "Ping Pong", + "artist_name": "Armin van Buuren", + "checkbox": "", + }, + ] /shows: get: summary: Retrieve the show info (without schedule for given show_id @@ -676,25 +699,26 @@ paths: description: The ID of the show required: false response: - '200': + "200": description: The response with a show_id of 1 content: application/json: - example: [ - { - "name": "Show 1", - "id": 1, - "url": "https://example.com", - "genre": "HipHop", - "description": "A show", - "color": "", - "background_color": "", - "linked": false, - "has_autoplaylist": false, - "autoplaylist_id": null, - "autoplaylist_repeat": false - } - ] + example: + [ + { + "name": "Show 1", + "id": 1, + "url": "https://example.com", + "genre": "HipHop", + "description": "A show", + "color": "", + "background_color": "", + "linked": false, + "has_autoplaylist": false, + "autoplaylist_id": null, + "autoplaylist_repeat": false, + }, + ] /show-tracks: get: summary: Display the track listing for given instance_id @@ -708,32 +732,33 @@ paths: description: The ID of the show required: true response: - '200': + "200": description: The response with a instance_id of 1 content: application/json: - example: [ - { - "title": "Ping Pong", - "artist": "Armin van Buuren", - "position": 0, - "id": 1, - "mime": "audio/mp3", - "starts": "2019-10-21 17:20:31", - "length": "4:14.2", - "file_id": 2 - }, - { - "title": "No Angels (feat. Ella)", - "artist": "Bastille", - "position": 1, - "id": 2, - "mime": "audio/mp3", - "starts": "2019-10-21 17:24:45", - "length": "4:00.8", - "file_id": 4 - } - ] + example: + [ + { + "title": "Ping Pong", + "artist": "Armin van Buuren", + "position": 0, + "id": 1, + "mime": "audio/mp3", + "starts": "2019-10-21 17:20:31", + "length": "4:14.2", + "file_id": 2, + }, + { + "title": "No Angels (feat. 
Ella)", + "artist": "Bastille", + "position": 1, + "id": 2, + "mime": "audio/mp3", + "starts": "2019-10-21 17:24:45", + "length": "4:00.8", + "file_id": 4, + }, + ] /show-schedules: get: summary: Display the show schedule for given show_id @@ -759,33 +784,34 @@ paths: description: The timezone that the times are in required: false response: - '200': + "200": description: The response with a instance_id of 1 content: application/json: - example: [ - { - "starts": "2019-10-21 17:20:00", - "ends": "2019-10-21 18:31:00", - "record": 0, - "rebroadcast": 0, - "parent_starts": null, - "record_id": null, - "show_id": 1, - "name": "Show 1", - "description": "A show", - "color": "", - "background_color": "", - "image_path": "", - "linked": false, - "file_id": null, - "instance_id": 1, - "instance_description": "", - "created": "2019-10-21 17:20:22", - "last_scheduled": "2019-10-21 17:20:50", - "time_filled": "01:14:39.265872" - } - ] + example: + [ + { + "starts": "2019-10-21 17:20:00", + "ends": "2019-10-21 18:31:00", + "record": 0, + "rebroadcast": 0, + "parent_starts": null, + "record_id": null, + "show_id": 1, + "name": "Show 1", + "description": "A show", + "color": "", + "background_color": "", + "image_path": "", + "linked": false, + "file_id": null, + "instance_id": 1, + "instance_description": "", + "created": "2019-10-21 17:20:22", + "last_scheduled": "2019-10-21 17:20:50", + "time_filled": "01:14:39.265872", + }, + ] /show-logo: get: summary: Fetch the show logo. Returns the station logo if none exists @@ -820,46 +846,47 @@ paths: - artwork required: true responses: - '200': + "200": description: The 200 response content: application/json: - example: { - "MDATA_KEY_FILEPATH": "imported\/1\/Armin van Buuren\/Another You (feat. Mr. Probz)\/01 Another You (feat. Mr. Probz).mp3", - "MDATA_KEY_DIRECTORY": 1, - "MDATA_KEY_TITLE": "Another You (feat. Mr. Probz)", - "MDATA_KEY_CREATOR": "Armin van Buuren", - "MDATA_KEY_SOURCE": "Another You (feat. Mr. 
Probz)", - "MDATA_KEY_DURATION": "00:03:19.183673", - "MDATA_KEY_MIME": "audio\/mp3", - "MDATA_KEY_FTYPE": "audioclip", - "MDATA_KEY_URL": null, - "MDATA_KEY_GENRE": null, - "MDATA_KEY_MOOD": null, - "MDATA_KEY_LABEL": "Armin Audio B.V.", - "MDATA_KEY_COMPOSER": null, - "MDATA_KEY_DESCRIPTION": null, - "MDATA_KEY_SAMPLERATE": 44100, - "MDATA_KEY_BITRATE": 192000, - "MDATA_KEY_ENCODER": null, - "MDATA_KEY_ISRC": null, - "MDATA_KEY_COPYRIGHT": null, - "MDATA_KEY_YEAR": "2015", - "MDATA_KEY_BPM": null, - "MDATA_KEY_TRACKNUMBER": 1, - "MDATA_KEY_CONDUCTOR": null, - "MDATA_KEY_LANGUAGE": null, - "MDATA_KEY_REPLAYGAIN": "-8.36", - "MDATA_KEY_OWNER_ID": 1, - "MDATA_KEY_CUE_IN": "00:00:00", - "MDATA_KEY_CUE_OUT": "00:03:19.183673", - "MDATA_KEY_ARTWORK": "imported\/1\/artwork\/01 Another You (feat. Mr. Probz)" - } + example: + { + "MDATA_KEY_FILEPATH": "imported\/1\/Armin van Buuren\/Another You (feat. Mr. Probz)\/01 Another You (feat. Mr. Probz).mp3", + "MDATA_KEY_DIRECTORY": 1, + "MDATA_KEY_TITLE": "Another You (feat. Mr. Probz)", + "MDATA_KEY_CREATOR": "Armin van Buuren", + "MDATA_KEY_SOURCE": "Another You (feat. Mr. Probz)", + "MDATA_KEY_DURATION": "00:03:19.183673", + "MDATA_KEY_MIME": "audio\/mp3", + "MDATA_KEY_FTYPE": "audioclip", + "MDATA_KEY_URL": null, + "MDATA_KEY_GENRE": null, + "MDATA_KEY_MOOD": null, + "MDATA_KEY_LABEL": "Armin Audio B.V.", + "MDATA_KEY_COMPOSER": null, + "MDATA_KEY_DESCRIPTION": null, + "MDATA_KEY_SAMPLERATE": 44100, + "MDATA_KEY_BITRATE": 192000, + "MDATA_KEY_ENCODER": null, + "MDATA_KEY_ISRC": null, + "MDATA_KEY_COPYRIGHT": null, + "MDATA_KEY_YEAR": "2015", + "MDATA_KEY_BPM": null, + "MDATA_KEY_TRACKNUMBER": 1, + "MDATA_KEY_CONDUCTOR": null, + "MDATA_KEY_LANGUAGE": null, + "MDATA_KEY_REPLAYGAIN": "-8.36", + "MDATA_KEY_OWNER_ID": 1, + "MDATA_KEY_CUE_IN": "00:00:00", + "MDATA_KEY_CUE_OUT": "00:03:19.183673", + "MDATA_KEY_ARTWORK": "imported\/1\/artwork\/01 Another You (feat. Mr. 
Probz)", + } /stream-m3u: get: summary: Returns m3u playlist file for the station's output stream response: - '200': + "200": description: The M3U file for the stream content: application/x-mpegurl /version: @@ -871,14 +898,12 @@ paths: description: The API key to use for authentication required: false responses: - '200': + "200": description: 200 response content: application/json: - example: { - "airtime_version": "3.0.0~alpha.5", - "api_version": "1.1" - } + example: + { "airtime_version": "3.0.0~alpha.5", "api_version": "1.1" } /recorded-shows: get: summary: BROKEN - Unclear what this did, not implemented in ApiController @@ -1170,36 +1195,34 @@ paths: description: The API key to use for authentication required: true responses: - '200': + "200": description: 200 response for default request content: application/json: - example: [ - { - "id": 9, - "fp": "/srv/airtime/stor/" - }, - { - "id": 12, - "fp": "/srv/airtime/stor/imported/1/Sam Smith Feat John Legend/The Official Uk Top 40 Singles Chart 03-22-2015/01 Sam Smith Feat John Legend - Lay Me Down.mp3" - }, - { - "id": 13, - "fp": "/srv/airtime/stor/imported/1/Mumford & Sons/Wilder Mind [ Deluxe Edition ]/01 - Tompkins Square Park.mp3" - }, - { - "id": 3, - "fp": "/srv/airtime/stor/imported/1/Bastille/All This Bad Blood/1-02 Things We Lost in the Fire.mp3" - }, - { - "id": 1, - "fp": "/srv/airtime/stor/imported/1/Armin van Buuren/Another You (feat. Mr. Probz)/01 Another You (feat. Mr. 
Probz).mp3" - }, - { - "id": 15, - "fp": "/srv/airtime/stor/imported/1/Harley Tate/Harley Tate - After the EMP 01 - After the EMP/Harley Tate - After the EMP 01 - After the EMP.mp3" - } - ] + example: + [ + { "id": 9, "fp": "/srv/airtime/stor/" }, + { + "id": 12, + "fp": "/srv/airtime/stor/imported/1/Sam Smith Feat John Legend/The Official Uk Top 40 Singles Chart 03-22-2015/01 Sam Smith Feat John Legend - Lay Me Down.mp3", + }, + { + "id": 13, + "fp": "/srv/airtime/stor/imported/1/Mumford & Sons/Wilder Mind [ Deluxe Edition ]/01 - Tompkins Square Park.mp3", + }, + { + "id": 3, + "fp": "/srv/airtime/stor/imported/1/Bastille/All This Bad Blood/1-02 Things We Lost in the Fire.mp3", + }, + { + "id": 1, + "fp": "/srv/airtime/stor/imported/1/Armin van Buuren/Another You (feat. Mr. Probz)/01 Another You (feat. Mr. Probz).mp3", + }, + { + "id": 15, + "fp": "/srv/airtime/stor/imported/1/Harley Tate/Harley Tate - After the EMP 01 - After the EMP/Harley Tate - After the EMP 01 - After the EMP.mp3", + }, + ] /reload-metadata-group: get: summary: |- diff --git a/docs/index.md b/docs/index.md index 8afe618b4..988c695af 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,14 +6,14 @@ linktext: Get Libretime img: /img/radio-unsplash.jpg photocredit: Top photo by Leo Wieling on Unsplash actions: -- title: Stable Release - text: The best so far. Just extract and run sudo bash install -fiap. - linkto: https://github.com/LibreTime/libretime/releases/download/3.0.0-alpha.8/libretime-3.0.0-alpha.8.tar.gz - linktext: Download 3.0-alpha-8 -- title: Rolling Commits - text: Want the latest and greatest? Install from the source code. - linkto: /install - linktext: Install from Source + - title: Stable Release + text: The best so far. Just extract and run sudo bash install -fiap. + linkto: https://github.com/LibreTime/libretime/releases/download/3.0.0-alpha.8/libretime-3.0.0-alpha.8.tar.gz + linktext: Download 3.0-alpha-8 + - title: Rolling Commits + text: Want the latest and greatest? 
Install from the source code. + linkto: /install + linktext: Install from Source --- # BROADCAST WITHOUT LIMITS @@ -22,4 +22,4 @@ Libretime is an open source radio automation and broadcasting solution helping c The platform can be easily deployed on dedicated hardware and VMs, on-prem or in the cloud, "free as in freedom" free. -Let your station underwrite its own destiny. \ No newline at end of file +Let your station underwrite its own destiny. From c27f020d730dce207ee7c1e27c0f7545e78cde9f Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 16:23:02 +0200 Subject: [PATCH 05/28] Format code using black --- api/libretimeapi/apps.py | 7 +- api/libretimeapi/managers.py | 16 +- api/libretimeapi/models/authentication.py | 31 +- api/libretimeapi/models/celery.py | 6 +- api/libretimeapi/models/countries.py | 3 +- api/libretimeapi/models/files.py | 36 +- api/libretimeapi/models/playlists.py | 6 +- api/libretimeapi/models/playout.py | 22 +- api/libretimeapi/models/podcasts.py | 28 +- api/libretimeapi/models/preferences.py | 12 +- api/libretimeapi/models/schedule.py | 10 +- api/libretimeapi/models/services.py | 3 +- api/libretimeapi/models/shows.py | 14 +- api/libretimeapi/models/smart_blocks.py | 41 ++- api/libretimeapi/models/tracks.py | 6 +- api/libretimeapi/models/user_constants.py | 16 +- api/libretimeapi/models/webstreams.py | 8 +- api/libretimeapi/permission_constants.py | 187 ++++++----- api/libretimeapi/permissions.py | 55 +-- api/libretimeapi/serializers.py | 237 +++++++------ api/libretimeapi/settings.py | 184 +++++----- api/libretimeapi/tests/runners.py | 11 +- api/libretimeapi/tests/test_models.py | 43 ++- api/libretimeapi/tests/test_permissions.py | 130 +++---- api/libretimeapi/tests/test_views.py | 29 +- api/libretimeapi/urls.py | 84 ++--- api/libretimeapi/utils.py | 10 +- api/libretimeapi/views.py | 137 +++++--- api/libretimeapi/wsgi.py | 2 +- api/setup.py | 32 +- dev_tools/compare_cc_files_to_fs.py | 57 ++-- .../airtime_analyzer/airtime_analyzer.py | 37 
+- .../airtime_analyzer/analyzer.py | 3 +- .../airtime_analyzer/analyzer_pipeline.py | 58 +++- .../airtime_analyzer/airtime_analyzer/cli.py | 44 ++- .../airtime_analyzer/config_file.py | 1 + .../airtime_analyzer/cuepoint_analyzer.py | 77 +++-- .../airtime_analyzer/filemover_analyzer.py | 88 +++-- .../airtime_analyzer/message_listener.py | 180 ++++++---- .../airtime_analyzer/metadata_analyzer.py | 132 ++++---- .../airtime_analyzer/playability_analyzer.py | 35 +- .../airtime_analyzer/replaygain_analyzer.py | 37 +- .../airtime_analyzer/status_reporter.py | 163 +++++---- .../tests/airtime_analyzer_tests.py | 4 +- .../tests/analyzer_pipeline_tests.py | 48 +-- .../airtime_analyzer/tests/analyzer_tests.py | 5 +- .../tests/cuepoint_analyzer_tests.py | 72 ++-- .../tests/filemover_analyzer_tests.py | 92 ++--- .../tests/metadata_analyzer_tests.py | 210 +++++++----- .../tests/playability_analyzer_tests.py | 70 +++- .../tests/replaygain_analyzer_tests.py | 98 ++++-- python_apps/api_clients/api_clients/utils.py | 110 +++--- .../api_clients/api_clients/version1.py | 316 +++++++++++------- .../api_clients/api_clients/version2.py | 149 +++++---- python_apps/api_clients/setup.py | 30 +- python_apps/api_clients/tests/test_apcurl.py | 18 +- .../api_clients/tests/test_apirequest.py | 32 +- .../api_clients/tests/test_requestprovider.py | 25 +- python_apps/api_clients/tests/test_utils.py | 59 ++-- .../icecast2/install/icecast2-install.py | 12 +- python_apps/pypo/bin/airtime-liquidsoap | 4 +- python_apps/pypo/bin/airtime-playout | 1 - python_apps/pypo/bin/pyponotify | 134 +++++--- .../liquidsoap/generate_liquidsoap_cfg.py | 20 +- .../pypo/liquidsoap/liquidsoap_auth.py | 14 +- .../liquidsoap_prepare_terminate.py | 9 +- python_apps/pypo/pypo/__main__.py | 1 + python_apps/pypo/pypo/listenerstat.py | 97 +++--- python_apps/pypo/pypo/pure.py | 7 +- python_apps/pypo/pypo/pypofetch.py | 207 +++++++----- python_apps/pypo/pypo/pypofile.py | 64 ++-- python_apps/pypo/pypo/pypoliqqueue.py | 49 +-- 
python_apps/pypo/pypo/pypoliquidsoap.py | 129 +++---- python_apps/pypo/pypo/pypomessagehandler.py | 53 +-- python_apps/pypo/pypo/pypopush.py | 51 +-- python_apps/pypo/pypo/recorder.py | 166 +++++---- python_apps/pypo/pypo/telnetliquidsoap.py | 162 ++++----- python_apps/pypo/pypo/testpypoliqqueue.py | 96 +++--- python_apps/pypo/pypo/timeout.py | 9 +- python_apps/pypo/setup.py | 95 +++--- utils/airtime-import/airtime-import | 313 ++++++++++------- utils/airtime-silan | 30 +- utils/airtime-test-soundcard.py | 36 +- utils/airtime-test-stream.py | 33 +- utils/upgrade.py | 33 +- 85 files changed, 3238 insertions(+), 2243 deletions(-) diff --git a/api/libretimeapi/apps.py b/api/libretimeapi/apps.py index d02db415c..992351ab6 100644 --- a/api/libretimeapi/apps.py +++ b/api/libretimeapi/apps.py @@ -2,7 +2,8 @@ from django.apps import AppConfig from django.db.models.signals import pre_save + class LibreTimeAPIConfig(AppConfig): - name = 'libretimeapi' - verbose_name = 'LibreTime API' - default_auto_field = 'django.db.models.AutoField' + name = "libretimeapi" + verbose_name = "LibreTime API" + default_auto_field = "django.db.models.AutoField" diff --git a/api/libretimeapi/managers.py b/api/libretimeapi/managers.py index f5950acb9..1ef686f04 100644 --- a/api/libretimeapi/managers.py +++ b/api/libretimeapi/managers.py @@ -1,21 +1,23 @@ # -*- coding: utf-8 -*- from django.contrib.auth.models import BaseUserManager + class UserManager(BaseUserManager): def create_user(self, username, type, email, first_name, last_name, password): - user = self.model(username=username, - type=type, - email=email, - first_name=first_name, - last_name=last_name) + user = self.model( + username=username, + type=type, + email=email, + first_name=first_name, + last_name=last_name, + ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, username, email, first_name, last_name, password): - user = self.create_user(username, 'A', email, first_name, last_name, 
password) + user = self.create_user(username, "A", email, first_name, last_name, password) return user def get_by_natural_key(self, username): return self.get(username=username) - diff --git a/api/libretimeapi/models/authentication.py b/api/libretimeapi/models/authentication.py index f75023dac..8230153d7 100644 --- a/api/libretimeapi/models/authentication.py +++ b/api/libretimeapi/models/authentication.py @@ -15,18 +15,20 @@ class LoginAttempt(models.Model): class Meta: managed = False - db_table = 'cc_login_attempts' + db_table = "cc_login_attempts" class Session(models.Model): sessid = models.CharField(primary_key=True, max_length=32) - userid = models.ForeignKey('User', models.DO_NOTHING, db_column='userid', blank=True, null=True) + userid = models.ForeignKey( + "User", models.DO_NOTHING, db_column="userid", blank=True, null=True + ) login = models.CharField(max_length=255, blank=True, null=True) ts = models.DateTimeField(blank=True, null=True) class Meta: managed = False - db_table = 'cc_sess' + db_table = "cc_sess" USER_TYPE_CHOICES = () @@ -35,12 +37,14 @@ for item in USER_TYPES.items(): class User(AbstractBaseUser): - username = models.CharField(db_column='login', unique=True, max_length=255) - password = models.CharField(db_column='pass', max_length=255) # Field renamed because it was a Python reserved word. + username = models.CharField(db_column="login", unique=True, max_length=255) + password = models.CharField( + db_column="pass", max_length=255 + ) # Field renamed because it was a Python reserved word. 
type = models.CharField(max_length=1, choices=USER_TYPE_CHOICES) first_name = models.CharField(max_length=255) last_name = models.CharField(max_length=255) - last_login = models.DateTimeField(db_column='lastlogin', blank=True, null=True) + last_login = models.DateTimeField(db_column="lastlogin", blank=True, null=True) lastfail = models.DateTimeField(blank=True, null=True) skype_contact = models.CharField(max_length=1024, blank=True, null=True) jabber_contact = models.CharField(max_length=1024, blank=True, null=True) @@ -48,13 +52,13 @@ class User(AbstractBaseUser): cell_phone = models.CharField(max_length=1024, blank=True, null=True) login_attempts = models.IntegerField(blank=True, null=True) - USERNAME_FIELD = 'username' - EMAIL_FIELD = 'email' - REQUIRED_FIELDS = ['type', 'email', 'first_name', 'last_name'] + USERNAME_FIELD = "username" + EMAIL_FIELD = "email" + REQUIRED_FIELDS = ["type", "email", "first_name", "last_name"] objects = UserManager() def get_full_name(self): - return '{} {}'.format(self.first_name, self.last_name) + return "{} {}".format(self.first_name, self.last_name) def get_short_name(self): return self.first_name @@ -66,7 +70,7 @@ class User(AbstractBaseUser): self.password = hashlib.md5(password.encode()).hexdigest() def is_staff(self): - print('is_staff') + print("is_staff") return self.type == ADMIN def check_password(self, password): @@ -82,6 +86,7 @@ class User(AbstractBaseUser): (managed = True), then this can be replaced with django.contrib.auth.models.PermissionMixin. 
""" + def is_superuser(self): return self.type == ADMIN @@ -125,7 +130,7 @@ class User(AbstractBaseUser): class Meta: managed = False - db_table = 'cc_subjs' + db_table = "cc_subjs" class UserToken(models.Model): @@ -139,4 +144,4 @@ class UserToken(models.Model): class Meta: managed = False - db_table = 'cc_subjs_token' + db_table = "cc_subjs_token" diff --git a/api/libretimeapi/models/celery.py b/api/libretimeapi/models/celery.py index 1527f3b35..ca19378d2 100644 --- a/api/libretimeapi/models/celery.py +++ b/api/libretimeapi/models/celery.py @@ -4,11 +4,13 @@ from django.db import models class CeleryTask(models.Model): task_id = models.CharField(max_length=256) - track_reference = models.ForeignKey('ThirdPartyTrackReference', models.DO_NOTHING, db_column='track_reference') + track_reference = models.ForeignKey( + "ThirdPartyTrackReference", models.DO_NOTHING, db_column="track_reference" + ) name = models.CharField(max_length=256, blank=True, null=True) dispatch_time = models.DateTimeField(blank=True, null=True) status = models.CharField(max_length=256) class Meta: managed = False - db_table = 'celery_tasks' + db_table = "celery_tasks" diff --git a/api/libretimeapi/models/countries.py b/api/libretimeapi/models/countries.py index e0fcf96d9..0bdecaabd 100644 --- a/api/libretimeapi/models/countries.py +++ b/api/libretimeapi/models/countries.py @@ -8,5 +8,4 @@ class Country(models.Model): class Meta: managed = False - db_table = 'cc_country' - + db_table = "cc_country" diff --git a/api/libretimeapi/models/files.py b/api/libretimeapi/models/files.py index fb154b11f..2c1d24cb1 100644 --- a/api/libretimeapi/models/files.py +++ b/api/libretimeapi/models/files.py @@ -6,11 +6,20 @@ class File(models.Model): name = models.CharField(max_length=255) mime = models.CharField(max_length=255) ftype = models.CharField(max_length=128) - directory = models.ForeignKey('MusicDir', models.DO_NOTHING, db_column='directory', blank=True, null=True) + directory = models.ForeignKey( + 
"MusicDir", models.DO_NOTHING, db_column="directory", blank=True, null=True + ) filepath = models.TextField(blank=True, null=True) import_status = models.IntegerField() - currently_accessing = models.IntegerField(db_column='currentlyaccessing') - edited_by = models.ForeignKey('User', models.DO_NOTHING, db_column='editedby', blank=True, null=True, related_name='edited_files') + currently_accessing = models.IntegerField(db_column="currentlyaccessing") + edited_by = models.ForeignKey( + "User", + models.DO_NOTHING, + db_column="editedby", + blank=True, + null=True, + related_name="edited_files", + ) mtime = models.DateTimeField(blank=True, null=True) utime = models.DateTimeField(blank=True, null=True) lptime = models.DateTimeField(blank=True, null=True) @@ -59,8 +68,10 @@ class File(models.Model): contributor = models.CharField(max_length=512, blank=True, null=True) language = models.CharField(max_length=512, blank=True, null=True) file_exists = models.BooleanField(blank=True, null=True) - replay_gain = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True) - owner = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True) + replay_gain = models.DecimalField( + max_digits=8, decimal_places=2, blank=True, null=True + ) + owner = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True) cuein = models.DurationField(blank=True, null=True) cueout = models.DurationField(blank=True, null=True) silan_check = models.BooleanField(blank=True, null=True) @@ -77,10 +88,10 @@ class File(models.Model): class Meta: managed = False - db_table = 'cc_files' + db_table = "cc_files" permissions = [ - ('change_own_file', 'Change the files where they are the owner'), - ('delete_own_file', 'Delete the files where they are the owner'), + ("change_own_file", "Change the files where they are the owner"), + ("delete_own_file", "Delete the files where they are the owner"), ] @@ -92,15 +103,16 @@ class MusicDir(models.Model): class Meta: managed = False - 
db_table = 'cc_music_dirs' + db_table = "cc_music_dirs" class CloudFile(models.Model): storage_backend = models.CharField(max_length=512) resource_id = models.TextField() - filename = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True, - db_column='cc_file_id') + filename = models.ForeignKey( + File, models.DO_NOTHING, blank=True, null=True, db_column="cc_file_id" + ) class Meta: managed = False - db_table = 'cloud_file' + db_table = "cloud_file" diff --git a/api/libretimeapi/models/playlists.py b/api/libretimeapi/models/playlists.py index f3f955517..af2d6250b 100644 --- a/api/libretimeapi/models/playlists.py +++ b/api/libretimeapi/models/playlists.py @@ -8,7 +8,7 @@ class Playlist(models.Model): name = models.CharField(max_length=255) mtime = models.DateTimeField(blank=True, null=True) utime = models.DateTimeField(blank=True, null=True) - creator = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True) + creator = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True) description = models.CharField(max_length=512, blank=True, null=True) length = models.DurationField(blank=True, null=True) @@ -17,7 +17,7 @@ class Playlist(models.Model): class Meta: managed = False - db_table = 'cc_playlist' + db_table = "cc_playlist" class PlaylistContent(models.Model): @@ -39,4 +39,4 @@ class PlaylistContent(models.Model): class Meta: managed = False - db_table = 'cc_playlistcontents' + db_table = "cc_playlistcontents" diff --git a/api/libretimeapi/models/playout.py b/api/libretimeapi/models/playout.py index 808cc5d08..1e6c150e9 100644 --- a/api/libretimeapi/models/playout.py +++ b/api/libretimeapi/models/playout.py @@ -4,13 +4,13 @@ from .files import File class ListenerCount(models.Model): - timestamp = models.ForeignKey('Timestamp', models.DO_NOTHING) - mount_name = models.ForeignKey('MountName', models.DO_NOTHING) + timestamp = models.ForeignKey("Timestamp", models.DO_NOTHING) + mount_name = models.ForeignKey("MountName", 
models.DO_NOTHING) listener_count = models.IntegerField() class Meta: managed = False - db_table = 'cc_listener_count' + db_table = "cc_listener_count" class LiveLog(models.Model): @@ -20,18 +20,20 @@ class LiveLog(models.Model): class Meta: managed = False - db_table = 'cc_live_log' + db_table = "cc_live_log" class PlayoutHistory(models.Model): file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True) starts = models.DateTimeField() ends = models.DateTimeField(blank=True, null=True) - instance = models.ForeignKey('ShowInstance', models.DO_NOTHING, blank=True, null=True) + instance = models.ForeignKey( + "ShowInstance", models.DO_NOTHING, blank=True, null=True + ) class Meta: managed = False - db_table = 'cc_playout_history' + db_table = "cc_playout_history" class PlayoutHistoryMetadata(models.Model): @@ -41,7 +43,7 @@ class PlayoutHistoryMetadata(models.Model): class Meta: managed = False - db_table = 'cc_playout_history_metadata' + db_table = "cc_playout_history_metadata" class PlayoutHistoryTemplate(models.Model): @@ -50,7 +52,7 @@ class PlayoutHistoryTemplate(models.Model): class Meta: managed = False - db_table = 'cc_playout_history_template' + db_table = "cc_playout_history_template" class PlayoutHistoryTemplateField(models.Model): @@ -63,7 +65,7 @@ class PlayoutHistoryTemplateField(models.Model): class Meta: managed = False - db_table = 'cc_playout_history_template_field' + db_table = "cc_playout_history_template_field" class Timestamp(models.Model): @@ -71,4 +73,4 @@ class Timestamp(models.Model): class Meta: managed = False - db_table = 'cc_timestamp' + db_table = "cc_timestamp" diff --git a/api/libretimeapi/models/podcasts.py b/api/libretimeapi/models/podcasts.py index 2e7ba817a..2d55c126a 100644 --- a/api/libretimeapi/models/podcasts.py +++ b/api/libretimeapi/models/podcasts.py @@ -8,14 +8,14 @@ class ImportedPodcast(models.Model): auto_ingest = models.BooleanField() auto_ingest_timestamp = models.DateTimeField(blank=True, null=True) 
album_override = models.BooleanField() - podcast = models.ForeignKey('Podcast', models.DO_NOTHING) + podcast = models.ForeignKey("Podcast", models.DO_NOTHING) def get_owner(self): return self.podcast.owner class Meta: managed = False - db_table = 'imported_podcast' + db_table = "imported_podcast" class Podcast(models.Model): @@ -32,17 +32,19 @@ class Podcast(models.Model): itunes_subtitle = models.CharField(max_length=4096, blank=True, null=True) itunes_category = models.CharField(max_length=4096, blank=True, null=True) itunes_explicit = models.CharField(max_length=4096, blank=True, null=True) - owner = models.ForeignKey(User, models.DO_NOTHING, db_column='owner', blank=True, null=True) + owner = models.ForeignKey( + User, models.DO_NOTHING, db_column="owner", blank=True, null=True + ) def get_owner(self): return self.owner class Meta: managed = False - db_table = 'podcast' + db_table = "podcast" permissions = [ - ('change_own_podcast', 'Change the podcasts where they are the owner'), - ('delete_own_podcast', 'Delete the podcasts where they are the owner'), + ("change_own_podcast", "Change the podcasts where they are the owner"), + ("delete_own_podcast", "Delete the podcasts where they are the owner"), ] @@ -60,10 +62,16 @@ class PodcastEpisode(models.Model): class Meta: managed = False - db_table = 'podcast_episodes' + db_table = "podcast_episodes" permissions = [ - ('change_own_podcastepisode', 'Change the episodes of podcasts where they are the owner'), - ('delete_own_podcastepisode', 'Delete the episodes of podcasts where they are the owner'), + ( + "change_own_podcastepisode", + "Change the episodes of podcasts where they are the owner", + ), + ( + "delete_own_podcastepisode", + "Delete the episodes of podcasts where they are the owner", + ), ] @@ -75,4 +83,4 @@ class StationPodcast(models.Model): class Meta: managed = False - db_table = 'station_podcast' + db_table = "station_podcast" diff --git a/api/libretimeapi/models/preferences.py 
b/api/libretimeapi/models/preferences.py index 2fd2e066b..341f9cf9c 100644 --- a/api/libretimeapi/models/preferences.py +++ b/api/libretimeapi/models/preferences.py @@ -3,14 +3,16 @@ from django.db import models class Preference(models.Model): - subjid = models.ForeignKey('User', models.DO_NOTHING, db_column='subjid', blank=True, null=True) + subjid = models.ForeignKey( + "User", models.DO_NOTHING, db_column="subjid", blank=True, null=True + ) keystr = models.CharField(unique=True, max_length=255, blank=True, null=True) valstr = models.TextField(blank=True, null=True) class Meta: managed = False - db_table = 'cc_pref' - unique_together = (('subjid', 'keystr'),) + db_table = "cc_pref" + unique_together = (("subjid", "keystr"),) class MountName(models.Model): @@ -18,7 +20,7 @@ class MountName(models.Model): class Meta: managed = False - db_table = 'cc_mount_name' + db_table = "cc_mount_name" class StreamSetting(models.Model): @@ -28,4 +30,4 @@ class StreamSetting(models.Model): class Meta: managed = False - db_table = 'cc_stream_setting' + db_table = "cc_stream_setting" diff --git a/api/libretimeapi/models/schedule.py b/api/libretimeapi/models/schedule.py index 5f51a6344..c982622c0 100644 --- a/api/libretimeapi/models/schedule.py +++ b/api/libretimeapi/models/schedule.py @@ -7,14 +7,14 @@ class Schedule(models.Model): starts = models.DateTimeField() ends = models.DateTimeField() file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True) - stream = models.ForeignKey('Webstream', models.DO_NOTHING, blank=True, null=True) + stream = models.ForeignKey("Webstream", models.DO_NOTHING, blank=True, null=True) clip_length = models.DurationField(blank=True, null=True) fade_in = models.TimeField(blank=True, null=True) fade_out = models.TimeField(blank=True, null=True) cue_in = models.DurationField() cue_out = models.DurationField() media_item_played = models.BooleanField(blank=True, null=True) - instance = models.ForeignKey('ShowInstance', models.DO_NOTHING) + 
instance = models.ForeignKey("ShowInstance", models.DO_NOTHING) playout_status = models.SmallIntegerField() broadcasted = models.SmallIntegerField() position = models.IntegerField() @@ -24,8 +24,8 @@ class Schedule(models.Model): class Meta: managed = False - db_table = 'cc_schedule' + db_table = "cc_schedule" permissions = [ - ('change_own_schedule', 'Change the content on their shows'), - ('delete_own_schedule', 'Delete the content on their shows'), + ("change_own_schedule", "Change the content on their shows"), + ("delete_own_schedule", "Delete the content on their shows"), ] diff --git a/api/libretimeapi/models/services.py b/api/libretimeapi/models/services.py index 8106b5449..747013efd 100644 --- a/api/libretimeapi/models/services.py +++ b/api/libretimeapi/models/services.py @@ -8,5 +8,4 @@ class ServiceRegister(models.Model): class Meta: managed = False - db_table = 'cc_service_register' - + db_table = "cc_service_register" diff --git a/api/libretimeapi/models/shows.py b/api/libretimeapi/models/shows.py index 8acc09d6b..ab2ac40af 100644 --- a/api/libretimeapi/models/shows.py +++ b/api/libretimeapi/models/shows.py @@ -27,7 +27,7 @@ class Show(models.Model): class Meta: managed = False - db_table = 'cc_show' + db_table = "cc_show" class ShowDays(models.Model): @@ -47,16 +47,16 @@ class ShowDays(models.Model): class Meta: managed = False - db_table = 'cc_show_days' + db_table = "cc_show_days" class ShowHost(models.Model): show = models.ForeignKey(Show, models.DO_NOTHING) - subjs = models.ForeignKey('User', models.DO_NOTHING) + subjs = models.ForeignKey("User", models.DO_NOTHING) class Meta: managed = False - db_table = 'cc_show_hosts' + db_table = "cc_show_hosts" class ShowInstance(models.Model): @@ -66,7 +66,7 @@ class ShowInstance(models.Model): show = models.ForeignKey(Show, models.DO_NOTHING) record = models.SmallIntegerField(blank=True, null=True) rebroadcast = models.SmallIntegerField(blank=True, null=True) - instance = models.ForeignKey('self', 
models.DO_NOTHING, blank=True, null=True) + instance = models.ForeignKey("self", models.DO_NOTHING, blank=True, null=True) file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True) time_filled = models.DurationField(blank=True, null=True) created = models.DateTimeField() @@ -79,7 +79,7 @@ class ShowInstance(models.Model): class Meta: managed = False - db_table = 'cc_show_instances' + db_table = "cc_show_instances" class ShowRebroadcast(models.Model): @@ -92,4 +92,4 @@ class ShowRebroadcast(models.Model): class Meta: managed = False - db_table = 'cc_show_rebroadcast' + db_table = "cc_show_rebroadcast" diff --git a/api/libretimeapi/models/smart_blocks.py b/api/libretimeapi/models/smart_blocks.py index 5b3c23f85..d4f11f676 100644 --- a/api/libretimeapi/models/smart_blocks.py +++ b/api/libretimeapi/models/smart_blocks.py @@ -6,7 +6,7 @@ class SmartBlock(models.Model): name = models.CharField(max_length=255) mtime = models.DateTimeField(blank=True, null=True) utime = models.DateTimeField(blank=True, null=True) - creator = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True) + creator = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True) description = models.CharField(max_length=512, blank=True, null=True) length = models.DurationField(blank=True, null=True) type = models.CharField(max_length=7, blank=True, null=True) @@ -16,16 +16,22 @@ class SmartBlock(models.Model): class Meta: managed = False - db_table = 'cc_block' + db_table = "cc_block" permissions = [ - ('change_own_smartblock', 'Change the smartblocks where they are the owner'), - ('delete_own_smartblock', 'Delete the smartblocks where they are the owner'), + ( + "change_own_smartblock", + "Change the smartblocks where they are the owner", + ), + ( + "delete_own_smartblock", + "Delete the smartblocks where they are the owner", + ), ] class SmartBlockContent(models.Model): block = models.ForeignKey(SmartBlock, models.DO_NOTHING, blank=True, null=True) - file = 
models.ForeignKey('File', models.DO_NOTHING, blank=True, null=True) + file = models.ForeignKey("File", models.DO_NOTHING, blank=True, null=True) position = models.IntegerField(blank=True, null=True) trackoffset = models.FloatField() cliplength = models.DurationField(blank=True, null=True) @@ -39,10 +45,16 @@ class SmartBlockContent(models.Model): class Meta: managed = False - db_table = 'cc_blockcontents' + db_table = "cc_blockcontents" permissions = [ - ('change_own_smartblockcontent', 'Change the content of smartblocks where they are the owner'), - ('delete_own_smartblockcontent', 'Delete the content of smartblocks where they are the owner'), + ( + "change_own_smartblockcontent", + "Change the content of smartblocks where they are the owner", + ), + ( + "delete_own_smartblockcontent", + "Delete the content of smartblocks where they are the owner", + ), ] @@ -59,9 +71,14 @@ class SmartBlockCriteria(models.Model): class Meta: managed = False - db_table = 'cc_blockcriteria' + db_table = "cc_blockcriteria" permissions = [ - ('change_own_smartblockcriteria', 'Change the criteria of smartblocks where they are the owner'), - ('delete_own_smartblockcriteria', 'Delete the criteria of smartblocks where they are the owner'), + ( + "change_own_smartblockcriteria", + "Change the criteria of smartblocks where they are the owner", + ), + ( + "delete_own_smartblockcriteria", + "Delete the criteria of smartblocks where they are the owner", + ), ] - diff --git a/api/libretimeapi/models/tracks.py b/api/libretimeapi/models/tracks.py index 192480bef..3a784dc8d 100644 --- a/api/libretimeapi/models/tracks.py +++ b/api/libretimeapi/models/tracks.py @@ -12,7 +12,8 @@ class ThirdPartyTrackReference(models.Model): class Meta: managed = False - db_table = 'third_party_track_references' + db_table = "third_party_track_references" + class TrackType(models.Model): code = models.CharField(max_length=16, unique=True) @@ -22,5 +23,4 @@ class TrackType(models.Model): class Meta: managed = False - 
db_table = 'cc_track_types' - + db_table = "cc_track_types" diff --git a/api/libretimeapi/models/user_constants.py b/api/libretimeapi/models/user_constants.py index 56afd552d..aa85c3904 100644 --- a/api/libretimeapi/models/user_constants.py +++ b/api/libretimeapi/models/user_constants.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -GUEST = 'G' -DJ = 'H' -PROGRAM_MANAGER = 'P' -ADMIN = 'A' +GUEST = "G" +DJ = "H" +PROGRAM_MANAGER = "P" +ADMIN = "A" USER_TYPES = { - GUEST: 'Guest', - DJ: 'DJ', - PROGRAM_MANAGER: 'Program Manager', - ADMIN: 'Admin', + GUEST: "Guest", + DJ: "DJ", + PROGRAM_MANAGER: "Program Manager", + ADMIN: "Admin", } diff --git a/api/libretimeapi/models/webstreams.py b/api/libretimeapi/models/webstreams.py index ed940df2e..5251007c4 100644 --- a/api/libretimeapi/models/webstreams.py +++ b/api/libretimeapi/models/webstreams.py @@ -21,10 +21,10 @@ class Webstream(models.Model): class Meta: managed = False - db_table = 'cc_webstream' + db_table = "cc_webstream" permissions = [ - ('change_own_webstream', 'Change the webstreams where they are the owner'), - ('delete_own_webstream', 'Delete the webstreams where they are the owner'), + ("change_own_webstream", "Change the webstreams where they are the owner"), + ("delete_own_webstream", "Delete the webstreams where they are the owner"), ] @@ -38,4 +38,4 @@ class WebstreamMetadata(models.Model): class Meta: managed = False - db_table = 'cc_webstream_metadata' + db_table = "cc_webstream_metadata" diff --git a/api/libretimeapi/permission_constants.py b/api/libretimeapi/permission_constants.py index 323fdd873..8e77e07ab 100644 --- a/api/libretimeapi/permission_constants.py +++ b/api/libretimeapi/permission_constants.py @@ -5,98 +5,101 @@ from .models.user_constants import GUEST, DJ, PROGRAM_MANAGER, USER_TYPES logger = logging.getLogger(__name__) -GUEST_PERMISSIONS = ['view_schedule', - 'view_show', - 'view_showdays', - 'view_showhost', - 'view_showinstance', - 'view_showrebroadcast', - 'view_file', - 
'view_podcast', - 'view_podcastepisode', - 'view_playlist', - 'view_playlistcontent', - 'view_smartblock', - 'view_smartblockcontent', - 'view_smartblockcriteria', - 'view_webstream', - 'view_apiroot', - ] -DJ_PERMISSIONS = GUEST_PERMISSIONS + ['add_file', - 'add_podcast', - 'add_podcastepisode', - 'add_playlist', - 'add_playlistcontent', - 'add_smartblock', - 'add_smartblockcontent', - 'add_smartblockcriteria', - 'add_webstream', - 'change_own_schedule', - 'change_own_file', - 'change_own_podcast', - 'change_own_podcastepisode', - 'change_own_playlist', - 'change_own_playlistcontent', - 'change_own_smartblock', - 'change_own_smartblockcontent', - 'change_own_smartblockcriteria', - 'change_own_webstream', - 'delete_own_schedule', - 'delete_own_file', - 'delete_own_podcast', - 'delete_own_podcastepisode', - 'delete_own_playlist', - 'delete_own_playlistcontent', - 'delete_own_smartblock', - 'delete_own_smartblockcontent', - 'delete_own_smartblockcriteria', - 'delete_own_webstream', - ] -PROGRAM_MANAGER_PERMISSIONS = GUEST_PERMISSIONS + ['add_show', - 'add_showdays', - 'add_showhost', - 'add_showinstance', - 'add_showrebroadcast', - 'add_file', - 'add_podcast', - 'add_podcastepisode', - 'add_playlist', - 'add_playlistcontent', - 'add_smartblock', - 'add_smartblockcontent', - 'add_smartblockcriteria', - 'add_webstream', - 'change_schedule', - 'change_show', - 'change_showdays', - 'change_showhost', - 'change_showinstance', - 'change_showrebroadcast', - 'change_file', - 'change_podcast', - 'change_podcastepisode', - 'change_playlist', - 'change_playlistcontent', - 'change_smartblock', - 'change_smartblockcontent', - 'change_smartblockcriteria', - 'change_webstream', - 'delete_schedule', - 'delete_show', - 'delete_showdays', - 'delete_showhost', - 'delete_showinstance', - 'delete_showrebroadcast', - 'delete_file', - 'delete_podcast', - 'delete_podcastepisode', - 'delete_playlist', - 'delete_playlistcontent', - 'delete_smartblock', - 'delete_smartblockcontent', - 
'delete_smartblockcriteria', - 'delete_webstream', - ] +GUEST_PERMISSIONS = [ + "view_schedule", + "view_show", + "view_showdays", + "view_showhost", + "view_showinstance", + "view_showrebroadcast", + "view_file", + "view_podcast", + "view_podcastepisode", + "view_playlist", + "view_playlistcontent", + "view_smartblock", + "view_smartblockcontent", + "view_smartblockcriteria", + "view_webstream", + "view_apiroot", +] +DJ_PERMISSIONS = GUEST_PERMISSIONS + [ + "add_file", + "add_podcast", + "add_podcastepisode", + "add_playlist", + "add_playlistcontent", + "add_smartblock", + "add_smartblockcontent", + "add_smartblockcriteria", + "add_webstream", + "change_own_schedule", + "change_own_file", + "change_own_podcast", + "change_own_podcastepisode", + "change_own_playlist", + "change_own_playlistcontent", + "change_own_smartblock", + "change_own_smartblockcontent", + "change_own_smartblockcriteria", + "change_own_webstream", + "delete_own_schedule", + "delete_own_file", + "delete_own_podcast", + "delete_own_podcastepisode", + "delete_own_playlist", + "delete_own_playlistcontent", + "delete_own_smartblock", + "delete_own_smartblockcontent", + "delete_own_smartblockcriteria", + "delete_own_webstream", +] +PROGRAM_MANAGER_PERMISSIONS = GUEST_PERMISSIONS + [ + "add_show", + "add_showdays", + "add_showhost", + "add_showinstance", + "add_showrebroadcast", + "add_file", + "add_podcast", + "add_podcastepisode", + "add_playlist", + "add_playlistcontent", + "add_smartblock", + "add_smartblockcontent", + "add_smartblockcriteria", + "add_webstream", + "change_schedule", + "change_show", + "change_showdays", + "change_showhost", + "change_showinstance", + "change_showrebroadcast", + "change_file", + "change_podcast", + "change_podcastepisode", + "change_playlist", + "change_playlistcontent", + "change_smartblock", + "change_smartblockcontent", + "change_smartblockcriteria", + "change_webstream", + "delete_schedule", + "delete_show", + "delete_showdays", + "delete_showhost", + 
"delete_showinstance", + "delete_showrebroadcast", + "delete_file", + "delete_podcast", + "delete_podcastepisode", + "delete_playlist", + "delete_playlistcontent", + "delete_smartblock", + "delete_smartblockcontent", + "delete_smartblockcriteria", + "delete_webstream", +] GROUPS = { GUEST: GUEST_PERMISSIONS, diff --git a/api/libretimeapi/permissions.py b/api/libretimeapi/permissions.py index 943ec562d..93c663fe6 100644 --- a/api/libretimeapi/permissions.py +++ b/api/libretimeapi/permissions.py @@ -4,21 +4,22 @@ from django.conf import settings from .models.user_constants import DJ REQUEST_PERMISSION_TYPE_MAP = { - 'GET': 'view', - 'HEAD': 'view', - 'OPTIONS': 'view', - 'POST': 'change', - 'PUT': 'change', - 'DELETE': 'delete', - 'PATCH': 'change', + "GET": "view", + "HEAD": "view", + "OPTIONS": "view", + "POST": "change", + "PUT": "change", + "DELETE": "delete", + "PATCH": "change", } + def get_own_obj(request, view): user = request.user if user is None or user.type != DJ: - return '' - if request.method == 'GET': - return '' + return "" + if request.method == "GET": + return "" qs = view.queryset.all() try: model_owners = [] @@ -27,32 +28,34 @@ def get_own_obj(request, view): if owner not in model_owners: model_owners.append(owner) if len(model_owners) == 1 and user in model_owners: - return 'own_' + return "own_" except AttributeError: - return '' - return '' + return "" + return "" + def get_permission_for_view(request, view): try: permission_type = REQUEST_PERMISSION_TYPE_MAP[request.method] - if view.__class__.__name__ == 'APIRootView': - return '{}_apiroot'.format(permission_type) + if view.__class__.__name__ == "APIRootView": + return "{}_apiroot".format(permission_type) model = view.model_permission_name own_obj = get_own_obj(request, view) - return '{permission_type}_{own_obj}{model}'.format(permission_type=permission_type, - own_obj=own_obj, - model=model) + return "{permission_type}_{own_obj}{model}".format( + permission_type=permission_type, 
own_obj=own_obj, model=model + ) except AttributeError: return None -def check_authorization_header(request): - auth_header = request.META.get('Authorization') - if not auth_header: - auth_header = request.META.get('HTTP_AUTHORIZATION', '') - if auth_header.startswith('Api-Key'): +def check_authorization_header(request): + auth_header = request.META.get("Authorization") + if not auth_header: + auth_header = request.META.get("HTTP_AUTHORIZATION", "") + + if auth_header.startswith("Api-Key"): token = auth_header.split()[1] - if token == settings.CONFIG.get('general', 'api_key'): + if token == settings.CONFIG.get("general", "api_key"): return True return False @@ -63,6 +66,7 @@ class IsAdminOrOwnUser(BasePermission): Django's standard permission system. For details see https://www.django-rest-framework.org/api-guide/permissions/#custom-permissions """ + def has_permission(self, request, view): if request.user.is_superuser(): return True @@ -84,6 +88,7 @@ class IsSystemTokenOrUser(BasePermission): an API-Key header. All standard-users (i.e. not using the API-Key) have their permissions checked against Django's standard permission system. """ + def has_permission(self, request, view): if request.user and request.user.is_authenticated: perm = get_permission_for_view(request, view) @@ -91,7 +96,7 @@ class IsSystemTokenOrUser(BasePermission): # model. This use-case allows users to view the base of the API # explorer. Their assigned group permissions determine further access # into the explorer. 
- if perm == 'view_apiroot': + if perm == "view_apiroot": return True return request.user.has_perm(perm) return check_authorization_header(request) diff --git a/api/libretimeapi/serializers.py b/api/libretimeapi/serializers.py index 2b9466125..2f69b62bb 100644 --- a/api/libretimeapi/serializers.py +++ b/api/libretimeapi/serializers.py @@ -3,264 +3,305 @@ from django.contrib.auth import get_user_model from rest_framework import serializers from .models import * + class UserSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = get_user_model() fields = [ - 'item_url', - 'username', - 'type', - 'first_name', - 'last_name', - 'lastfail', - 'skype_contact', - 'jabber_contact', - 'email', - 'cell_phone', - 'login_attempts', + "item_url", + "username", + "type", + "first_name", + "last_name", + "lastfail", + "skype_contact", + "jabber_contact", + "email", + "cell_phone", + "login_attempts", ] + class SmartBlockSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = SmartBlock - fields = '__all__' + fields = "__all__" + class SmartBlockContentSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = SmartBlockContent - fields = '__all__' + fields = "__all__" + class SmartBlockCriteriaSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = SmartBlockCriteria - fields = '__all__' + fields = "__all__" + class CountrySerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Country - fields = '__all__' + fields = "__all__" + class FileSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = File - fields = '__all__' + fields = "__all__" + class ListenerCountSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ListenerCount - fields = '__all__' + fields = "__all__" + class LiveLogSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = LiveLog - fields = '__all__' + fields = "__all__" + class 
LoginAttemptSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = LoginAttempt - fields = '__all__' + fields = "__all__" + class MountNameSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = MountName - fields = '__all__' + fields = "__all__" + class MusicDirSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = MusicDir - fields = '__all__' + fields = "__all__" + class PlaylistSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Playlist - fields = '__all__' + fields = "__all__" + class PlaylistContentSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PlaylistContent - fields = '__all__' + fields = "__all__" + class PlayoutHistorySerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PlayoutHistory - fields = '__all__' + fields = "__all__" + class PlayoutHistoryMetadataSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PlayoutHistoryMetadata - fields = '__all__' + fields = "__all__" + class PlayoutHistoryTemplateSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PlayoutHistoryTemplate - fields = '__all__' + fields = "__all__" + class PlayoutHistoryTemplateFieldSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PlayoutHistoryTemplateField - fields = '__all__' + fields = "__all__" + class PreferenceSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Preference - fields = '__all__' + fields = "__all__" + class ScheduleSerializer(serializers.HyperlinkedModelSerializer): - file_id = serializers.IntegerField(source='file.id', read_only=True) - stream_id = serializers.IntegerField(source='stream.id', read_only=True) - instance_id = serializers.IntegerField(source='instance.id', read_only=True) + file_id = serializers.IntegerField(source="file.id", read_only=True) + stream_id = serializers.IntegerField(source="stream.id", read_only=True) + instance_id = 
serializers.IntegerField(source="instance.id", read_only=True) + class Meta: model = Schedule fields = [ - 'item_url', - 'id', - 'starts', - 'ends', - 'clip_length', - 'fade_in', - 'fade_out', - 'cue_in', - 'cue_out', - 'media_item_played', - 'file', - 'file_id', - 'stream', - 'stream_id', - 'instance', - 'instance_id', + "item_url", + "id", + "starts", + "ends", + "clip_length", + "fade_in", + "fade_out", + "cue_in", + "cue_out", + "media_item_played", + "file", + "file_id", + "stream", + "stream_id", + "instance", + "instance_id", ] + class ServiceRegisterSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ServiceRegister - fields = '__all__' + fields = "__all__" + class SessionSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Session - fields = '__all__' + fields = "__all__" + class ShowSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Show fields = [ - 'item_url', - 'id', - 'name', - 'url', - 'genre', - 'description', - 'color', - 'background_color', - 'linked', - 'is_linkable', - 'image_path', - 'has_autoplaylist', - 'autoplaylist_repeat', - 'autoplaylist', + "item_url", + "id", + "name", + "url", + "genre", + "description", + "color", + "background_color", + "linked", + "is_linkable", + "image_path", + "has_autoplaylist", + "autoplaylist_repeat", + "autoplaylist", ] + class ShowDaysSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ShowDays - fields = '__all__' + fields = "__all__" + class ShowHostSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ShowHost - fields = '__all__' + fields = "__all__" + class ShowInstanceSerializer(serializers.HyperlinkedModelSerializer): - show_id = serializers.IntegerField(source='show.id', read_only=True) - file_id = serializers.IntegerField(source='file.id', read_only=True) + show_id = serializers.IntegerField(source="show.id", read_only=True) + file_id = serializers.IntegerField(source="file.id", read_only=True) + class 
Meta: model = ShowInstance fields = [ - 'item_url', - 'id', - 'description', - 'starts', - 'ends', - 'record', - 'rebroadcast', - 'time_filled', - 'created', - 'last_scheduled', - 'modified_instance', - 'autoplaylist_built', - 'show', - 'show_id', - 'instance', - 'file', - 'file_id', + "item_url", + "id", + "description", + "starts", + "ends", + "record", + "rebroadcast", + "time_filled", + "created", + "last_scheduled", + "modified_instance", + "autoplaylist_built", + "show", + "show_id", + "instance", + "file", + "file_id", ] + class ShowRebroadcastSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ShowRebroadcast - fields = '__all__' + fields = "__all__" + class StreamSettingSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = StreamSetting - fields = '__all__' + fields = "__all__" + class UserTokenSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = UserToken - fields = '__all__' + fields = "__all__" + class TimestampSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Timestamp - fields = '__all__' + fields = "__all__" + class WebstreamSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Webstream - fields = '__all__' + fields = "__all__" + class WebstreamMetadataSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = WebstreamMetadata - fields = '__all__' + fields = "__all__" + class CeleryTaskSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = CeleryTask - fields = '__all__' + fields = "__all__" + class CloudFileSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = CloudFile - fields = '__all__' + fields = "__all__" + class ImportedPodcastSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ImportedPodcast - fields = '__all__' + fields = "__all__" + class PodcastSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Podcast - fields = '__all__' + fields = "__all__" + class 
PodcastEpisodeSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PodcastEpisode - fields = '__all__' + fields = "__all__" + class StationPodcastSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = StationPodcast - fields = '__all__' + fields = "__all__" + class ThirdPartyTrackReferenceSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = ThirdPartyTrackReference - fields = '__all__' + fields = "__all__" + class TrackTypeSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = TrackType - fields = '__all__' + fields = "__all__" diff --git a/api/libretimeapi/settings.py b/api/libretimeapi/settings.py index 807d97187..d8dde05db 100644 --- a/api/libretimeapi/settings.py +++ b/api/libretimeapi/settings.py @@ -3,10 +3,11 @@ import configparser import os from .utils import read_config_file, get_random_string -LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime') -DEFAULT_CONFIG_PATH = os.getenv('LIBRETIME_CONF_FILE', - os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf')) -API_VERSION = '2.0.0' +LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime") +DEFAULT_CONFIG_PATH = os.getenv( + "LIBRETIME_CONF_FILE", os.path.join(LIBRETIME_CONF_DIR, "airtime.conf") +) +API_VERSION = "2.0.0" try: CONFIG = read_config_file(DEFAULT_CONFIG_PATH) @@ -18,70 +19,70 @@ except IOError: # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = get_random_string(CONFIG.get('general', 'api_key', fallback='')) +SECRET_KEY = get_random_string(CONFIG.get("general", "api_key", fallback="")) # SECURITY WARNING: don't run with debug turned on in production! 
-DEBUG = os.getenv('LIBRETIME_DEBUG', False) +DEBUG = os.getenv("LIBRETIME_DEBUG", False) -ALLOWED_HOSTS = ['*'] +ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ - 'libretimeapi.apps.LibreTimeAPIConfig', - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'rest_framework', - 'url_filter', + "libretimeapi.apps.LibreTimeAPIConfig", + "django.contrib.admin", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.staticfiles", + "rest_framework", + "url_filter", ] MIDDLEWARE = [ - 'django.middleware.security.SecurityMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", ] -ROOT_URLCONF = 'libretimeapi.urls' +ROOT_URLCONF = "libretimeapi.urls" TEMPLATES = [ { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': [], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": [], + "APP_DIRS": True, + 
"OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", ], }, }, ] -WSGI_APPLICATION = 'libretimeapi.wsgi.application' +WSGI_APPLICATION = "libretimeapi.wsgi.application" # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.postgresql', - 'NAME': CONFIG.get('database', 'dbname', fallback=''), - 'USER': CONFIG.get('database', 'dbuser', fallback=''), - 'PASSWORD': CONFIG.get('database', 'dbpass', fallback=''), - 'HOST': CONFIG.get('database', 'host', fallback=''), - 'PORT': '5432', + "default": { + "ENGINE": "django.db.backends.postgresql", + "NAME": CONFIG.get("database", "dbname", fallback=""), + "USER": CONFIG.get("database", "dbuser", fallback=""), + "PASSWORD": CONFIG.get("database", "dbpass", fallback=""), + "HOST": CONFIG.get("database", "host", fallback=""), + "PORT": "5432", } } @@ -91,40 +92,40 @@ DATABASES = { AUTH_PASSWORD_VALIDATORS = [ { - 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, { - 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, { - 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, { - 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] REST_FRAMEWORK = { - 'DEFAULT_AUTHENTICATION_CLASSES': ( - 'rest_framework.authentication.SessionAuthentication', - 'rest_framework.authentication.BasicAuthentication', + "DEFAULT_AUTHENTICATION_CLASSES": ( + 
"rest_framework.authentication.SessionAuthentication", + "rest_framework.authentication.BasicAuthentication", ), - 'DEFAULT_PERMISSION_CLASSES': [ - 'libretimeapi.permissions.IsSystemTokenOrUser', + "DEFAULT_PERMISSION_CLASSES": [ + "libretimeapi.permissions.IsSystemTokenOrUser", ], - 'DEFAULT_FILTER_BACKENDS': [ - 'url_filter.integrations.drf.DjangoFilterBackend', + "DEFAULT_FILTER_BACKENDS": [ + "url_filter.integrations.drf.DjangoFilterBackend", ], - 'URL_FIELD_NAME': 'item_url', + "URL_FIELD_NAME": "item_url", } # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ -LANGUAGE_CODE = 'en-us' +LANGUAGE_CODE = "en-us" -TIME_ZONE = 'UTC' +TIME_ZONE = "UTC" USE_I18N = True @@ -136,50 +137,53 @@ USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ -STATIC_URL = '/api/static/' +STATIC_URL = "/api/static/" if not DEBUG: - STATIC_ROOT = os.getenv('LIBRETIME_STATIC_ROOT', '/usr/share/airtime/api') + STATIC_ROOT = os.getenv("LIBRETIME_STATIC_ROOT", "/usr/share/airtime/api") -AUTH_USER_MODEL = 'libretimeapi.User' +AUTH_USER_MODEL = "libretimeapi.User" -TEST_RUNNER = 'libretimeapi.tests.runners.ManagedModelTestRunner' +TEST_RUNNER = "libretimeapi.tests.runners.ManagedModelTestRunner" LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'simple': { - 'format': '{levelname} {message}', - 'style': '{', + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "simple": { + "format": "{levelname} {message}", + "style": "{", }, - 'verbose': { - 'format': '{asctime} {module} {levelname} {message}', - 'style': '{', - } - }, - 'handlers': { - 'file': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filename': os.path.join(CONFIG.get('pypo', 'log_base_dir', fallback='.').replace('\'',''), 'api.log'), - 'formatter': 'verbose', - }, - 'console': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'simple', + "verbose": { + 
"format": "{asctime} {module} {levelname} {message}", + "style": "{", }, }, - 'loggers': { - 'django': { - 'handlers': ['file', 'console'], - 'level': 'INFO', - 'propogate': True, + "handlers": { + "file": { + "level": "DEBUG", + "class": "logging.FileHandler", + "filename": os.path.join( + CONFIG.get("pypo", "log_base_dir", fallback=".").replace("'", ""), + "api.log", + ), + "formatter": "verbose", }, - 'libretimeapi': { - 'handlers': ['file', 'console'], - 'level': 'INFO', - 'propogate': True, + "console": { + "level": "INFO", + "class": "logging.StreamHandler", + "formatter": "simple", + }, + }, + "loggers": { + "django": { + "handlers": ["file", "console"], + "level": "INFO", + "propogate": True, + }, + "libretimeapi": { + "handlers": ["file", "console"], + "level": "INFO", + "propogate": True, }, }, } diff --git a/api/libretimeapi/tests/runners.py b/api/libretimeapi/tests/runners.py index 888e59c79..973910759 100644 --- a/api/libretimeapi/tests/runners.py +++ b/api/libretimeapi/tests/runners.py @@ -8,18 +8,17 @@ class ManagedModelTestRunner(DiscoverRunner): project managed for the duration of the test run, so that one doesn't need to execute the SQL manually to create them. 
""" + def setup_test_environment(self, *args, **kwargs): from django.apps import apps - self.unmanaged_models = [m for m in apps.get_models() - if not m._meta.managed] + + self.unmanaged_models = [m for m in apps.get_models() if not m._meta.managed] for m in self.unmanaged_models: m._meta.managed = True - super(ManagedModelTestRunner, self).setup_test_environment(*args, - **kwargs) + super(ManagedModelTestRunner, self).setup_test_environment(*args, **kwargs) def teardown_test_environment(self, *args, **kwargs): - super(ManagedModelTestRunner, self).teardown_test_environment(*args, - **kwargs) + super(ManagedModelTestRunner, self).teardown_test_environment(*args, **kwargs) # reset unmanaged models for m in self.unmanaged_models: m._meta.managed = False diff --git a/api/libretimeapi/tests/test_models.py b/api/libretimeapi/tests/test_models.py index 89e0368f7..5581550cb 100644 --- a/api/libretimeapi/tests/test_models.py +++ b/api/libretimeapi/tests/test_models.py @@ -9,33 +9,40 @@ from libretimeapi.permission_constants import GROUPS class TestUserManager(APITestCase): def test_create_user(self): - user = User.objects.create_user('test', - email='test@example.com', - password='test', - type=DJ, - first_name='test', - last_name='user') + user = User.objects.create_user( + "test", + email="test@example.com", + password="test", + type=DJ, + first_name="test", + last_name="user", + ) db_user = User.objects.get(pk=user.pk) self.assertEqual(db_user.username, user.username) def test_create_superuser(self): - user = User.objects.create_superuser('test', - email='test@example.com', - password='test', - first_name='test', - last_name='user') + user = User.objects.create_superuser( + "test", + email="test@example.com", + password="test", + first_name="test", + last_name="user", + ) db_user = User.objects.get(pk=user.pk) self.assertEqual(db_user.username, user.username) + class TestUser(APITestCase): def test_guest_get_group_perms(self): - user = User.objects.create_user('test', - 
email='test@example.com', - password='test', - type=GUEST, - first_name='test', - last_name='user') + user = User.objects.create_user( + "test", + email="test@example.com", + password="test", + type=GUEST, + first_name="test", + last_name="user", + ) permissions = user.get_group_permissions() # APIRoot permission hardcoded in the check as it isn't a Permission object - str_perms = [p.codename for p in permissions] + ['view_apiroot'] + str_perms = [p.codename for p in permissions] + ["view_apiroot"] self.assertCountEqual(str_perms, GROUPS[GUEST]) diff --git a/api/libretimeapi/tests/test_permissions.py b/api/libretimeapi/tests/test_permissions.py index 4e557fcdd..31f98105e 100644 --- a/api/libretimeapi/tests/test_permissions.py +++ b/api/libretimeapi/tests/test_permissions.py @@ -6,7 +6,11 @@ from django.conf import settings from rest_framework.test import APITestCase, APIRequestFactory from model_bakery import baker from libretimeapi.permissions import IsSystemTokenOrUser -from libretimeapi.permission_constants import GUEST_PERMISSIONS, DJ_PERMISSIONS, PROGRAM_MANAGER_PERMISSIONS +from libretimeapi.permission_constants import ( + GUEST_PERMISSIONS, + DJ_PERMISSIONS, + PROGRAM_MANAGER_PERMISSIONS, +) from libretimeapi.models.user_constants import GUEST, DJ, PROGRAM_MANAGER, ADMIN @@ -16,54 +20,56 @@ class TestIsSystemTokenOrUser(APITestCase): cls.path = "/api/v2/files/" def test_unauthorized(self): - response = self.client.get(self.path.format('files')) + response = self.client.get(self.path.format("files")) self.assertEqual(response.status_code, 403) def test_token_incorrect(self): - token = 'doesnotexist' + token = "doesnotexist" request = APIRequestFactory().get(self.path) request.user = AnonymousUser() - request.META['Authorization'] = 'Api-Key {token}'.format(token=token) + request.META["Authorization"] = "Api-Key {token}".format(token=token) allowed = IsSystemTokenOrUser().has_permission(request, None) self.assertFalse(allowed) def test_token_correct(self): - 
token = settings.CONFIG.get('general', 'api_key') + token = settings.CONFIG.get("general", "api_key") request = APIRequestFactory().get(self.path) request.user = AnonymousUser() - request.META['Authorization'] = 'Api-Key {token}'.format(token=token) + request.META["Authorization"] = "Api-Key {token}".format(token=token) allowed = IsSystemTokenOrUser().has_permission(request, None) self.assertTrue(allowed) class TestPermissions(APITestCase): URLS = [ - 'schedule', - 'shows', - 'show-days', - 'show-hosts', - 'show-instances', - 'show-rebroadcasts', - 'files', - 'playlists', - 'playlist-contents', - 'smart-blocks', - 'smart-block-contents', - 'smart-block-criteria', - 'webstreams', + "schedule", + "shows", + "show-days", + "show-hosts", + "show-instances", + "show-rebroadcasts", + "files", + "playlists", + "playlist-contents", + "smart-blocks", + "smart-block-contents", + "smart-block-criteria", + "webstreams", ] def logged_in_test_model(self, model, name, user_type, fn): path = self.path.format(model) user_created = get_user_model().objects.filter(username=name) if not user_created: - user = get_user_model().objects.create_user(name, - email='test@example.com', - password='test', - type=user_type, - first_name='test', - last_name='user') - self.client.login(username=name, password='test') + user = get_user_model().objects.create_user( + name, + email="test@example.com", + password="test", + type=user_type, + first_name="test", + last_name="user", + ) + self.client.login(username=name, password="test") return fn(path) @classmethod @@ -72,49 +78,57 @@ class TestPermissions(APITestCase): def test_guest_permissions_success(self): for model in self.URLS: - response = self.logged_in_test_model(model, 'guest', GUEST, self.client.get) - self.assertEqual(response.status_code, 200, - msg='Invalid for model {}'.format(model)) + response = self.logged_in_test_model(model, "guest", GUEST, self.client.get) + self.assertEqual( + response.status_code, 200, msg="Invalid for model 
{}".format(model) + ) def test_guest_permissions_failure(self): for model in self.URLS: - response = self.logged_in_test_model(model, 'guest', GUEST, self.client.post) - self.assertEqual(response.status_code, 403, - msg='Invalid for model {}'.format(model)) - response = self.logged_in_test_model('users', 'guest', GUEST, self.client.get) - self.assertEqual(response.status_code, 403, msg='Invalid for model users') + response = self.logged_in_test_model( + model, "guest", GUEST, self.client.post + ) + self.assertEqual( + response.status_code, 403, msg="Invalid for model {}".format(model) + ) + response = self.logged_in_test_model("users", "guest", GUEST, self.client.get) + self.assertEqual(response.status_code, 403, msg="Invalid for model users") def test_dj_get_permissions(self): for model in self.URLS: - response = self.logged_in_test_model(model, 'dj', DJ, self.client.get) - self.assertEqual(response.status_code, 200, - msg='Invalid for model {}'.format(model)) + response = self.logged_in_test_model(model, "dj", DJ, self.client.get) + self.assertEqual( + response.status_code, 200, msg="Invalid for model {}".format(model) + ) def test_dj_post_permissions(self): - user = get_user_model().objects.create_user('test-dj', - email='test@example.com', - password='test', - type=DJ, - first_name='test', - last_name='user') - f = baker.make('libretimeapi.File', - owner=user) - model = 'files/{}'.format(f.id) + user = get_user_model().objects.create_user( + "test-dj", + email="test@example.com", + password="test", + type=DJ, + first_name="test", + last_name="user", + ) + f = baker.make("libretimeapi.File", owner=user) + model = "files/{}".format(f.id) path = self.path.format(model) - self.client.login(username='test-dj', password='test') - response = self.client.patch(path, {'name': 'newFilename'}) + self.client.login(username="test-dj", password="test") + response = self.client.patch(path, {"name": "newFilename"}) self.assertEqual(response.status_code, 200) def 
test_dj_post_permissions_failure(self): - user = get_user_model().objects.create_user('test-dj', - email='test@example.com', - password='test', - type=DJ, - first_name='test', - last_name='user') - f = baker.make('libretimeapi.File') - model = 'files/{}'.format(f.id) + user = get_user_model().objects.create_user( + "test-dj", + email="test@example.com", + password="test", + type=DJ, + first_name="test", + last_name="user", + ) + f = baker.make("libretimeapi.File") + model = "files/{}".format(f.id) path = self.path.format(model) - self.client.login(username='test-dj', password='test') - response = self.client.patch(path, {'name': 'newFilename'}) + self.client.login(username="test-dj", password="test") + response = self.client.patch(path, {"name": "newFilename"}) self.assertEqual(response.status_code, 403) diff --git a/api/libretimeapi/tests/test_views.py b/api/libretimeapi/tests/test_views.py index 0a16a20f0..0ee41b160 100644 --- a/api/libretimeapi/tests/test_views.py +++ b/api/libretimeapi/tests/test_views.py @@ -11,29 +11,32 @@ class TestFileViewSet(APITestCase): @classmethod def setUpTestData(cls): cls.path = "/api/v2/files/{id}/download/" - cls.token = settings.CONFIG.get('general', 'api_key') + cls.token = settings.CONFIG.get("general", "api_key") def test_invalid(self): - path = self.path.format(id='a') - self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token)) + path = self.path.format(id="a") + self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token)) response = self.client.get(path) self.assertEqual(response.status_code, 400) def test_does_not_exist(self): - path = self.path.format(id='1') - self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token)) + path = self.path.format(id="1") + self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token)) response = self.client.get(path) self.assertEqual(response.status_code, 404) def test_exists(self): - music_dir = 
baker.make('libretimeapi.MusicDir', - directory=os.path.join(os.path.dirname(__file__), - 'resources')) - f = baker.make('libretimeapi.File', - directory=music_dir, - mime='audio/mp3', - filepath='song.mp3') + music_dir = baker.make( + "libretimeapi.MusicDir", + directory=os.path.join(os.path.dirname(__file__), "resources"), + ) + f = baker.make( + "libretimeapi.File", + directory=music_dir, + mime="audio/mp3", + filepath="song.mp3", + ) path = self.path.format(id=str(f.pk)) - self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token)) + self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token)) response = self.client.get(path) self.assertEqual(response.status_code, 200) diff --git a/api/libretimeapi/urls.py b/api/libretimeapi/urls.py index 8dfca3cd3..365c17c81 100644 --- a/api/libretimeapi/urls.py +++ b/api/libretimeapi/urls.py @@ -5,48 +5,48 @@ from rest_framework import routers from .views import * router = routers.DefaultRouter() -router.register('smart-blocks', SmartBlockViewSet) -router.register('smart-block-contents', SmartBlockContentViewSet) -router.register('smart-block-criteria', SmartBlockCriteriaViewSet) -router.register('countries', CountryViewSet) -router.register('files', FileViewSet) -router.register('listener-counts', ListenerCountViewSet) -router.register('live-logs', LiveLogViewSet) -router.register('login-attempts', LoginAttemptViewSet) -router.register('mount-names', MountNameViewSet) -router.register('music-dirs', MusicDirViewSet) -router.register('playlists', PlaylistViewSet) -router.register('playlist-contents', PlaylistContentViewSet) -router.register('playout-history', PlayoutHistoryViewSet) -router.register('playout-history-metadata', PlayoutHistoryMetadataViewSet) -router.register('playout-history-templates', PlayoutHistoryTemplateViewSet) -router.register('playout-history-template-fields', PlayoutHistoryTemplateFieldViewSet) -router.register('preferences', PreferenceViewSet) -router.register('schedule', 
ScheduleViewSet) -router.register('service-registers', ServiceRegisterViewSet) -router.register('sessions', SessionViewSet) -router.register('shows', ShowViewSet) -router.register('show-days', ShowDaysViewSet) -router.register('show-hosts', ShowHostViewSet) -router.register('show-instances', ShowInstanceViewSet) -router.register('show-rebroadcasts', ShowRebroadcastViewSet) -router.register('stream-settings', StreamSettingViewSet) -router.register('users', UserViewSet) -router.register('user-tokens', UserTokenViewSet) -router.register('timestamps', TimestampViewSet) -router.register('webstreams', WebstreamViewSet) -router.register('webstream-metadata', WebstreamMetadataViewSet) -router.register('celery-tasks', CeleryTaskViewSet) -router.register('cloud-files', CloudFileViewSet) -router.register('imported-podcasts', ImportedPodcastViewSet) -router.register('podcasts', PodcastViewSet) -router.register('podcast-episodes', PodcastEpisodeViewSet) -router.register('station-podcasts', StationPodcastViewSet) -router.register('third-party-track-references', ThirdPartyTrackReferenceViewSet) -router.register('track-types', TrackTypeViewSet) +router.register("smart-blocks", SmartBlockViewSet) +router.register("smart-block-contents", SmartBlockContentViewSet) +router.register("smart-block-criteria", SmartBlockCriteriaViewSet) +router.register("countries", CountryViewSet) +router.register("files", FileViewSet) +router.register("listener-counts", ListenerCountViewSet) +router.register("live-logs", LiveLogViewSet) +router.register("login-attempts", LoginAttemptViewSet) +router.register("mount-names", MountNameViewSet) +router.register("music-dirs", MusicDirViewSet) +router.register("playlists", PlaylistViewSet) +router.register("playlist-contents", PlaylistContentViewSet) +router.register("playout-history", PlayoutHistoryViewSet) +router.register("playout-history-metadata", PlayoutHistoryMetadataViewSet) +router.register("playout-history-templates", PlayoutHistoryTemplateViewSet) 
+router.register("playout-history-template-fields", PlayoutHistoryTemplateFieldViewSet) +router.register("preferences", PreferenceViewSet) +router.register("schedule", ScheduleViewSet) +router.register("service-registers", ServiceRegisterViewSet) +router.register("sessions", SessionViewSet) +router.register("shows", ShowViewSet) +router.register("show-days", ShowDaysViewSet) +router.register("show-hosts", ShowHostViewSet) +router.register("show-instances", ShowInstanceViewSet) +router.register("show-rebroadcasts", ShowRebroadcastViewSet) +router.register("stream-settings", StreamSettingViewSet) +router.register("users", UserViewSet) +router.register("user-tokens", UserTokenViewSet) +router.register("timestamps", TimestampViewSet) +router.register("webstreams", WebstreamViewSet) +router.register("webstream-metadata", WebstreamMetadataViewSet) +router.register("celery-tasks", CeleryTaskViewSet) +router.register("cloud-files", CloudFileViewSet) +router.register("imported-podcasts", ImportedPodcastViewSet) +router.register("podcasts", PodcastViewSet) +router.register("podcast-episodes", PodcastEpisodeViewSet) +router.register("station-podcasts", StationPodcastViewSet) +router.register("third-party-track-references", ThirdPartyTrackReferenceViewSet) +router.register("track-types", TrackTypeViewSet) urlpatterns = [ - path('api/v2/', include(router.urls)), - path('api/v2/version/', version), - path('api-auth/', include('rest_framework.urls', namespace='rest_framework')), + path("api/v2/", include(router.urls)), + path("api/v2/version/", version), + path("api-auth/", include("rest_framework.urls", namespace="rest_framework")), ] diff --git a/api/libretimeapi/utils.py b/api/libretimeapi/utils.py index f67af5f5c..dda02b550 100644 --- a/api/libretimeapi/utils.py +++ b/api/libretimeapi/utils.py @@ -4,23 +4,27 @@ import sys import string import random + def read_config_file(config_path): """Parse the application's config file located at config_path.""" config = 
configparser.ConfigParser() try: config.readfp(open(config_path)) except IOError as e: - print("Failed to open config file at {}: {}".format(config_path, e.strerror), - file=sys.stderr) + print( + "Failed to open config file at {}: {}".format(config_path, e.strerror), + file=sys.stderr, + ) raise e except Exception as e: print(e.strerror, file=sys.stderr) raise e return config + def get_random_string(seed): """Generates a random string based on the given seed""" choices = string.ascii_letters + string.digits + string.punctuation - seed = seed.encode('utf-8') + seed = seed.encode("utf-8") rand = random.Random(seed) return [rand.choice(choices) for i in range(16)] diff --git a/api/libretimeapi/views.py b/api/libretimeapi/views.py index cc499f059..15636635b 100644 --- a/api/libretimeapi/views.py +++ b/api/libretimeapi/views.py @@ -10,220 +10,261 @@ from rest_framework.response import Response from .serializers import * from .permissions import IsAdminOrOwnUser + class UserViewSet(viewsets.ModelViewSet): queryset = get_user_model().objects.all() serializer_class = UserSerializer permission_classes = [IsAdminOrOwnUser] - model_permission_name = 'user' + model_permission_name = "user" + class SmartBlockViewSet(viewsets.ModelViewSet): queryset = SmartBlock.objects.all() serializer_class = SmartBlockSerializer - model_permission_name = 'smartblock' + model_permission_name = "smartblock" + class SmartBlockContentViewSet(viewsets.ModelViewSet): queryset = SmartBlockContent.objects.all() serializer_class = SmartBlockContentSerializer - model_permission_name = 'smartblockcontent' + model_permission_name = "smartblockcontent" + class SmartBlockCriteriaViewSet(viewsets.ModelViewSet): queryset = SmartBlockCriteria.objects.all() serializer_class = SmartBlockCriteriaSerializer - model_permission_name = 'smartblockcriteria' + model_permission_name = "smartblockcriteria" + class CountryViewSet(viewsets.ModelViewSet): queryset = Country.objects.all() serializer_class = 
CountrySerializer - model_permission_name = 'country' + model_permission_name = "country" + class FileViewSet(viewsets.ModelViewSet): queryset = File.objects.all() serializer_class = FileSerializer - model_permission_name = 'file' + model_permission_name = "file" - @action(detail=True, methods=['GET']) + @action(detail=True, methods=["GET"]) def download(self, request, pk=None): if pk is None: - return Response('No file requested', status=status.HTTP_400_BAD_REQUEST) + return Response("No file requested", status=status.HTTP_400_BAD_REQUEST) try: pk = int(pk) except ValueError: - return Response('File ID should be an integer', - status=status.HTTP_400_BAD_REQUEST) + return Response( + "File ID should be an integer", status=status.HTTP_400_BAD_REQUEST + ) filename = get_object_or_404(File, pk=pk) directory = filename.directory path = os.path.join(directory.directory, filename.filepath) - response = FileResponse(open(path, 'rb'), content_type=filename.mime) + response = FileResponse(open(path, "rb"), content_type=filename.mime) return response + class ListenerCountViewSet(viewsets.ModelViewSet): queryset = ListenerCount.objects.all() serializer_class = ListenerCountSerializer - model_permission_name = 'listenercount' + model_permission_name = "listenercount" + class LiveLogViewSet(viewsets.ModelViewSet): queryset = LiveLog.objects.all() serializer_class = LiveLogSerializer - model_permission_name = 'livelog' + model_permission_name = "livelog" + class LoginAttemptViewSet(viewsets.ModelViewSet): queryset = LoginAttempt.objects.all() serializer_class = LoginAttemptSerializer - model_permission_name = 'loginattempt' + model_permission_name = "loginattempt" + class MountNameViewSet(viewsets.ModelViewSet): queryset = MountName.objects.all() serializer_class = MountNameSerializer - model_permission_name = 'mountname' + model_permission_name = "mountname" + class MusicDirViewSet(viewsets.ModelViewSet): queryset = MusicDir.objects.all() serializer_class = MusicDirSerializer - 
model_permission_name = 'musicdir' + model_permission_name = "musicdir" + class PlaylistViewSet(viewsets.ModelViewSet): queryset = Playlist.objects.all() serializer_class = PlaylistSerializer - model_permission_name = 'playlist' + model_permission_name = "playlist" + class PlaylistContentViewSet(viewsets.ModelViewSet): queryset = PlaylistContent.objects.all() serializer_class = PlaylistContentSerializer - model_permission_name = 'playlistcontent' + model_permission_name = "playlistcontent" + class PlayoutHistoryViewSet(viewsets.ModelViewSet): queryset = PlayoutHistory.objects.all() serializer_class = PlayoutHistorySerializer - model_permission_name = 'playouthistory' + model_permission_name = "playouthistory" + class PlayoutHistoryMetadataViewSet(viewsets.ModelViewSet): queryset = PlayoutHistoryMetadata.objects.all() serializer_class = PlayoutHistoryMetadataSerializer - model_permission_name = 'playouthistorymetadata' + model_permission_name = "playouthistorymetadata" + class PlayoutHistoryTemplateViewSet(viewsets.ModelViewSet): queryset = PlayoutHistoryTemplate.objects.all() serializer_class = PlayoutHistoryTemplateSerializer - model_permission_name = 'playouthistorytemplate' + model_permission_name = "playouthistorytemplate" + class PlayoutHistoryTemplateFieldViewSet(viewsets.ModelViewSet): queryset = PlayoutHistoryTemplateField.objects.all() serializer_class = PlayoutHistoryTemplateFieldSerializer - model_permission_name = 'playouthistorytemplatefield' + model_permission_name = "playouthistorytemplatefield" + class PreferenceViewSet(viewsets.ModelViewSet): queryset = Preference.objects.all() serializer_class = PreferenceSerializer - model_permission_name = 'perference' + model_permission_name = "perference" + class ScheduleViewSet(viewsets.ModelViewSet): queryset = Schedule.objects.all() serializer_class = ScheduleSerializer - filter_fields = ('starts', 'ends', 'playout_status', 'broadcasted') - model_permission_name = 'schedule' + filter_fields = ("starts", 
"ends", "playout_status", "broadcasted") + model_permission_name = "schedule" + class ServiceRegisterViewSet(viewsets.ModelViewSet): queryset = ServiceRegister.objects.all() serializer_class = ServiceRegisterSerializer - model_permission_name = 'serviceregister' + model_permission_name = "serviceregister" + class SessionViewSet(viewsets.ModelViewSet): queryset = Session.objects.all() serializer_class = SessionSerializer - model_permission_name = 'session' + model_permission_name = "session" + class ShowViewSet(viewsets.ModelViewSet): queryset = Show.objects.all() serializer_class = ShowSerializer - model_permission_name = 'show' + model_permission_name = "show" + class ShowDaysViewSet(viewsets.ModelViewSet): queryset = ShowDays.objects.all() serializer_class = ShowDaysSerializer - model_permission_name = 'showdays' + model_permission_name = "showdays" + class ShowHostViewSet(viewsets.ModelViewSet): queryset = ShowHost.objects.all() serializer_class = ShowHostSerializer - model_permission_name = 'showhost' + model_permission_name = "showhost" + class ShowInstanceViewSet(viewsets.ModelViewSet): queryset = ShowInstance.objects.all() serializer_class = ShowInstanceSerializer - model_permission_name = 'showinstance' + model_permission_name = "showinstance" + class ShowRebroadcastViewSet(viewsets.ModelViewSet): queryset = ShowRebroadcast.objects.all() serializer_class = ShowRebroadcastSerializer - model_permission_name = 'showrebroadcast' + model_permission_name = "showrebroadcast" + class StreamSettingViewSet(viewsets.ModelViewSet): queryset = StreamSetting.objects.all() serializer_class = StreamSettingSerializer - model_permission_name = 'streamsetting' + model_permission_name = "streamsetting" + class UserTokenViewSet(viewsets.ModelViewSet): queryset = UserToken.objects.all() serializer_class = UserTokenSerializer - model_permission_name = 'usertoken' + model_permission_name = "usertoken" + class TimestampViewSet(viewsets.ModelViewSet): queryset = 
Timestamp.objects.all() serializer_class = TimestampSerializer - model_permission_name = 'timestamp' + model_permission_name = "timestamp" + class WebstreamViewSet(viewsets.ModelViewSet): queryset = Webstream.objects.all() serializer_class = WebstreamSerializer - model_permission_name = 'webstream' + model_permission_name = "webstream" + class WebstreamMetadataViewSet(viewsets.ModelViewSet): queryset = WebstreamMetadata.objects.all() serializer_class = WebstreamMetadataSerializer - model_permission_name = 'webstreametadata' + model_permission_name = "webstreametadata" + class CeleryTaskViewSet(viewsets.ModelViewSet): queryset = CeleryTask.objects.all() serializer_class = CeleryTaskSerializer - model_permission_name = 'celerytask' + model_permission_name = "celerytask" + class CloudFileViewSet(viewsets.ModelViewSet): queryset = CloudFile.objects.all() serializer_class = CloudFileSerializer - model_permission_name = 'cloudfile' + model_permission_name = "cloudfile" + class ImportedPodcastViewSet(viewsets.ModelViewSet): queryset = ImportedPodcast.objects.all() serializer_class = ImportedPodcastSerializer - model_permission_name = 'importedpodcast' + model_permission_name = "importedpodcast" + class PodcastViewSet(viewsets.ModelViewSet): queryset = Podcast.objects.all() serializer_class = PodcastSerializer - model_permission_name = 'podcast' + model_permission_name = "podcast" + class PodcastEpisodeViewSet(viewsets.ModelViewSet): queryset = PodcastEpisode.objects.all() serializer_class = PodcastEpisodeSerializer - model_permission_name = 'podcastepisode' + model_permission_name = "podcastepisode" + class StationPodcastViewSet(viewsets.ModelViewSet): queryset = StationPodcast.objects.all() serializer_class = StationPodcastSerializer - model_permission_name = 'station' + model_permission_name = "station" + class ThirdPartyTrackReferenceViewSet(viewsets.ModelViewSet): queryset = ThirdPartyTrackReference.objects.all() serializer_class = ThirdPartyTrackReferenceSerializer - 
model_permission_name = 'thirdpartytrackreference' + model_permission_name = "thirdpartytrackreference" + class TrackTypeViewSet(viewsets.ModelViewSet): queryset = TrackType.objects.all() serializer_class = TrackTypeSerializer - model_permission_name = 'tracktype' + model_permission_name = "tracktype" -@api_view(['GET']) -@permission_classes((AllowAny, )) + +@api_view(["GET"]) +@permission_classes((AllowAny,)) def version(request, *args, **kwargs): - return Response({'api_version': settings.API_VERSION}) + return Response({"api_version": settings.API_VERSION}) diff --git a/api/libretimeapi/wsgi.py b/api/libretimeapi/wsgi.py index 11e2a4c75..b7b37f5e1 100644 --- a/api/libretimeapi/wsgi.py +++ b/api/libretimeapi/wsgi.py @@ -12,6 +12,6 @@ import os from django.core.wsgi import get_wsgi_application -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'libretimeapi.settings') +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "libretimeapi.settings") application = get_wsgi_application() diff --git a/api/setup.py b/api/setup.py index 533cb8b56..98d48f50f 100644 --- a/api/setup.py +++ b/api/setup.py @@ -8,26 +8,26 @@ print(script_path) os.chdir(script_path) setup( - name='libretime-api', - version='2.0.0a1', + name="libretime-api", + version="2.0.0a1", packages=find_packages(), include_package_data=True, - description='LibreTime API backend server', - url='https://github.com/LibreTime/libretime', - author='LibreTime Contributors', - scripts=['bin/libretime-api'], + description="LibreTime API backend server", + url="https://github.com/LibreTime/libretime", + author="LibreTime Contributors", + scripts=["bin/libretime-api"], install_requires=[ - 'coreapi', - 'Django~=3.0', - 'djangorestframework', - 'django-url-filter', - 'markdown', - 'model_bakery', - 'psycopg2', + "coreapi", + "Django~=3.0", + "djangorestframework", + "django-url-filter", + "markdown", + "model_bakery", + "psycopg2", ], project_urls={ - 'Bug Tracker': 'https://github.com/LibreTime/libretime/issues', - 
'Documentation': 'https://libretime.org', - 'Source Code': 'https://github.com/LibreTime/libretime', + "Bug Tracker": "https://github.com/LibreTime/libretime/issues", + "Documentation": "https://libretime.org", + "Source Code": "https://github.com/LibreTime/libretime", }, ) diff --git a/dev_tools/compare_cc_files_to_fs.py b/dev_tools/compare_cc_files_to_fs.py index 80b727222..5b83d27df 100644 --- a/dev_tools/compare_cc_files_to_fs.py +++ b/dev_tools/compare_cc_files_to_fs.py @@ -16,8 +16,8 @@ similar code when it starts up (but then makes changes if something is different """ -class AirtimeMediaMonitorBootstrap(): - +class AirtimeMediaMonitorBootstrap: + """AirtimeMediaMonitorBootstrap constructor Keyword Arguments: @@ -25,8 +25,9 @@ class AirtimeMediaMonitorBootstrap(): pe -- reference to an instance of ProcessEvent api_clients -- reference of api_clients to communicate with airtime-server """ + def __init__(self): - config = ConfigObj('/etc/airtime/airtime.conf') + config = ConfigObj("/etc/airtime/airtime.conf") self.api_client = apc.api_client_factory(config) """ @@ -36,25 +37,26 @@ class AirtimeMediaMonitorBootstrap(): print 'Error configuring logging: ', e sys.exit(1) """ - + self.logger = logging.getLogger() self.logger.info("Adding %s on watch list...", "xxx") - + self.scan() - + """On bootup we want to scan all directories and look for files that weren't there or files that changed before media-monitor process went offline. """ + def scan(self): - directories = self.get_list_of_watched_dirs(); + directories = self.get_list_of_watched_dirs() self.logger.info("watched directories found: %s", directories) for id, dir in directories.iteritems(): self.logger.debug("%s, %s", id, dir) - #CHANGED!!! - #self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8")) + # CHANGED!!! 
+ # self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8")) self.sync_database_to_filesystem(id, dir) """Gets a list of files that the Airtime database knows for a specific directory. @@ -62,6 +64,7 @@ class AirtimeMediaMonitorBootstrap(): get_list_of_watched_dirs function. dir_id -- row id of the directory in the cc_watched_dirs database table """ + def list_db_files(self, dir_id): return self.api_client.list_all_db_files(dir_id) @@ -69,23 +72,29 @@ class AirtimeMediaMonitorBootstrap(): returns the path and the database row id for this path for all watched directories. Also returns the Stor directory, which can be identified by its row id (always has value of "1") """ + def get_list_of_watched_dirs(self): json = self.api_client.list_all_watched_dirs() return json["dirs"] - + def scan_dir_for_existing_files(self, dir): - command = 'find "%s" -type f -iname "*.ogg" -o -iname "*.mp3" -readable' % dir.replace('"', '\\"') + command = ( + 'find "%s" -type f -iname "*.ogg" -o -iname "*.mp3" -readable' + % dir.replace('"', '\\"') + ) self.logger.debug(command) - #CHANGED!! + # CHANGED!! stdout = self.exec_command(command).decode("UTF-8") - + return stdout.splitlines() - + def exec_command(self, command): p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - self.logger.warn("command \n%s\n return with a non-zero return value", command) + self.logger.warn( + "command \n%s\n return with a non-zero return value", command + ) self.logger.error(stderr) return stdout @@ -98,6 +107,7 @@ class AirtimeMediaMonitorBootstrap(): dir_id -- row id of the directory in the cc_watched_dirs database table dir -- pathname of the directory """ + def sync_database_to_filesystem(self, dir_id, dir): """ set to hold new and/or modified files. 
We use a set to make it ok if files are added @@ -107,7 +117,7 @@ class AirtimeMediaMonitorBootstrap(): db_known_files_set = set() files = self.list_db_files(dir_id) - for file in files['files']: + for file in files["files"]: db_known_files_set.add(file) existing_files = self.scan_dir_for_existing_files(dir) @@ -115,18 +125,17 @@ class AirtimeMediaMonitorBootstrap(): existing_files_set = set() for file_path in existing_files: if len(file_path.strip(" \n")) > 0: - existing_files_set.add(file_path[len(dir):]) + existing_files_set.add(file_path[len(dir) :]) - deleted_files_set = db_known_files_set - existing_files_set new_files_set = existing_files_set - db_known_files_set + print("DB Known files: \n%s\n\n" % len(db_known_files_set)) + print("FS Known files: \n%s\n\n" % len(existing_files_set)) + + print("Deleted files: \n%s\n\n" % deleted_files_set) + print("New files: \n%s\n\n" % new_files_set) + - print ("DB Known files: \n%s\n\n"%len(db_known_files_set)) - print ("FS Known files: \n%s\n\n"%len(existing_files_set)) - - print ("Deleted files: \n%s\n\n"%deleted_files_set) - print ("New files: \n%s\n\n"%new_files_set) - if __name__ == "__main__": AirtimeMediaMonitorBootstrap() diff --git a/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py index b8e52d5c4..2ec4504d2 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/airtime_analyzer.py @@ -10,24 +10,25 @@ from . import config_file from functools import partial from .metadata_analyzer import MetadataAnalyzer from .replaygain_analyzer import ReplayGainAnalyzer -from .status_reporter import StatusReporter +from .status_reporter import StatusReporter from .message_listener import MessageListener class AirtimeAnalyzerServer: - """A server for importing uploads to Airtime as background jobs. 
- """ + """A server for importing uploads to Airtime as background jobs.""" - # Constants + # Constants _LOG_PATH = "/var/log/airtime/airtime_analyzer.log" - + # Variables _log_level = logging.INFO def __init__(self, rmq_config_path, http_retry_queue_path, debug=False): # Dump a stacktrace with 'kill -SIGUSR2 ' - signal.signal(signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace()) + signal.signal( + signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace() + ) # Configure logging self.setup_logging(debug) @@ -43,11 +44,10 @@ class AirtimeAnalyzerServer: self._msg_listener = MessageListener(rmq_config) StatusReporter.stop_thread() - def setup_logging(self, debug): """Set up nicely formatted logging and log rotation. - + Keyword arguments: debug -- a boolean indicating whether to enable super verbose logging to the screen and disk. @@ -55,27 +55,30 @@ class AirtimeAnalyzerServer: if debug: self._log_level = logging.DEBUG else: - #Disable most pika/rabbitmq logging: - pika_logger = logging.getLogger('pika') + # Disable most pika/rabbitmq logging: + pika_logger = logging.getLogger("pika") pika_logger.setLevel(logging.CRITICAL) - + # Set up logging - logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s") + logFormatter = logging.Formatter( + "%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s" + ) rootLogger = logging.getLogger() rootLogger.setLevel(self._log_level) - fileHandler = logging.handlers.RotatingFileHandler(filename=self._LOG_PATH, maxBytes=1024*1024*30, - backupCount=8) + fileHandler = logging.handlers.RotatingFileHandler( + filename=self._LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8 + ) fileHandler.setFormatter(logFormatter) rootLogger.addHandler(fileHandler) consoleHandler = logging.StreamHandler() consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) - + @classmethod def dump_stacktrace(stack): - ''' Dump a stacktrace for all threads ''' + 
"""Dump a stacktrace for all threads""" code = [] for threadId, stack in list(sys._current_frames().items()): code.append("\n# ThreadID: %s" % threadId) @@ -83,4 +86,4 @@ class AirtimeAnalyzerServer: code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) - logging.info('\n'.join(code)) + logging.info("\n".join(code)) diff --git a/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py index f476c6f9d..c58f609ba 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/analyzer.py @@ -3,8 +3,7 @@ class Analyzer: - """ Abstract base class for all "analyzers". - """ + """Abstract base class for all "analyzers".""" @staticmethod def analyze(filename, metadata): diff --git a/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py b/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py index e0b66618e..18c536152 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py @@ -12,20 +12,28 @@ from .cuepoint_analyzer import CuePointAnalyzer from .replaygain_analyzer import ReplayGainAnalyzer from .playability_analyzer import * -class AnalyzerPipeline: - """ Analyzes and imports an audio file into the Airtime library. - This currently performs metadata extraction (eg. gets the ID3 tags from an MP3), - then moves the file to the Airtime music library (stor/imported), and returns - the results back to the parent process. This class is used in an isolated process - so that if it crashes, it does not kill the entire airtime_analyzer daemon and - the failure to import can be reported back to the web application. +class AnalyzerPipeline: + """Analyzes and imports an audio file into the Airtime library. + + This currently performs metadata extraction (eg. 
gets the ID3 tags from an MP3), + then moves the file to the Airtime music library (stor/imported), and returns + the results back to the parent process. This class is used in an isolated process + so that if it crashes, it does not kill the entire airtime_analyzer daemon and + the failure to import can be reported back to the web application. """ IMPORT_STATUS_FAILED = 2 @staticmethod - def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix): + def run_analysis( + queue, + audio_file_path, + import_directory, + original_filename, + storage_backend, + file_prefix, + ): """Analyze and import an audio file, and put all extracted metadata into queue. Keyword arguments: @@ -50,14 +58,29 @@ class AnalyzerPipeline: if not isinstance(queue, Queue): raise TypeError("queue must be a Queue.Queue()") if not isinstance(audio_file_path, str): - raise TypeError("audio_file_path must be unicode. Was of type " + type(audio_file_path).__name__ + " instead.") + raise TypeError( + "audio_file_path must be unicode. Was of type " + + type(audio_file_path).__name__ + + " instead." + ) if not isinstance(import_directory, str): - raise TypeError("import_directory must be unicode. Was of type " + type(import_directory).__name__ + " instead.") + raise TypeError( + "import_directory must be unicode. Was of type " + + type(import_directory).__name__ + + " instead." + ) if not isinstance(original_filename, str): - raise TypeError("original_filename must be unicode. Was of type " + type(original_filename).__name__ + " instead.") + raise TypeError( + "original_filename must be unicode. Was of type " + + type(original_filename).__name__ + + " instead." + ) if not isinstance(file_prefix, str): - raise TypeError("file_prefix must be unicode. Was of type " + type(file_prefix).__name__ + " instead.") - + raise TypeError( + "file_prefix must be unicode. Was of type " + + type(file_prefix).__name__ + + " instead." 
+ ) # Analyze the audio file we were told to analyze: # First, we extract the ID3 tags and other metadata: @@ -69,9 +92,11 @@ class AnalyzerPipeline: metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata) metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata) - metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata) + metadata = FileMoverAnalyzer.move( + audio_file_path, import_directory, original_filename, metadata + ) - metadata["import_status"] = 0 # Successfully imported + metadata["import_status"] = 0 # Successfully imported # Note that the queue we're putting the results into is our interprocess communication # back to the main process. @@ -93,9 +118,8 @@ class AnalyzerPipeline: def python_logger_deadlock_workaround(): # Workaround for: http://bugs.python.org/issue6721#msg140215 logger_names = list(logging.Logger.manager.loggerDict.keys()) - logger_names.append(None) # Root logger + logger_names.append(None) # Root logger for name in logger_names: for handler in logging.getLogger(name).handlers: handler.createLock() logging._lock = threading.RLock() - diff --git a/python_apps/airtime_analyzer/airtime_analyzer/cli.py b/python_apps/airtime_analyzer/airtime_analyzer/cli.py index 3161b6aa2..794a0f6eb 100755 --- a/python_apps/airtime_analyzer/airtime_analyzer/cli.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/cli.py @@ -9,21 +9,32 @@ import os import airtime_analyzer.airtime_analyzer as aa VERSION = "1.0" -LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime') -DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf') -DEFAULT_HTTP_RETRY_PATH = '/tmp/airtime_analyzer_http_retries' +LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime") +DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, "airtime.conf") +DEFAULT_HTTP_RETRY_PATH = "/tmp/airtime_analyzer_http_retries" + def main(): - '''Entry-point for this application''' + """Entry-point for 
this application""" print("LibreTime Analyzer {}".format(VERSION)) parser = argparse.ArgumentParser() parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true") - parser.add_argument("--debug", help="log full debugging output", action="store_true") - parser.add_argument("--rmq-config-file", help="specify a configuration file with RabbitMQ settings (default is %s)" % DEFAULT_RMQ_CONFIG_PATH) - parser.add_argument("--http-retry-queue-file", help="specify where incompleted HTTP requests will be serialized (default is %s)" % DEFAULT_HTTP_RETRY_PATH) + parser.add_argument( + "--debug", help="log full debugging output", action="store_true" + ) + parser.add_argument( + "--rmq-config-file", + help="specify a configuration file with RabbitMQ settings (default is %s)" + % DEFAULT_RMQ_CONFIG_PATH, + ) + parser.add_argument( + "--http-retry-queue-file", + help="specify where incompleted HTTP requests will be serialized (default is %s)" + % DEFAULT_HTTP_RETRY_PATH, + ) args = parser.parse_args() - #Default config file path + # Default config file path rmq_config_path = DEFAULT_RMQ_CONFIG_PATH http_retry_queue_path = DEFAULT_HTTP_RETRY_PATH if args.rmq_config_file: @@ -33,14 +44,19 @@ def main(): if args.daemon: with daemon.DaemonContext(): - aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path, - http_retry_queue_path=http_retry_queue_path, - debug=args.debug) + aa.AirtimeAnalyzerServer( + rmq_config_path=rmq_config_path, + http_retry_queue_path=http_retry_queue_path, + debug=args.debug, + ) else: # Run without daemonizing - aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path, - http_retry_queue_path=http_retry_queue_path, - debug=args.debug) + aa.AirtimeAnalyzerServer( + rmq_config_path=rmq_config_path, + http_retry_queue_path=http_retry_queue_path, + debug=args.debug, + ) + if __name__ == "__main__": main() diff --git a/python_apps/airtime_analyzer/airtime_analyzer/config_file.py b/python_apps/airtime_analyzer/airtime_analyzer/config_file.py 
index fc0e41af7..7aee190b4 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/config_file.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/config_file.py @@ -2,6 +2,7 @@ import configparser + def read_config_file(config_path): """Parse the application's config file located at config_path.""" config = configparser.SafeConfigParser() diff --git a/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py index bc69b49c8..fdaf63744 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/cuepoint_analyzer.py @@ -8,26 +8,38 @@ from .analyzer import Analyzer class CuePointAnalyzer(Analyzer): - ''' This class extracts the cue-in time, cue-out time, and length of a track using silan. ''' + """This class extracts the cue-in time, cue-out time, and length of a track using silan.""" - SILAN_EXECUTABLE = 'silan' + SILAN_EXECUTABLE = "silan" @staticmethod def analyze(filename, metadata): - ''' Extracts the cue-in and cue-out times along and sets the file duration based on that. + """Extracts the cue-in and cue-out times along and sets the file duration based on that. The cue points are there to skip the silence at the start and end of a track, and are determined using "silan", which analyzes the loudness in a track. :param filename: The full path to the file to analyzer :param metadata: A metadata dictionary where the results will be put :return: The metadata dictionary - ''' - ''' The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting, + """ + """ The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting, the unit test on the short m4a file fails. With the new setting, it gets the correct cue-in time and all the unit tests pass. 
- ''' - command = [CuePointAnalyzer.SILAN_EXECUTABLE, '-b', '-F', '0.99', '-f', 'JSON', '-t', '1.0', filename] + """ + command = [ + CuePointAnalyzer.SILAN_EXECUTABLE, + "-b", + "-F", + "0.99", + "-f", + "JSON", + "-t", + "1.0", + filename, + ] try: - results_json = subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True) + results_json = subprocess.check_output( + command, stderr=subprocess.STDOUT, close_fds=True + ) try: results_json = results_json.decode() except (UnicodeDecodeError, AttributeError): @@ -35,40 +47,51 @@ class CuePointAnalyzer(Analyzer): silan_results = json.loads(results_json) # Defensive coding against Silan wildly miscalculating the cue in and out times: - silan_length_seconds = float(silan_results['file duration']) - silan_cuein = format(silan_results['sound'][0][0], 'f') - silan_cueout = format(silan_results['sound'][0][1], 'f') + silan_length_seconds = float(silan_results["file duration"]) + silan_cuein = format(silan_results["sound"][0][0], "f") + silan_cueout = format(silan_results["sound"][0][1], "f") # Sanity check the results against any existing metadata passed to us (presumably extracted by Mutagen): - if 'length_seconds' in metadata: + if "length_seconds" in metadata: # Silan has a rare bug where it can massively overestimate the length or cue out time sometimes. - if (silan_length_seconds - metadata['length_seconds'] > 3) or (float(silan_cueout) - metadata['length_seconds'] > 2): + if (silan_length_seconds - metadata["length_seconds"] > 3) or ( + float(silan_cueout) - metadata["length_seconds"] > 2 + ): # Don't trust anything silan says then... - raise Exception("Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values." - .format(silan_cueout, silan_length_seconds, metadata['length_seconds'])) + raise Exception( + "Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. 
Ignoring Silan values.".format( + silan_cueout, + silan_length_seconds, + metadata["length_seconds"], + ) + ) # Don't allow silan to trim more than the greater of 3 seconds or 5% off the start of a track - if float(silan_cuein) > max(silan_length_seconds*0.05, 3): - raise Exception("Silan cue in time {0} too big, ignoring.".format(silan_cuein)) + if float(silan_cuein) > max(silan_length_seconds * 0.05, 3): + raise Exception( + "Silan cue in time {0} too big, ignoring.".format(silan_cuein) + ) else: # Only use the Silan track length in the worst case, where Mutagen didn't give us one for some reason. # (This is mostly to make the unit tests still pass.) # Convert the length into a formatted time string. - metadata['length_seconds'] = silan_length_seconds # - track_length = datetime.timedelta(seconds=metadata['length_seconds']) + metadata["length_seconds"] = silan_length_seconds # + track_length = datetime.timedelta(seconds=metadata["length_seconds"]) metadata["length"] = str(track_length) - - ''' XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan + """ XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan as of Mutagen version 1.31. We are always going to use Mutagen's length now because Silan's length can be off by a few seconds reasonably often. - ''' + """ - metadata['cuein'] = silan_cuein - metadata['cueout'] = silan_cueout + metadata["cuein"] = silan_cuein + metadata["cueout"] = silan_cueout - except OSError as e: # silan was not found - logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have silan installed?")) - except subprocess.CalledProcessError as e: # silan returned an error code + except OSError as e: # silan was not found + logging.warn( + "Failed to run: %s - %s. 
%s" + % (command[0], e.strerror, "Do you have silan installed?") + ) + except subprocess.CalledProcessError as e: # silan returned an error code logging.warn("%s %s %s", e.cmd, e.output, e.returncode) except Exception as e: logging.warn(e) diff --git a/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py index 37a0672a8..2846e7e43 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/filemover_analyzer.py @@ -9,10 +9,12 @@ import uuid from .analyzer import Analyzer + class FileMoverAnalyzer(Analyzer): """This analyzer copies a file over from a temporary directory (stor/organize) - into the Airtime library (stor/imported). + into the Airtime library (stor/imported). """ + @staticmethod def analyze(audio_file_path, metadata): """Dummy method because we need more info than analyze gets passed to it""" @@ -21,27 +23,38 @@ class FileMoverAnalyzer(Analyzer): @staticmethod def move(audio_file_path, import_directory, original_filename, metadata): """Move the file at audio_file_path over into the import_directory/import, - renaming it to original_filename. + renaming it to original_filename. - Keyword arguments: - audio_file_path: Path to the file to be imported. - import_directory: Path to the "import" directory inside the Airtime stor directory. - (eg. /srv/airtime/stor/import) - original_filename: The filename of the file when it was uploaded to Airtime. - metadata: A dictionary where the "full_path" of where the file is moved to will be added. + Keyword arguments: + audio_file_path: Path to the file to be imported. + import_directory: Path to the "import" directory inside the Airtime stor directory. + (eg. /srv/airtime/stor/import) + original_filename: The filename of the file when it was uploaded to Airtime. + metadata: A dictionary where the "full_path" of where the file is moved to will be added. 
""" if not isinstance(audio_file_path, str): - raise TypeError("audio_file_path must be string. Was of type " + type(audio_file_path).__name__) + raise TypeError( + "audio_file_path must be string. Was of type " + + type(audio_file_path).__name__ + ) if not isinstance(import_directory, str): - raise TypeError("import_directory must be string. Was of type " + type(import_directory).__name__) + raise TypeError( + "import_directory must be string. Was of type " + + type(import_directory).__name__ + ) if not isinstance(original_filename, str): - raise TypeError("original_filename must be string. Was of type " + type(original_filename).__name__) + raise TypeError( + "original_filename must be string. Was of type " + + type(original_filename).__name__ + ) if not isinstance(metadata, dict): - raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__) + raise TypeError( + "metadata must be a dict. Was of type " + type(metadata).__name__ + ) if not os.path.exists(audio_file_path): raise FileNotFoundError("audio file not found: {}".format(audio_file_path)) - #Import the file over to it's final location. + # Import the file over to it's final location. # TODO: Also, handle the case where the move fails and write some code # to possibly move the file to problem_files. @@ -50,52 +63,65 @@ class FileMoverAnalyzer(Analyzer): final_file_path = import_directory orig_file_basename, orig_file_extension = os.path.splitext(original_filename) if "artist_name" in metadata: - final_file_path += "/" + metadata["artist_name"][0:max_dir_len] # truncating with array slicing + final_file_path += ( + "/" + metadata["artist_name"][0:max_dir_len] + ) # truncating with array slicing if "album_title" in metadata: final_file_path += "/" + metadata["album_title"][0:max_dir_len] # Note that orig_file_extension includes the "." 
already - final_file_path += "/" + orig_file_basename[0:max_file_len] + orig_file_extension + final_file_path += ( + "/" + orig_file_basename[0:max_file_len] + orig_file_extension + ) - #Ensure any redundant slashes are stripped + # Ensure any redundant slashes are stripped final_file_path = os.path.normpath(final_file_path) - #If a file with the same name already exists in the "import" directory, then - #we add a unique string to the end of this one. We never overwrite a file on import - #because if we did that, it would mean Airtime's database would have - #the wrong information for the file we just overwrote (eg. the song length would be wrong!) - #If the final file path is the same as the file we've been told to import (which - #you often do when you're debugging), then don't move the file at all. + # If a file with the same name already exists in the "import" directory, then + # we add a unique string to the end of this one. We never overwrite a file on import + # because if we did that, it would mean Airtime's database would have + # the wrong information for the file we just overwrote (eg. the song length would be wrong!) + # If the final file path is the same as the file we've been told to import (which + # you often do when you're debugging), then don't move the file at all. 
if os.path.exists(final_file_path): if os.path.samefile(audio_file_path, final_file_path): metadata["full_path"] = final_file_path return metadata base_file_path, file_extension = os.path.splitext(final_file_path) - final_file_path = "%s_%s%s" % (base_file_path, time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()), file_extension) + final_file_path = "%s_%s%s" % ( + base_file_path, + time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()), + file_extension, + ) - #If THAT path exists, append a UUID instead: + # If THAT path exists, append a UUID instead: while os.path.exists(final_file_path): base_file_path, file_extension = os.path.splitext(final_file_path) - final_file_path = "%s_%s%s" % (base_file_path, str(uuid.uuid4()), file_extension) + final_file_path = "%s_%s%s" % ( + base_file_path, + str(uuid.uuid4()), + file_extension, + ) - #Ensure the full path to the file exists + # Ensure the full path to the file exists mkdir_p(os.path.dirname(final_file_path)) - #Move the file into its final destination directory + # Move the file into its final destination directory logging.debug("Moving %s to %s" % (audio_file_path, final_file_path)) shutil.move(audio_file_path, final_file_path) metadata["full_path"] = final_file_path return metadata + def mkdir_p(path): - """ Make all directories in a tree (like mkdir -p)""" + """Make all directories in a tree (like mkdir -p)""" if path == "": return try: os.makedirs(path) - except OSError as exc: # Python >2.5 + except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass - else: raise - + else: + raise diff --git a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py index 7c3cc29ab..965205195 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py @@ -5,8 +5,8 @@ import json import time import select import signal -import 
logging -import multiprocessing +import logging +import multiprocessing import queue from .analyzer_pipeline import AnalyzerPipeline from .status_reporter import StatusReporter @@ -54,29 +54,30 @@ QUEUE = "airtime-uploads" So that is a quick overview of the design constraints for this application, and why airtime_analyzer is written this way. """ -class MessageListener: + +class MessageListener: def __init__(self, rmq_config): - ''' Start listening for file upload notification messages - from RabbitMQ - - Keyword arguments: - rmq_config: A ConfigParser object containing the [rabbitmq] configuration. - ''' - + """Start listening for file upload notification messages + from RabbitMQ + + Keyword arguments: + rmq_config: A ConfigParser object containing the [rabbitmq] configuration. + """ + self._shutdown = False # Read the RabbitMQ connection settings from the rmq_config file - # The exceptions throw here by default give good error messages. + # The exceptions throw here by default give good error messages. RMQ_CONFIG_SECTION = "rabbitmq" - self._host = rmq_config.get(RMQ_CONFIG_SECTION, 'host') - self._port = rmq_config.getint(RMQ_CONFIG_SECTION, 'port') - self._username = rmq_config.get(RMQ_CONFIG_SECTION, 'user') - self._password = rmq_config.get(RMQ_CONFIG_SECTION, 'password') - self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, 'vhost') + self._host = rmq_config.get(RMQ_CONFIG_SECTION, "host") + self._port = rmq_config.getint(RMQ_CONFIG_SECTION, "port") + self._username = rmq_config.get(RMQ_CONFIG_SECTION, "user") + self._password = rmq_config.get(RMQ_CONFIG_SECTION, "password") + self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, "vhost") # Set up a signal handler so we can shutdown gracefully - # For some reason, this signal handler must be set up here. I'd rather + # For some reason, this signal handler must be set up here. 
I'd rather # put it in AirtimeAnalyzerServer, but it doesn't work there (something to do # with pika's SIGTERM handler interfering with it, I think...) signal.signal(signal.SIGTERM, self.graceful_shutdown) @@ -86,9 +87,9 @@ class MessageListener: self.connect_to_messaging_server() self.wait_for_messages() except (KeyboardInterrupt, SystemExit): - break # Break out of the while loop and exit the application + break # Break out of the while loop and exit the application except select.error: - pass + pass except pika.exceptions.AMQPError as e: if self._shutdown: break @@ -100,27 +101,37 @@ class MessageListener: self.disconnect_from_messaging_server() logging.info("Exiting cleanly.") - def connect_to_messaging_server(self): - '''Connect to the RabbitMQ server and start listening for messages.''' - self._connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._host, - port=self._port, virtual_host=self._vhost, - credentials=pika.credentials.PlainCredentials(self._username, self._password))) + """Connect to the RabbitMQ server and start listening for messages.""" + self._connection = pika.BlockingConnection( + pika.ConnectionParameters( + host=self._host, + port=self._port, + virtual_host=self._vhost, + credentials=pika.credentials.PlainCredentials( + self._username, self._password + ), + ) + ) self._channel = self._connection.channel() - self._channel.exchange_declare(exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True) + self._channel.exchange_declare( + exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True + ) result = self._channel.queue_declare(queue=QUEUE, durable=True) - self._channel.queue_bind(exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY) - + self._channel.queue_bind( + exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY + ) + logging.info(" Listening for messages...") self._channel.basic_consume(QUEUE, self.msg_received_callback, auto_ack=False) def wait_for_messages(self): - '''Wait until we've received a 
RabbitMQ message.''' + """Wait until we've received a RabbitMQ message.""" self._channel.start_consuming() def disconnect_from_messaging_server(self): - '''Stop consuming RabbitMQ messages and disconnect''' + """Stop consuming RabbitMQ messages and disconnect""" # If you try to close a connection that's already closed, you're going to have a bad time. # We're breaking EAFP because this can be called multiple times depending on exception # handling flow here. @@ -128,43 +139,45 @@ class MessageListener: self._channel.stop_consuming() if not self._connection.is_closed and not self._connection.is_closing: self._connection.close() - + def graceful_shutdown(self, signum, frame): - '''Disconnect and break out of the message listening loop''' + """Disconnect and break out of the message listening loop""" self._shutdown = True self.disconnect_from_messaging_server() def msg_received_callback(self, channel, method_frame, header_frame, body): - ''' A callback method that runs when a RabbitMQ message is received. - - Here we parse the message, spin up an analyzer process, and report the - metadata back to the Airtime web application (or report an error). - ''' - logging.info(" - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key)) - - #Declare all variables here so they exist in the exception handlers below, no matter what. + """A callback method that runs when a RabbitMQ message is received. + + Here we parse the message, spin up an analyzer process, and report the + metadata back to the Airtime web application (or report an error). + """ + logging.info( + " - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key) + ) + + # Declare all variables here so they exist in the exception handlers below, no matter what. audio_file_path = "" - #final_file_path = "" + # final_file_path = "" import_directory = "" original_filename = "" - callback_url = "" - api_key = "" + callback_url = "" + api_key = "" file_prefix = "" - ''' Spin up a worker process. 
We use the multiprocessing module and multiprocessing.Queue + """ Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue to pass objects between the processes so that if the analyzer process crashes, it does not take down the rest of the daemon and we NACK that message so that it doesn't get propagated to other airtime_analyzer daemons (eg. running on other servers). We avoid cascading failure this way. - ''' + """ try: try: body = body.decode() except (UnicodeDecodeError, AttributeError): pass msg_dict = json.loads(body) - api_key = msg_dict["api_key"] - callback_url = msg_dict["callback_url"] + api_key = msg_dict["api_key"] + callback_url = msg_dict["callback_url"] audio_file_path = msg_dict["tmp_file_path"] import_directory = msg_dict["import_directory"] @@ -172,48 +185,71 @@ class MessageListener: file_prefix = msg_dict["file_prefix"] storage_backend = msg_dict["storage_backend"] - audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix) - StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata) + audio_metadata = MessageListener.spawn_analyzer_process( + audio_file_path, + import_directory, + original_filename, + storage_backend, + file_prefix, + ) + StatusReporter.report_success_to_callback_url( + callback_url, api_key, audio_metadata + ) except KeyError as e: # A field in msg_dict that we needed was missing (eg. audio_file_path) - logging.exception("A mandatory airtime_analyzer message field was missing from the message.") + logging.exception( + "A mandatory airtime_analyzer message field was missing from the message." + ) # See the huge comment about NACK below. 
- channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False, - requeue=False) #Important that it doesn't requeue the message - + channel.basic_nack( + delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False + ) # Important that it doesn't requeue the message + except Exception as e: logging.exception(e) - ''' If ANY exception happens while processing a file, we're going to NACK to the + """ If ANY exception happens while processing a file, we're going to NACK to the messaging server and tell it to remove the message from the queue. (NACK is a negative acknowledgement. We could use ACK instead, but this might come in handy in the future.) Exceptions in this context are unexpected, unhandled errors. We try to recover from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves here from any catastrophic or genuinely unexpected errors: - ''' - channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False, - requeue=False) #Important that it doesn't requeue the message + """ + channel.basic_nack( + delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False + ) # Important that it doesn't requeue the message # - # TODO: If the JSON was invalid or the web server is down, + # TODO: If the JSON was invalid or the web server is down, # then don't report that failure to the REST API - #TODO: Catch exceptions from this HTTP request too: - if callback_url: # If we got an invalid message, there might be no callback_url in the JSON + # TODO: Catch exceptions from this HTTP request too: + if ( + callback_url + ): # If we got an invalid message, there might be no callback_url in the JSON # Report this as a failed upload to the File Upload REST API. 
- StatusReporter.report_failure_to_callback_url(callback_url, api_key, import_status=2, - reason='An error occurred while importing this file') - + StatusReporter.report_failure_to_callback_url( + callback_url, + api_key, + import_status=2, + reason="An error occurred while importing this file", + ) else: # ACK at the very end, after the message has been successfully processed. # If we don't ack, then RabbitMQ will redeliver the message in the future. channel.basic_ack(delivery_tag=method_frame.delivery_tag) - + @staticmethod - def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix): - ''' Spawn a child process to analyze and import a new audio file. ''' - ''' + def spawn_analyzer_process( + audio_file_path, + import_directory, + original_filename, + storage_backend, + file_prefix, + ): + """Spawn a child process to analyze and import a new audio file.""" + """ q = multiprocessing.Queue() p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis, args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix)) @@ -225,12 +261,19 @@ class MessageListener: logging.info(results) else: raise Exception("Analyzer process terminated unexpectedly.") - ''' + """ metadata = {} q = queue.Queue() try: - AnalyzerPipeline.run_analysis(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix) + AnalyzerPipeline.run_analysis( + q, + audio_file_path, + import_directory, + original_filename, + storage_backend, + file_prefix, + ) metadata = q.get() except Exception as e: logging.error("Analyzer pipeline exception: %s" % str(e)) @@ -241,4 +284,3 @@ class MessageListener: q.get() return metadata - diff --git a/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py index 91b10a6b9..ff86494fa 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py +++ 
b/python_apps/airtime_analyzer/airtime_analyzer/metadata_analyzer.py @@ -9,32 +9,36 @@ import os import hashlib from .analyzer import Analyzer -class MetadataAnalyzer(Analyzer): +class MetadataAnalyzer(Analyzer): @staticmethod def analyze(filename, metadata): - ''' Extract audio metadata from tags embedded in the file (eg. ID3 tags) + """Extract audio metadata from tags embedded in the file (eg. ID3 tags) - Keyword arguments: - filename: The path to the audio file to extract metadata from. - metadata: A dictionary that the extracted metadata will be added to. - ''' + Keyword arguments: + filename: The path to the audio file to extract metadata from. + metadata: A dictionary that the extracted metadata will be added to. + """ if not isinstance(filename, str): - raise TypeError("filename must be string. Was of type " + type(filename).__name__) + raise TypeError( + "filename must be string. Was of type " + type(filename).__name__ + ) if not isinstance(metadata, dict): - raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__) + raise TypeError( + "metadata must be a dict. 
Was of type " + type(metadata).__name__ + ) if not os.path.exists(filename): raise FileNotFoundError("audio file not found: {}".format(filename)) - #Airtime <= 2.5.x nonsense: + # Airtime <= 2.5.x nonsense: metadata["ftype"] = "audioclip" - #Other fields we'll want to set for Airtime: + # Other fields we'll want to set for Airtime: metadata["hidden"] = False # Get file size and md5 hash of the file metadata["filesize"] = os.path.getsize(filename) - with open(filename, 'rb') as fh: + with open(filename, "rb") as fh: m = hashlib.md5() while True: data = fh.read(8192) @@ -46,37 +50,41 @@ class MetadataAnalyzer(Analyzer): # Mutagen doesn't handle WAVE files so we use a different package ms = magic.open(magic.MIME_TYPE) ms.load() - with open(filename, 'rb') as fh: + with open(filename, "rb") as fh: mime_check = ms.buffer(fh.read(2014)) metadata["mime"] = mime_check - if mime_check == 'audio/x-wav': + if mime_check == "audio/x-wav": return MetadataAnalyzer._analyze_wave(filename, metadata) - #Extract metadata from an audio file using mutagen + # Extract metadata from an audio file using mutagen audio_file = mutagen.File(filename, easy=True) - #Bail if the file couldn't be parsed. The title should stay as the filename - #inside Airtime. - if audio_file == None: # Don't use "if not" here. It is wrong due to mutagen's design. + # Bail if the file couldn't be parsed. The title should stay as the filename + # inside Airtime. + if ( + audio_file == None + ): # Don't use "if not" here. It is wrong due to mutagen's design. return metadata # Note that audio_file can equal {} if the file is valid but there's no metadata tags. # We can still try to grab the info variables below. - #Grab other file information that isn't encoded in a tag, but instead usually - #in the file header. Mutagen breaks that out into a separate "info" object: + # Grab other file information that isn't encoded in a tag, but instead usually + # in the file header. 
Mutagen breaks that out into a separate "info" object: info = audio_file.info - if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent + if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent metadata["sample_rate"] = info.sample_rate if hasattr(info, "length"): metadata["length_seconds"] = info.length - #Converting the length in seconds (float) to a formatted time string + # Converting the length in seconds (float) to a formatted time string track_length = datetime.timedelta(seconds=info.length) - metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length) + metadata["length"] = str( + track_length + ) # time.strftime("%H:%M:%S.%f", track_length) # Other fields for Airtime metadata["cueout"] = metadata["length"] # Set a default cue in time in seconds - metadata["cuein"] = 0.0; + metadata["cuein"] = 0.0 if hasattr(info, "bitrate"): metadata["bit_rate"] = info.bitrate @@ -86,11 +94,11 @@ class MetadataAnalyzer(Analyzer): if audio_file.mime: metadata["mime"] = audio_file.mime[0] - #Try to get the number of channels if mutagen can... + # Try to get the number of channels if mutagen can... try: - #Special handling for getting the # of channels from MP3s. It's in the "mode" field - #which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec... - if metadata["mime"] in ["audio/mpeg", 'audio/mp3']: + # Special handling for getting the # of channels from MP3s. It's in the "mode" field + # which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec... + if metadata["mime"] in ["audio/mpeg", "audio/mp3"]: if info.mode == 3: metadata["channels"] = 1 else: @@ -98,54 +106,54 @@ class MetadataAnalyzer(Analyzer): else: metadata["channels"] = info.channels except (AttributeError, KeyError): - #If mutagen can't figure out the number of channels, we'll just leave it out... + # If mutagen can't figure out the number of channels, we'll just leave it out... 
pass - #Try to extract the number of tracks on the album if we can (the "track total") + # Try to extract the number of tracks on the album if we can (the "track total") try: track_number = audio_file["tracknumber"] - if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh + if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh track_number = track_number[0] track_number_tokens = track_number - if '/' in track_number: - track_number_tokens = track_number.split('/') + if "/" in track_number: + track_number_tokens = track_number.split("/") track_number = track_number_tokens[0] - elif '-' in track_number: - track_number_tokens = track_number.split('-') + elif "-" in track_number: + track_number_tokens = track_number.split("-") track_number = track_number_tokens[0] metadata["track_number"] = track_number track_total = track_number_tokens[1] metadata["track_total"] = track_total except (AttributeError, KeyError, IndexError): - #If we couldn't figure out the track_number or track_total, just ignore it... + # If we couldn't figure out the track_number or track_total, just ignore it... 
pass - #We normalize the mutagen tags slightly here, so in case mutagen changes, - #we find the + # We normalize the mutagen tags slightly here, so in case mutagen changes, + # we find the mutagen_to_airtime_mapping = { - 'title': 'track_title', - 'artist': 'artist_name', - 'album': 'album_title', - 'bpm': 'bpm', - 'composer': 'composer', - 'conductor': 'conductor', - 'copyright': 'copyright', - 'comment': 'comment', - 'encoded_by': 'encoder', - 'genre': 'genre', - 'isrc': 'isrc', - 'label': 'label', - 'organization': 'label', + "title": "track_title", + "artist": "artist_name", + "album": "album_title", + "bpm": "bpm", + "composer": "composer", + "conductor": "conductor", + "copyright": "copyright", + "comment": "comment", + "encoded_by": "encoder", + "genre": "genre", + "isrc": "isrc", + "label": "label", + "organization": "label", #'length': 'length', - 'language': 'language', - 'last_modified':'last_modified', - 'mood': 'mood', - 'bit_rate': 'bit_rate', - 'replay_gain': 'replaygain', + "language": "language", + "last_modified": "last_modified", + "mood": "mood", + "bit_rate": "bit_rate", + "replay_gain": "replaygain", #'tracknumber': 'track_number', #'track_total': 'track_total', - 'website': 'website', - 'date': 'year', + "website": "website", + "date": "year", #'mime_type': 'mime', } @@ -158,7 +166,7 @@ class MetadataAnalyzer(Analyzer): if isinstance(metadata[airtime_tag], list): if metadata[airtime_tag]: metadata[airtime_tag] = metadata[airtime_tag][0] - else: # Handle empty lists + else: # Handle empty lists metadata[airtime_tag] = "" except KeyError: @@ -169,13 +177,15 @@ class MetadataAnalyzer(Analyzer): @staticmethod def _analyze_wave(filename, metadata): try: - reader = wave.open(filename, 'rb') + reader = wave.open(filename, "rb") metadata["channels"] = reader.getnchannels() metadata["sample_rate"] = reader.getframerate() length_seconds = float(reader.getnframes()) / float(metadata["sample_rate"]) - #Converting the length in seconds (float) to a 
formatted time string + # Converting the length in seconds (float) to a formatted time string track_length = datetime.timedelta(seconds=length_seconds) - metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length) + metadata["length"] = str( + track_length + ) # time.strftime("%H:%M:%S.%f", track_length) metadata["length_seconds"] = length_seconds metadata["cueout"] = metadata["length"] except wave.Error as ex: diff --git a/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py index 0f4b030f4..99d1f4c99 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/playability_analyzer.py @@ -1,32 +1,47 @@ # -*- coding: utf-8 -*- -__author__ = 'asantoni' +__author__ = "asantoni" import subprocess import logging from .analyzer import Analyzer + class UnplayableFileError(Exception): pass -class PlayabilityAnalyzer(Analyzer): - ''' This class checks if a file can actually be played with Liquidsoap. ''' - LIQUIDSOAP_EXECUTABLE = 'liquidsoap' +class PlayabilityAnalyzer(Analyzer): + """This class checks if a file can actually be played with Liquidsoap.""" + + LIQUIDSOAP_EXECUTABLE = "liquidsoap" @staticmethod def analyze(filename, metadata): - ''' Checks if a file can be played by Liquidsoap. + """Checks if a file can be played by Liquidsoap. 
:param filename: The full path to the file to analyzer :param metadata: A metadata dictionary where the results will be put :return: The metadata dictionary - ''' - command = [PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE, '-v', '-c', "output.dummy(audio_to_stereo(single(argv(1))))", '--', filename] + """ + command = [ + PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE, + "-v", + "-c", + "output.dummy(audio_to_stereo(single(argv(1))))", + "--", + filename, + ] try: subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True) - except OSError as e: # liquidsoap was not found - logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have liquidsoap installed?")) - except (subprocess.CalledProcessError, Exception) as e: # liquidsoap returned an error code + except OSError as e: # liquidsoap was not found + logging.warn( + "Failed to run: %s - %s. %s" + % (command[0], e.strerror, "Do you have liquidsoap installed?") + ) + except ( + subprocess.CalledProcessError, + Exception, + ) as e: # liquidsoap returned an error code logging.warn(e) raise UnplayableFileError() diff --git a/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py b/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py index 60360a7cd..046650ef4 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/replaygain_analyzer.py @@ -6,30 +6,39 @@ import re class ReplayGainAnalyzer(Analyzer): - ''' This class extracts the ReplayGain using a tool from the python-rgain package. ''' + """This class extracts the ReplayGain using a tool from the python-rgain package.""" - REPLAYGAIN_EXECUTABLE = 'replaygain' # From the rgain3 python package + REPLAYGAIN_EXECUTABLE = "replaygain" # From the rgain3 python package @staticmethod def analyze(filename, metadata): - ''' Extracts the Replaygain loudness normalization factor of a track. 
+ """Extracts the Replaygain loudness normalization factor of a track. :param filename: The full path to the file to analyzer :param metadata: A metadata dictionary where the results will be put :return: The metadata dictionary - ''' - ''' The -d flag means do a dry-run, ie. don't modify the file directly. - ''' - command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, '-d', filename] + """ + """ The -d flag means do a dry-run, ie. don't modify the file directly. + """ + command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, "-d", filename] try: - results = subprocess.check_output(command, stderr=subprocess.STDOUT, - close_fds=True, universal_newlines=True) - gain_match = r'Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB' + results = subprocess.check_output( + command, + stderr=subprocess.STDOUT, + close_fds=True, + universal_newlines=True, + ) + gain_match = ( + r"Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB" + ) replaygain = re.search(gain_match, results).group(1) - metadata['replay_gain'] = float(replaygain) + metadata["replay_gain"] = float(replaygain) - except OSError as e: # replaygain was not found - logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have python-rgain installed?")) - except subprocess.CalledProcessError as e: # replaygain returned an error code + except OSError as e: # replaygain was not found + logging.warn( + "Failed to run: %s - %s. 
%s" + % (command[0], e.strerror, "Do you have python-rgain installed?") + ) + except subprocess.CalledProcessError as e: # replaygain returned an error code logging.warn("%s %s %s", e.cmd, e.output, e.returncode) except Exception as e: logging.warn(e) diff --git a/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py b/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py index e8d7dd2fa..4c58ac85e 100644 --- a/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py +++ b/python_apps/airtime_analyzer/airtime_analyzer/status_reporter.py @@ -7,14 +7,15 @@ import queue import time import traceback import pickle -import threading +import threading from urllib.parse import urlparse # Disable urllib3 warnings because these can cause a rare deadlock due to Python 2's crappy internal non-reentrant locking # around POSIX stuff. See SAAS-714. The hasattr() is for compatibility with older versions of requests. -if hasattr(requests, 'packages'): +if hasattr(requests, "packages"): requests.packages.urllib3.disable_warnings() + class PicklableHttpRequest: def __init__(self, method, url, data, api_key): self.method = method @@ -23,18 +24,23 @@ class PicklableHttpRequest: self.api_key = api_key def create_request(self): - return requests.Request(method=self.method, url=self.url, data=self.data, - auth=requests.auth.HTTPBasicAuth(self.api_key, '')) + return requests.Request( + method=self.method, + url=self.url, + data=self.data, + auth=requests.auth.HTTPBasicAuth(self.api_key, ""), + ) + def process_http_requests(ipc_queue, http_retry_queue_path): - ''' Runs in a separate thread and performs all the HTTP requests where we're - reporting extracted audio file metadata or errors back to the Airtime web application. + """Runs in a separate thread and performs all the HTTP requests where we're + reporting extracted audio file metadata or errors back to the Airtime web application. 
- This process also checks every 5 seconds if there's failed HTTP requests that we - need to retry. We retry failed HTTP requests so that we don't lose uploads if the - web server is temporarily down. + This process also checks every 5 seconds if there's failed HTTP requests that we + need to retry. We retry failed HTTP requests so that we don't lose uploads if the + web server is temporarily down. - ''' + """ # Store any failed requests (eg. due to web server errors or downtime) to be # retried later: @@ -45,7 +51,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path): # if airtime_analyzer is shut down while the web server is down or unreachable, # and there were failed HTTP requests pending, waiting to be retried. try: - with open(http_retry_queue_path, 'rb') as pickle_file: + with open(http_retry_queue_path, "rb") as pickle_file: retry_queue = pickle.load(pickle_file) except IOError as e: if e.errno == 2: @@ -64,11 +70,16 @@ def process_http_requests(ipc_queue, http_retry_queue_path): while not shutdown: try: request = ipc_queue.get(block=True, timeout=5) - if isinstance(request, str) and request == "shutdown": # Bit of a cheat + if ( + isinstance(request, str) and request == "shutdown" + ): # Bit of a cheat shutdown = True break if not isinstance(request, PicklableHttpRequest): - raise TypeError("request must be a PicklableHttpRequest. Was of type " + type(request).__name__) + raise TypeError( + "request must be a PicklableHttpRequest. Was of type " + + type(request).__name__ + ) except queue.Empty: request = None @@ -85,32 +96,40 @@ def process_http_requests(ipc_queue, http_retry_queue_path): logging.info("Shutting down status_reporter") # Pickle retry_queue to disk so that we don't lose uploads if we're shut down while # while the web server is down or unreachable. 
- with open(http_retry_queue_path, 'wb') as pickle_file: + with open(http_retry_queue_path, "wb") as pickle_file: pickle.dump(retry_queue, pickle_file) return - except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case. + except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case. if shutdown: return logging.exception("Unhandled exception in StatusReporter") logging.exception(e) logging.info("Restarting StatusReporter thread") - time.sleep(2) # Throttle it + time.sleep(2) # Throttle it def send_http_request(picklable_request, retry_queue): if not isinstance(picklable_request, PicklableHttpRequest): - raise TypeError("picklable_request must be a PicklableHttpRequest. Was of type " + type(picklable_request).__name__) - try: + raise TypeError( + "picklable_request must be a PicklableHttpRequest. Was of type " + + type(picklable_request).__name__ + ) + try: bare_request = picklable_request.create_request() s = requests.Session() prepared_request = s.prepare_request(bare_request) - r = s.send(prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False) # SNI is a pain in the ass - r.raise_for_status() # Raise an exception if there was an http error code returned + r = s.send( + prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False + ) # SNI is a pain in the ass + r.raise_for_status() # Raise an exception if there was an http error code returned logging.info("HTTP request sent successfully.") except requests.exceptions.HTTPError as e: if e.response.status_code == 422: # Do no retry the request if there was a metadata validation error - logging.error("HTTP request failed due to an HTTP exception. Exception was: %s" % str(e)) + logging.error( + "HTTP request failed due to an HTTP exception. 
Exception was: %s" + % str(e) + ) else: # The request failed with an error 500 probably, so let's check if Airtime and/or # the web server are broken. If not, then our request was probably causing an @@ -124,8 +143,10 @@ def send_http_request(picklable_request, retry_queue): # You will have to find these bad requests in logs or you'll be # notified by sentry. except requests.exceptions.ConnectionError as e: - logging.error("HTTP request failed due to a connection error. Retrying later. %s" % str(e)) - retry_queue.append(picklable_request) # Retry it later + logging.error( + "HTTP request failed due to a connection error. Retrying later. %s" % str(e) + ) + retry_queue.append(picklable_request) # Retry it later except Exception as e: logging.error("HTTP request failed with unhandled exception. %s" % str(e)) logging.error(traceback.format_exc()) @@ -134,12 +155,13 @@ def send_http_request(picklable_request, retry_queue): # that breaks our code. I don't want us pickling data that potentially # breaks airtime_analyzer. + def is_web_server_broken(url): - ''' Do a naive test to check if the web server we're trying to access is down. - We use this to try to differentiate between error 500s that are coming - from (for example) a bug in the Airtime Media REST API and error 500s - caused by Airtime or the webserver itself being broken temporarily. - ''' + """Do a naive test to check if the web server we're trying to access is down. + We use this to try to differentiate between error 500s that are coming + from (for example) a bug in the Airtime Media REST API and error 500s + caused by Airtime or the webserver itself being broken temporarily. + """ try: test_req = requests.get(url, verify=False) test_req.raise_for_status() @@ -147,35 +169,38 @@ def is_web_server_broken(url): return True else: # The request worked fine, so the web server and Airtime are still up. 
- return False + return False return False -class StatusReporter(): - ''' Reports the extracted audio file metadata and job status back to the - Airtime web application. - ''' +class StatusReporter: + """Reports the extracted audio file metadata and job status back to the + Airtime web application. + """ + _HTTP_REQUEST_TIMEOUT = 30 - - ''' We use multiprocessing.Process again here because we need a thread for this stuff + + """ We use multiprocessing.Process again here because we need a thread for this stuff anyways, and Python gives us process isolation for free (crash safety). - ''' + """ _ipc_queue = queue.Queue() - #_http_thread = multiprocessing.Process(target=process_http_requests, + # _http_thread = multiprocessing.Process(target=process_http_requests, # args=(_ipc_queue,)) _http_thread = None @classmethod def start_thread(self, http_retry_queue_path): - StatusReporter._http_thread = threading.Thread(target=process_http_requests, - args=(StatusReporter._ipc_queue,http_retry_queue_path)) + StatusReporter._http_thread = threading.Thread( + target=process_http_requests, + args=(StatusReporter._ipc_queue, http_retry_queue_path), + ) StatusReporter._http_thread.start() @classmethod def stop_thread(self): logging.info("Terminating status_reporter process") - #StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process - StatusReporter._ipc_queue.put("shutdown") # Special trigger + # StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process + StatusReporter._ipc_queue.put("shutdown") # Special trigger StatusReporter._http_thread.join() @classmethod @@ -184,30 +209,33 @@ class StatusReporter(): @classmethod def report_success_to_callback_url(self, callback_url, api_key, audio_metadata): - ''' Report the extracted metadata and status of the successfully imported file - to the callback URL (which should be the Airtime File Upload API) - ''' + """Report the extracted metadata and status of the successfully imported file + to 
the callback URL (which should be the Airtime File Upload API) + """ put_payload = json.dumps(audio_metadata) - #r = requests.Request(method='PUT', url=callback_url, data=put_payload, + # r = requests.Request(method='PUT', url=callback_url, data=put_payload, # auth=requests.auth.HTTPBasicAuth(api_key, '')) - ''' + """ r = requests.Request(method='PUT', url=callback_url, data=put_payload, auth=requests.auth.HTTPBasicAuth(api_key, '')) StatusReporter._send_http_request(r) - ''' + """ - StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url, - data=put_payload, api_key=api_key)) + StatusReporter._send_http_request( + PicklableHttpRequest( + method="PUT", url=callback_url, data=put_payload, api_key=api_key + ) + ) - ''' + """ try: r.raise_for_status() # Raise an exception if there was an http error code returned except requests.exceptions.RequestException: StatusReporter._ipc_queue.put(r.prepare()) - ''' + """ - ''' + """ # Encode the audio metadata as json and post it back to the callback_url put_payload = json.dumps(audio_metadata) logging.debug("sending http put with payload: " + put_payload) @@ -219,31 +247,38 @@ class StatusReporter(): #TODO: queue up failed requests and try them again later. r.raise_for_status() # Raise an exception if there was an http error code returned - ''' + """ @classmethod - def report_failure_to_callback_url(self, callback_url, api_key, import_status, reason): - if not isinstance(import_status, int ): - raise TypeError("import_status must be an integer. Was of type " + type(import_status).__name__) + def report_failure_to_callback_url( + self, callback_url, api_key, import_status, reason + ): + if not isinstance(import_status, int): + raise TypeError( + "import_status must be an integer. 
Was of type " + + type(import_status).__name__ + ) logging.debug("Reporting import failure to Airtime REST API...") audio_metadata = dict() audio_metadata["import_status"] = import_status audio_metadata["comment"] = reason # hack attack put_payload = json.dumps(audio_metadata) - #logging.debug("sending http put with payload: " + put_payload) - ''' + # logging.debug("sending http put with payload: " + put_payload) + """ r = requests.put(callback_url, data=put_payload, auth=requests.auth.HTTPBasicAuth(api_key, ''), timeout=StatusReporter._HTTP_REQUEST_TIMEOUT) - ''' - StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url, - data=put_payload, api_key=api_key)) - ''' + """ + StatusReporter._send_http_request( + PicklableHttpRequest( + method="PUT", url=callback_url, data=put_payload, api_key=api_key + ) + ) + """ logging.debug("HTTP request returned status: " + str(r.status_code)) logging.debug(r.text) # log the response body #TODO: queue up failed requests and try them again later. 
r.raise_for_status() # raise an exception if there was an http error code returned - ''' - + """ diff --git a/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py b/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py index f37fc6004..3abdac335 100644 --- a/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/airtime_analyzer_tests.py @@ -2,12 +2,14 @@ from nose.tools import * import airtime_analyzer + def setup(): pass + def teardown(): pass + def test_basic(): pass - diff --git a/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py b/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py index 82879f554..a2f05cdd2 100644 --- a/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py +++ b/python_apps/airtime_analyzer/tests/analyzer_pipeline_tests.py @@ -8,48 +8,58 @@ import datetime from airtime_analyzer.analyzer_pipeline import AnalyzerPipeline from airtime_analyzer import config_file -DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3' -DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3' +DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3" +DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3" + def setup(): pass + def teardown(): - #Move the file back + # Move the file back shutil.move(DEFAULT_IMPORT_DEST, DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE) + def test_basic(): filename = os.path.basename(DEFAULT_AUDIO_FILE) q = Queue() - file_prefix = u'' + file_prefix = u"" storage_backend = "file" - #This actually imports the file into the "./Test Artist" directory. - AnalyzerPipeline.run_analysis(q, DEFAULT_AUDIO_FILE, u'.', filename, storage_backend, file_prefix) + # This actually imports the file into the "./Test Artist" directory. 
+ AnalyzerPipeline.run_analysis( + q, DEFAULT_AUDIO_FILE, u".", filename, storage_backend, file_prefix + ) metadata = q.get() - assert metadata['track_title'] == u'Test Title' - assert metadata['artist_name'] == u'Test Artist' - assert metadata['album_title'] == u'Test Album' - assert metadata['year'] == u'1999' - assert metadata['genre'] == u'Test Genre' - assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't. - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"])) + assert metadata["track_title"] == u"Test Title" + assert metadata["artist_name"] == u"Test Artist" + assert metadata["album_title"] == u"Test Album" + assert metadata["year"] == u"1999" + assert metadata["genre"] == u"Test Genre" + assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't. + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["length"] == str( + datetime.timedelta(seconds=metadata["length_seconds"]) + ) assert os.path.exists(DEFAULT_IMPORT_DEST) + @raises(TypeError) def test_wrong_type_queue_param(): - AnalyzerPipeline.run_analysis(Queue(), u'', u'', u'') + AnalyzerPipeline.run_analysis(Queue(), u"", u"", u"") + @raises(TypeError) def test_wrong_type_string_param2(): - AnalyzerPipeline.run_analysis(Queue(), '', u'', u'') + AnalyzerPipeline.run_analysis(Queue(), "", u"", u"") + @raises(TypeError) def test_wrong_type_string_param3(): - AnalyzerPipeline.run_analysis(Queue(), u'', '', u'') + AnalyzerPipeline.run_analysis(Queue(), u"", "", u"") + @raises(TypeError) def test_wrong_type_string_param4(): - AnalyzerPipeline.run_analysis(Queue(), u'', u'', '') - + AnalyzerPipeline.run_analysis(Queue(), u"", u"", "") diff --git a/python_apps/airtime_analyzer/tests/analyzer_tests.py b/python_apps/airtime_analyzer/tests/analyzer_tests.py index 6d34b6a9f..fc45f62fa 100644 --- a/python_apps/airtime_analyzer/tests/analyzer_tests.py +++ 
b/python_apps/airtime_analyzer/tests/analyzer_tests.py @@ -2,13 +2,16 @@ from nose.tools import * from airtime_analyzer.analyzer import Analyzer + def setup(): pass + def teardown(): pass + @raises(NotImplementedError) def test_analyze(): abstract_analyzer = Analyzer() - abstract_analyzer.analyze(u'foo', dict()) + abstract_analyzer.analyze(u"foo", dict()) diff --git a/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py b/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py index 7dced3618..277cd44d3 100644 --- a/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/cuepoint_analyzer_tests.py @@ -2,63 +2,97 @@ from nose.tools import * from airtime_analyzer.cuepoint_analyzer import CuePointAnalyzer + def check_default_metadata(metadata): - ''' Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect. + """Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect. 
:param metadata: a metadata dictionary :return: Nothing - ''' + """ # We give silan some leeway here by specifying a tolerance tolerance_seconds = 0.1 length_seconds = 3.9 - assert abs(metadata['length_seconds'] - length_seconds) < tolerance_seconds - assert abs(float(metadata['cuein'])) < tolerance_seconds - assert abs(float(metadata['cueout']) - length_seconds) < tolerance_seconds + assert abs(metadata["length_seconds"] - length_seconds) < tolerance_seconds + assert abs(float(metadata["cuein"])) < tolerance_seconds + assert abs(float(metadata["cueout"]) - length_seconds) < tolerance_seconds + def test_missing_silan(): old_silan = CuePointAnalyzer.SILAN_EXECUTABLE - CuePointAnalyzer.SILAN_EXECUTABLE = 'foosdaf' - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) - CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back + CuePointAnalyzer.SILAN_EXECUTABLE = "foosdaf" + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) + CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back + def test_invalid_filepath(): - metadata = CuePointAnalyzer.analyze(u'non-existent-file', dict()) + metadata = CuePointAnalyzer.analyze(u"non-existent-file", dict()) def test_mp3_utf8(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_dualmono(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_jointstereo(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-jointstereo.mp3", 
dict() + ) check_default_metadata(metadata) + def test_mp3_simplestereo(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_stereo(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_mono(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-mono.mp3", dict() + ) check_default_metadata(metadata) + def test_ogg_stereo(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.ogg", dict() + ) check_default_metadata(metadata) + def test_invalid_wma(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict() + ) + def test_m4a_stereo(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.m4a", dict() + ) check_default_metadata(metadata) + def test_wav_stereo(): - metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict()) + metadata = CuePointAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.wav", dict() + ) check_default_metadata(metadata) diff --git a/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py b/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py index c14e91e27..40eafc6ba 100644 --- 
a/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/filemover_analyzer_tests.py @@ -8,109 +8,125 @@ import mock from pprint import pprint from airtime_analyzer.filemover_analyzer import FileMoverAnalyzer -DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3' -DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3' +DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3" +DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3" + def setup(): pass + def teardown(): pass + @raises(Exception) def test_dont_use_analyze(): - FileMoverAnalyzer.analyze(u'foo', dict()) + FileMoverAnalyzer.analyze(u"foo", dict()) + @raises(TypeError) def test_move_wrong_string_param1(): - FileMoverAnalyzer.move(42, '', '', dict()) + FileMoverAnalyzer.move(42, "", "", dict()) + @raises(TypeError) def test_move_wrong_string_param2(): - FileMoverAnalyzer.move(u'', 23, u'', dict()) + FileMoverAnalyzer.move(u"", 23, u"", dict()) + @raises(TypeError) def test_move_wrong_string_param3(): - FileMoverAnalyzer.move('', '', 5, dict()) + FileMoverAnalyzer.move("", "", 5, dict()) + @raises(TypeError) def test_move_wrong_dict_param(): - FileMoverAnalyzer.move('', '', '', 12345) + FileMoverAnalyzer.move("", "", "", 12345) + @raises(FileNotFoundError) def test_move_wrong_string_param3(): - FileMoverAnalyzer.move('', '', '', dict()) + FileMoverAnalyzer.move("", "", "", dict()) + def test_basic(): filename = os.path.basename(DEFAULT_AUDIO_FILE) - FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict()) - #Move the file back + FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict()) + # Move the file back shutil.move("./" + filename, DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE) + def test_basic_samefile(): filename = os.path.basename(DEFAULT_AUDIO_FILE) - FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'tests/test_data', filename, dict()) + 
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u"tests/test_data", filename, dict()) assert os.path.exists(DEFAULT_AUDIO_FILE) + def test_duplicate_file(): filename = os.path.basename(DEFAULT_AUDIO_FILE) - #Import the file once - FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict()) - #Copy it back to the original location + # Import the file once + FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict()) + # Copy it back to the original location shutil.copy("./" + filename, DEFAULT_AUDIO_FILE) - #Import it again. It shouldn't overwrite the old file and instead create a new + # Import it again. It shouldn't overwrite the old file and instead create a new metadata = dict() - metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, metadata) - #Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back + metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, metadata) + # Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back shutil.move("./" + filename, DEFAULT_AUDIO_FILE) - #Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3 + # Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3 os.remove(metadata["full_path"]) assert os.path.exists(DEFAULT_AUDIO_FILE) -''' If you import three copies of the same file, the behaviour is: + +""" If you import three copies of the same file, the behaviour is: - The filename is of the first file preserved. - The filename of the second file has the timestamp attached to it. - The filename of the third file has a UUID placed after the timestamp, but ONLY IF it's imported within 1 second of the second file (ie. if the timestamp is the same). -''' +""" + + def test_double_duplicate_files(): # Here we use mock to patch out the time.localtime() function so that it # always returns the same value. This allows us to consistently simulate this test cases # where the last two of the three files are imported at the same time as the timestamp. 
- with mock.patch('airtime_analyzer.filemover_analyzer.time') as mock_time: - mock_time.localtime.return_value = time.localtime()#date(2010, 10, 8) + with mock.patch("airtime_analyzer.filemover_analyzer.time") as mock_time: + mock_time.localtime.return_value = time.localtime() # date(2010, 10, 8) mock_time.side_effect = lambda *args, **kw: time(*args, **kw) filename = os.path.basename(DEFAULT_AUDIO_FILE) - #Import the file once - FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict()) - #Copy it back to the original location + # Import the file once + FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict()) + # Copy it back to the original location shutil.copy("./" + filename, DEFAULT_AUDIO_FILE) - #Import it again. It shouldn't overwrite the old file and instead create a new + # Import it again. It shouldn't overwrite the old file and instead create a new first_dup_metadata = dict() - first_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, - first_dup_metadata) - #Copy it back again! + first_dup_metadata = FileMoverAnalyzer.move( + DEFAULT_AUDIO_FILE, u".", filename, first_dup_metadata + ) + # Copy it back again! shutil.copy("./" + filename, DEFAULT_AUDIO_FILE) - #Reimport for the third time, which should have the same timestamp as the second one - #thanks to us mocking out time.localtime() + # Reimport for the third time, which should have the same timestamp as the second one + # thanks to us mocking out time.localtime() second_dup_metadata = dict() - second_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, - second_dup_metadata) - #Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back + second_dup_metadata = FileMoverAnalyzer.move( + DEFAULT_AUDIO_FILE, u".", filename, second_dup_metadata + ) + # Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back shutil.move("./" + filename, DEFAULT_AUDIO_FILE) - #Remove the renamed duplicate, eg. 
44100Hz-16bit-mono_03-26-2014-11-58.mp3 + # Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3 os.remove(first_dup_metadata["full_path"]) os.remove(second_dup_metadata["full_path"]) assert os.path.exists(DEFAULT_AUDIO_FILE) + @raises(OSError) def test_bad_permissions_destination_dir(): filename = os.path.basename(DEFAULT_AUDIO_FILE) - dest_dir = u'/sys/foobar' # /sys is using sysfs on Linux, which is unwritable + dest_dir = u"/sys/foobar" # /sys is using sysfs on Linux, which is unwritable FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, dest_dir, filename, dict()) - #Move the file back + # Move the file back shutil.move(os.path.join(dest_dir, filename), DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE) - diff --git a/python_apps/airtime_analyzer/tests/metadata_analyzer_tests.py b/python_apps/airtime_analyzer/tests/metadata_analyzer_tests.py index e8e38f395..7e6b72d85 100644 --- a/python_apps/airtime_analyzer/tests/metadata_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/metadata_analyzer_tests.py @@ -6,78 +6,101 @@ import mock from nose.tools import * from airtime_analyzer.metadata_analyzer import MetadataAnalyzer + def setup(): pass + def teardown(): pass + def check_default_metadata(metadata): - assert metadata['track_title'] == 'Test Title' - assert metadata['artist_name'] == 'Test Artist' - assert metadata['album_title'] == 'Test Album' - assert metadata['year'] == '1999' - assert metadata['genre'] == 'Test Genre' - assert metadata['track_number'] == '1' - assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"])) + assert metadata["track_title"] == "Test Title" + assert metadata["artist_name"] == "Test Artist" + assert metadata["album_title"] == "Test Album" + assert metadata["year"] == "1999" + assert metadata["genre"] == "Test Genre" + assert metadata["track_number"] == "1" + assert metadata["length"] == str( + datetime.timedelta(seconds=metadata["length_seconds"]) + ) + def 
test_mp3_mono(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.mp3', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-mono.mp3", dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 1 - assert metadata['bit_rate'] == 63998 - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't. - assert metadata['track_total'] == '10' # MP3s can have a track_total - #Mutagen doesn't extract comments from mp3s it seems + assert metadata["channels"] == 1 + assert metadata["bit_rate"] == 63998 + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't. + assert metadata["track_total"] == "10" # MP3s can have a track_total + # Mutagen doesn't extract comments from mp3s it seems + def test_mp3_jointstereo(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-jointstereo.mp3", dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 2 - assert metadata['bit_rate'] == 127998 - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['mime'] == 'audio/mp3' - assert metadata['track_total'] == '10' # MP3s can have a track_total + assert metadata["channels"] == 2 + assert metadata["bit_rate"] == 127998 + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["mime"] == "audio/mp3" + assert metadata["track_total"] == "10" # MP3s can have a track_total + def test_mp3_simplestereo(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-simplestereo.mp3", dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 2 - assert metadata['bit_rate'] == 127998 - assert 
abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['mime'] == 'audio/mp3' - assert metadata['track_total'] == '10' # MP3s can have a track_total + assert metadata["channels"] == 2 + assert metadata["bit_rate"] == 127998 + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["mime"] == "audio/mp3" + assert metadata["track_total"] == "10" # MP3s can have a track_total + def test_mp3_dualmono(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-dualmono.mp3", dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 2 - assert metadata['bit_rate'] == 127998 - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['mime'] == 'audio/mp3' - assert metadata['track_total'] == '10' # MP3s can have a track_total + assert metadata["channels"] == 2 + assert metadata["bit_rate"] == 127998 + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["mime"] == "audio/mp3" + assert metadata["track_total"] == "10" # MP3s can have a track_total def test_ogg_mono(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.ogg', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-mono.ogg", dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 1 - assert metadata['bit_rate'] == 80000 - assert abs(metadata['length_seconds'] - 3.8) < 0.1 - assert metadata['mime'] == 'audio/vorbis' - assert metadata['comment'] == 'Test Comment' + assert metadata["channels"] == 1 + assert metadata["bit_rate"] == 80000 + assert abs(metadata["length_seconds"] - 3.8) < 0.1 + assert metadata["mime"] == "audio/vorbis" + assert metadata["comment"] == "Test Comment" + def test_ogg_stereo(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.ogg', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-stereo.ogg", 
dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 2 - assert metadata['bit_rate'] == 112000 - assert abs(metadata['length_seconds'] - 3.8) < 0.1 - assert metadata['mime'] == 'audio/vorbis' - assert metadata['comment'] == 'Test Comment' + assert metadata["channels"] == 2 + assert metadata["bit_rate"] == 112000 + assert abs(metadata["length_seconds"] - 3.8) < 0.1 + assert metadata["mime"] == "audio/vorbis" + assert metadata["comment"] == "Test Comment" -''' faac and avconv can't seem to create a proper mono AAC file... ugh + +""" faac and avconv can't seem to create a proper mono AAC file... ugh def test_aac_mono(): metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.m4a') print("Mono AAC metadata:") @@ -88,78 +111,93 @@ def test_aac_mono(): assert abs(metadata['length_seconds'] - 3.8) < 0.1 assert metadata['mime'] == 'audio/mp4' assert metadata['comment'] == 'Test Comment' -''' +""" + def test_aac_stereo(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.m4a', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-stereo.m4a", dict() + ) check_default_metadata(metadata) - assert metadata['channels'] == 2 - assert metadata['bit_rate'] == 102619 - assert abs(metadata['length_seconds'] - 3.8) < 0.1 - assert metadata['mime'] == 'audio/mp4' - assert metadata['comment'] == 'Test Comment' + assert metadata["channels"] == 2 + assert metadata["bit_rate"] == 102619 + assert abs(metadata["length_seconds"] - 3.8) < 0.1 + assert metadata["mime"] == "audio/mp4" + assert metadata["comment"] == "Test Comment" + def test_mp3_utf8(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) # Using a bunch of different UTF-8 codepages here. 
Test data is from: # http://winrus.com/utf8-jap.htm - assert metadata['track_title'] == 'アイウエオカキクケコサシスセソタチツテ' - assert metadata['artist_name'] == 'てすと' - assert metadata['album_title'] == 'Ä ä Ü ü ß' - assert metadata['year'] == '1999' - assert metadata['genre'] == 'Я Б Г Д Ж Й' - assert metadata['track_number'] == '1' - assert metadata['channels'] == 2 - assert metadata['bit_rate'] < 130000 - assert metadata['bit_rate'] > 127000 - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['mime'] == 'audio/mp3' - assert metadata['track_total'] == '10' # MP3s can have a track_total + assert metadata["track_title"] == "アイウエオカキクケコサシスセソタチツテ" + assert metadata["artist_name"] == "てすと" + assert metadata["album_title"] == "Ä ä Ü ü ß" + assert metadata["year"] == "1999" + assert metadata["genre"] == "Я Б Г Д Ж Й" + assert metadata["track_number"] == "1" + assert metadata["channels"] == 2 + assert metadata["bit_rate"] < 130000 + assert metadata["bit_rate"] > 127000 + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["mime"] == "audio/mp3" + assert metadata["track_total"] == "10" # MP3s can have a track_total + def test_invalid_wma(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) - assert metadata['mime'] == 'audio/x-ms-wma' + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict() + ) + assert metadata["mime"] == "audio/x-ms-wma" + def test_wav_stereo(): - metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.wav', dict()) - assert metadata['mime'] == 'audio/x-wav' - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['channels'] == 2 - assert metadata['sample_rate'] == 44100 + metadata = MetadataAnalyzer.analyze( + "tests/test_data/44100Hz-16bit-stereo.wav", dict() + ) + assert metadata["mime"] == "audio/x-wav" + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["channels"] == 2 + assert 
metadata["sample_rate"] == 44100 # Make sure the parameter checking works @raises(FileNotFoundError) def test_move_wrong_string_param1(): - not_unicode = 'asdfasdf' + not_unicode = "asdfasdf" MetadataAnalyzer.analyze(not_unicode, dict()) + @raises(TypeError) def test_move_wrong_metadata_dict(): not_a_dict = list() - MetadataAnalyzer.analyze('asdfasdf', not_a_dict) + MetadataAnalyzer.analyze("asdfasdf", not_a_dict) + # Test an mp3 file where the number of channels is invalid or missing: def test_mp3_bad_channels(): - filename = 'tests/test_data/44100Hz-16bit-mono.mp3' - ''' + filename = "tests/test_data/44100Hz-16bit-mono.mp3" + """ It'd be a pain in the ass to construct a real MP3 with an invalid number of channels by hand because that value is stored in every MP3 frame in the file - ''' + """ audio_file = mutagen.File(filename, easy=True) audio_file.info.mode = 1777 - with mock.patch('airtime_analyzer.metadata_analyzer.mutagen') as mock_mutagen: + with mock.patch("airtime_analyzer.metadata_analyzer.mutagen") as mock_mutagen: mock_mutagen.File.return_value = audio_file - #mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw) + # mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw) metadata = MetadataAnalyzer.analyze(filename, dict()) check_default_metadata(metadata) - assert metadata['channels'] == 1 - assert metadata['bit_rate'] == 63998 - assert abs(metadata['length_seconds'] - 3.9) < 0.1 - assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't. - assert metadata['track_total'] == '10' # MP3s can have a track_total - #Mutagen doesn't extract comments from mp3s it seems + assert metadata["channels"] == 1 + assert metadata["bit_rate"] == 63998 + assert abs(metadata["length_seconds"] - 3.9) < 0.1 + assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't. 
+ assert metadata["track_total"] == "10" # MP3s can have a track_total + # Mutagen doesn't extract comments from mp3s it seems + def test_unparsable_file(): - MetadataAnalyzer.analyze('tests/test_data/unparsable.txt', dict()) + MetadataAnalyzer.analyze("tests/test_data/unparsable.txt", dict()) diff --git a/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py b/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py index 2aa311ece..7042e1f93 100644 --- a/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/playability_analyzer_tests.py @@ -2,61 +2,97 @@ from nose.tools import * from airtime_analyzer.playability_analyzer import * + def check_default_metadata(metadata): - ''' Stub function for now in case we need it later.''' + """Stub function for now in case we need it later.""" pass + def test_missing_liquidsoap(): old_ls = PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE - PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = 'foosdaf' - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) - PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back + PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = "foosdaf" + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) + PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back + @raises(UnplayableFileError) def test_invalid_filepath(): - metadata = PlayabilityAnalyzer.analyze(u'non-existent-file', dict()) + metadata = PlayabilityAnalyzer.analyze(u"non-existent-file", dict()) + def test_mp3_utf8(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_dualmono(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', 
dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_jointstereo(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_simplestereo(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_stereo(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.mp3", dict() + ) check_default_metadata(metadata) + def test_mp3_mono(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-mono.mp3", dict() + ) check_default_metadata(metadata) + def test_ogg_stereo(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.ogg", dict() + ) check_default_metadata(metadata) + @raises(UnplayableFileError) def test_invalid_wma(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict() + ) + def test_m4a_stereo(): - metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.m4a", dict() + ) check_default_metadata(metadata) + def test_wav_stereo(): - metadata = 
PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict()) + metadata = PlayabilityAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.wav", dict() + ) check_default_metadata(metadata) + @raises(UnplayableFileError) def test_unknown(): - metadata = PlayabilityAnalyzer.analyze(u'http://www.google.com', dict()) - check_default_metadata(metadata) \ No newline at end of file + metadata = PlayabilityAnalyzer.analyze(u"http://www.google.com", dict()) + check_default_metadata(metadata) diff --git a/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py b/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py index af25b01c0..044109669 100644 --- a/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py +++ b/python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py @@ -5,80 +5,134 @@ from airtime_analyzer.replaygain_analyzer import ReplayGainAnalyzer def check_default_metadata(metadata): - ''' Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect. + """Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect. :param metadata: a metadata dictionary :return: Nothing - ''' - ''' + """ + """ # We give python-rgain some leeway here by specifying a tolerance. It's not perfectly consistent across codecs... 
assert abs(metadata['cuein']) < tolerance_seconds assert abs(metadata['cueout'] - length_seconds) < tolerance_seconds - ''' + """ tolerance = 0.60 expected_replaygain = 5.2 - print(metadata['replay_gain']) - assert abs(metadata['replay_gain'] - expected_replaygain) < tolerance + print(metadata["replay_gain"]) + assert abs(metadata["replay_gain"] - expected_replaygain) < tolerance + def test_missing_replaygain(): old_rg = ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE - ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = 'foosdaf' - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) - ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back + ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = "foosdaf" + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) + ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back + def test_invalid_filepath(): - metadata = ReplayGainAnalyzer.analyze(u'non-existent-file', dict()) + metadata = ReplayGainAnalyzer.analyze(u"non-existent-file", dict()) + def test_mp3_utf8(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict() + ) check_default_metadata(metadata) + + test_mp3_utf8.rgain = True + def test_mp3_dualmono(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict() + ) check_default_metadata(metadata) + + test_mp3_dualmono.rgain = True + def test_mp3_jointstereo(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict() + ) check_default_metadata(metadata) + + test_mp3_jointstereo.rgain = True + def 
test_mp3_simplestereo(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict() + ) check_default_metadata(metadata) + + test_mp3_simplestereo.rgain = True + def test_mp3_stereo(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.mp3", dict() + ) check_default_metadata(metadata) + + test_mp3_stereo.rgain = True + def test_mp3_mono(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-mono.mp3", dict() + ) check_default_metadata(metadata) + + test_mp3_mono.rgain = True + def test_ogg_stereo(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.ogg", dict() + ) check_default_metadata(metadata) + + test_ogg_stereo = True + def test_invalid_wma(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict() + ) + + test_invalid_wma.rgain = True + def test_mp3_missing_id3_header(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3", dict() + ) + + test_mp3_missing_id3_header.rgain = True + def test_m4a_stereo(): - metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict()) + metadata = ReplayGainAnalyzer.analyze( + u"tests/test_data/44100Hz-16bit-stereo.m4a", dict() + ) check_default_metadata(metadata) + + test_m4a_stereo.rgain = True -''' WAVE 
is not supported by python-rgain yet +""" WAVE is not supported by python-rgain yet def test_wav_stereo(): metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict()) check_default_metadata(metadata) test_wav_stereo.rgain = True -''' +""" diff --git a/python_apps/api_clients/api_clients/utils.py b/python_apps/api_clients/api_clients/utils.py index c2f236d03..3a61e1035 100644 --- a/python_apps/api_clients/api_clients/utils.py +++ b/python_apps/api_clients/api_clients/utils.py @@ -6,23 +6,28 @@ import socket import requests from requests.auth import AuthBase + def get_protocol(config): - positive_values = ['Yes', 'yes', 'True', 'true', True] - port = config['general'].get('base_port', 80) - force_ssl = config['general'].get('force_ssl', False) + positive_values = ["Yes", "yes", "True", "true", True] + port = config["general"].get("base_port", 80) + force_ssl = config["general"].get("force_ssl", False) if force_ssl in positive_values: - protocol = 'https' + protocol = "https" else: - protocol = config['general'].get('protocol') + protocol = config["general"].get("protocol") if not protocol: protocol = str(("http", "https")[int(port) == 443]) return protocol + class UrlParamDict(dict): def __missing__(self, key): - return '{' + key + '}' + return "{" + key + "}" + + +class UrlException(Exception): + pass -class UrlException(Exception): pass class IncompleteUrl(UrlException): def __init__(self, url): @@ -31,6 +36,7 @@ class IncompleteUrl(UrlException): def __str__(self): return "Incomplete url: '{}'".format(self.url) + class UrlBadParam(UrlException): def __init__(self, url, param): self.url = url @@ -39,17 +45,20 @@ class UrlBadParam(UrlException): def __str__(self): return "Bad param '{}' passed into url: '{}'".format(self.param, self.url) + class KeyAuth(AuthBase): def __init__(self, key): self.key = key def __call__(self, r): - r.headers['Authorization'] = "Api-Key {}".format(self.key) + r.headers["Authorization"] = "Api-Key 
{}".format(self.key) return r + class ApcUrl: - """ A safe abstraction and testable for filling in parameters in + """A safe abstraction and testable for filling in parameters in api_client.cfg""" + def __init__(self, base_url): self.base_url = base_url @@ -63,17 +72,18 @@ class ApcUrl: return ApcUrl(temp_url) def url(self): - if '{' in self.base_url: + if "{" in self.base_url: raise IncompleteUrl(self.base_url) else: return self.base_url + class ApiRequest: - API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout + API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout def __init__(self, name, url, logger=None, api_key=None): self.name = name - self.url = url + self.url = url self.__req = None if logger is None: self.logger = logging @@ -86,36 +96,45 @@ class ApiRequest: self.logger.debug(final_url) try: if _post_data: - response = requests.post(final_url, - data=_post_data, auth=self.auth, - timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT) + response = requests.post( + final_url, + data=_post_data, + auth=self.auth, + timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT, + ) else: - response = requests.get(final_url, params=params, auth=self.auth, - timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT) - if 'application/json' in response.headers['content-type']: + response = requests.get( + final_url, + params=params, + auth=self.auth, + timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT, + ) + if "application/json" in response.headers["content-type"]: return response.json() return response except requests.exceptions.Timeout: - self.logger.error('HTTP request to %s timed out', final_url) + self.logger.error("HTTP request to %s timed out", final_url) raise def req(self, *args, **kwargs): - self.__req = lambda : self(*args, **kwargs) + self.__req = lambda: self(*args, **kwargs) return self def retry(self, n, delay=5): """Try to send request n times. 
If after n times it fails then we finally raise exception""" - for i in range(0,n-1): + for i in range(0, n - 1): try: return self.__req() except Exception: time.sleep(delay) return self.__req() + class RequestProvider: - """ Creates the available ApiRequest instance that can be read from - a config file """ + """Creates the available ApiRequest instance that can be read from + a config file""" + def __init__(self, cfg, endpoints): self.config = cfg self.requests = {} @@ -123,27 +142,29 @@ class RequestProvider: self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:] protocol = get_protocol(self.config) - base_port = self.config['general']['base_port'] - base_url = self.config['general']['base_url'] - base_dir = self.config['general']['base_dir'] - api_base = self.config['api_base'] + base_port = self.config["general"]["base_port"] + base_url = self.config["general"]["base_url"] + base_dir = self.config["general"]["base_dir"] + api_base = self.config["api_base"] api_url = "{protocol}://{base_url}:{base_port}/{base_dir}{api_base}/{action}".format_map( - UrlParamDict(protocol=protocol, - base_url=base_url, - base_port=base_port, - base_dir=base_dir, - api_base=api_base - )) + UrlParamDict( + protocol=protocol, + base_url=base_url, + base_port=base_port, + base_dir=base_dir, + api_base=api_base, + ) + ) self.url = ApcUrl(api_url) # Now we must discover the possible actions for action_name, action_value in endpoints.items(): new_url = self.url.params(action=action_value) - if '{api_key}' in action_value: - new_url = new_url.params(api_key=self.config["general"]['api_key']) - self.requests[action_name] = ApiRequest(action_name, - new_url, - api_key=self.config['general']['api_key']) + if "{api_key}" in action_value: + new_url = new_url.params(api_key=self.config["general"]["api_key"]) + self.requests[action_name] = ApiRequest( + action_name, new_url, api_key=self.config["general"]["api_key"] + ) def available_requests(self): return 
list(self.requests.keys()) @@ -157,15 +178,20 @@ class RequestProvider: else: return super(RequestProvider, self).__getattribute__(attr) + def time_in_seconds(time): - return time.hour * 60 * 60 + \ - time.minute * 60 + \ - time.second + \ - time.microsecond / 1000000.0 + return ( + time.hour * 60 * 60 + + time.minute * 60 + + time.second + + time.microsecond / 1000000.0 + ) + def time_in_milliseconds(time): return time_in_seconds(time) * 1000 + def fromisoformat(time_string): """ This is required for Python 3.6 support. datetime.time.fromisoformat was diff --git a/python_apps/api_clients/api_clients/version1.py b/python_apps/api_clients/api_clients/version1.py index a3da27637..1e4986670 100644 --- a/python_apps/api_clients/api_clients/version1.py +++ b/python_apps/api_clients/api_clients/version1.py @@ -26,58 +26,112 @@ api_config = {} api_endpoints = {} # URL to get the version number of the server API -api_endpoints['version_url'] = 'version/api_key/{api_key}' -#URL to register a components IP Address with the central web server -api_endpoints['register_component'] = 'register-component/format/json/api_key/{api_key}/component/{component}' +api_endpoints["version_url"] = "version/api_key/{api_key}" +# URL to register a components IP Address with the central web server +api_endpoints[ + "register_component" +] = "register-component/format/json/api_key/{api_key}/component/{component}" -#media-monitor -api_endpoints['media_setup_url'] = 'media-monitor-setup/format/json/api_key/{api_key}' -api_endpoints['upload_recorded'] = 'upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}' -api_endpoints['update_media_url'] = 'reload-metadata/format/json/api_key/{api_key}/mode/{mode}' -api_endpoints['list_all_db_files'] = 'list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}' -api_endpoints['list_all_watched_dirs'] = 'list-all-watched-dirs/format/json/api_key/{api_key}' -api_endpoints['add_watched_dir'] = 
'add-watched-dir/format/json/api_key/{api_key}/path/{path}' -api_endpoints['remove_watched_dir'] = 'remove-watched-dir/format/json/api_key/{api_key}/path/{path}' -api_endpoints['set_storage_dir'] = 'set-storage-dir/format/json/api_key/{api_key}/path/{path}' -api_endpoints['update_fs_mount'] = 'update-file-system-mount/format/json/api_key/{api_key}' -api_endpoints['reload_metadata_group'] = 'reload-metadata-group/format/json/api_key/{api_key}' -api_endpoints['handle_watched_dir_missing'] = 'handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}' -#show-recorder -api_endpoints['show_schedule_url'] = 'recorded-shows/format/json/api_key/{api_key}' -api_endpoints['upload_file_url'] = 'rest/media' -api_endpoints['upload_retries'] = '3' -api_endpoints['upload_wait'] = '60' -#pypo -api_endpoints['export_url'] = 'schedule/api_key/{api_key}' -api_endpoints['get_media_url'] = 'get-media/file/{file}/api_key/{api_key}' -api_endpoints['update_item_url'] = 'notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}' -api_endpoints['update_start_playing_url'] = 'notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/' -api_endpoints['get_stream_setting'] = 'get-stream-setting/format/json/api_key/{api_key}/' -api_endpoints['update_liquidsoap_status'] = 'update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}' -api_endpoints['update_source_status'] = 'update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}' -api_endpoints['check_live_stream_auth'] = 'check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}' -api_endpoints['get_bootstrap_info'] = 'get-bootstrap-info/format/json/api_key/{api_key}' -api_endpoints['get_files_without_replay_gain'] = 'get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}' -api_endpoints['update_replay_gain_value'] = 'update-replay-gain-value/format/json/api_key/{api_key}' 
-api_endpoints['notify_webstream_data'] = 'notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json' -api_endpoints['notify_liquidsoap_started'] = 'rabbitmq-do-push/api_key/{api_key}/format/json' -api_endpoints['get_stream_parameters'] = 'get-stream-parameters/api_key/{api_key}/format/json' -api_endpoints['push_stream_stats'] = 'push-stream-stats/api_key/{api_key}/format/json' -api_endpoints['update_stream_setting_table'] = 'update-stream-setting-table/api_key/{api_key}/format/json' -api_endpoints['get_files_without_silan_value'] = 'get-files-without-silan-value/api_key/{api_key}' -api_endpoints['update_cue_values_by_silan'] = 'update-cue-values-by-silan/api_key/{api_key}' -api_endpoints['update_metadata_on_tunein'] = 'update-metadata-on-tunein/api_key/{api_key}' -api_config['api_base'] = 'api' -api_config['bin_dir'] = '/usr/lib/airtime/api_clients/' +# media-monitor +api_endpoints["media_setup_url"] = "media-monitor-setup/format/json/api_key/{api_key}" +api_endpoints[ + "upload_recorded" +] = "upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}" +api_endpoints[ + "update_media_url" +] = "reload-metadata/format/json/api_key/{api_key}/mode/{mode}" +api_endpoints[ + "list_all_db_files" +] = "list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}" +api_endpoints[ + "list_all_watched_dirs" +] = "list-all-watched-dirs/format/json/api_key/{api_key}" +api_endpoints[ + "add_watched_dir" +] = "add-watched-dir/format/json/api_key/{api_key}/path/{path}" +api_endpoints[ + "remove_watched_dir" +] = "remove-watched-dir/format/json/api_key/{api_key}/path/{path}" +api_endpoints[ + "set_storage_dir" +] = "set-storage-dir/format/json/api_key/{api_key}/path/{path}" +api_endpoints[ + "update_fs_mount" +] = "update-file-system-mount/format/json/api_key/{api_key}" +api_endpoints[ + "reload_metadata_group" +] = "reload-metadata-group/format/json/api_key/{api_key}" +api_endpoints[ + "handle_watched_dir_missing" +] = 
"handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}" +# show-recorder +api_endpoints["show_schedule_url"] = "recorded-shows/format/json/api_key/{api_key}" +api_endpoints["upload_file_url"] = "rest/media" +api_endpoints["upload_retries"] = "3" +api_endpoints["upload_wait"] = "60" +# pypo +api_endpoints["export_url"] = "schedule/api_key/{api_key}" +api_endpoints["get_media_url"] = "get-media/file/{file}/api_key/{api_key}" +api_endpoints[ + "update_item_url" +] = "notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}" +api_endpoints[ + "update_start_playing_url" +] = "notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/" +api_endpoints[ + "get_stream_setting" +] = "get-stream-setting/format/json/api_key/{api_key}/" +api_endpoints[ + "update_liquidsoap_status" +] = "update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}" +api_endpoints[ + "update_source_status" +] = "update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}" +api_endpoints[ + "check_live_stream_auth" +] = "check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}" +api_endpoints["get_bootstrap_info"] = "get-bootstrap-info/format/json/api_key/{api_key}" +api_endpoints[ + "get_files_without_replay_gain" +] = "get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}" +api_endpoints[ + "update_replay_gain_value" +] = "update-replay-gain-value/format/json/api_key/{api_key}" +api_endpoints[ + "notify_webstream_data" +] = "notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json" +api_endpoints[ + "notify_liquidsoap_started" +] = "rabbitmq-do-push/api_key/{api_key}/format/json" +api_endpoints[ + "get_stream_parameters" +] = "get-stream-parameters/api_key/{api_key}/format/json" +api_endpoints["push_stream_stats"] = "push-stream-stats/api_key/{api_key}/format/json" +api_endpoints[ + 
"update_stream_setting_table" +] = "update-stream-setting-table/api_key/{api_key}/format/json" +api_endpoints[ + "get_files_without_silan_value" +] = "get-files-without-silan-value/api_key/{api_key}" +api_endpoints[ + "update_cue_values_by_silan" +] = "update-cue-values-by-silan/api_key/{api_key}" +api_endpoints[ + "update_metadata_on_tunein" +] = "update-metadata-on-tunein/api_key/{api_key}" +api_config["api_base"] = "api" +api_config["bin_dir"] = "/usr/lib/airtime/api_clients/" ################################################################################ # Airtime API Version 1 Client ################################################################################ class AirtimeApiClient(object): - def __init__(self, logger=None,config_path='/etc/airtime/airtime.conf'): - if logger is None: self.logger = logging - else: self.logger = logger + def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"): + if logger is None: + self.logger = logging + else: + self.logger = logger # loading config file try: @@ -85,16 +139,18 @@ class AirtimeApiClient(object): self.config.update(api_config) self.services = RequestProvider(self.config, api_endpoints) except Exception as e: - self.logger.exception('Error loading config file: %s', config_path) + self.logger.exception("Error loading config file: %s", config_path) sys.exit(1) def __get_airtime_version(self): - try: return self.services.version_url()['airtime_version'] - except Exception: return -1 + try: + return self.services.version_url()["airtime_version"] + except Exception: + return -1 def __get_api_version(self): try: - return self.services.version_url()['api_version'] + return self.services.version_url()["api_version"] except Exception as e: self.logger.exception(e) return -1 @@ -105,25 +161,30 @@ class AirtimeApiClient(object): # logger.info('Airtime version found: ' + str(version)) if api_version == -1: if verbose: - logger.info('Unable to get Airtime API version number.\n') + logger.info("Unable to 
get Airtime API version number.\n") return False elif api_version[0:3] != AIRTIME_API_VERSION[0:3]: if verbose: - logger.info('Airtime API version found: ' + str(api_version)) - logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION) + logger.info("Airtime API version found: " + str(api_version)) + logger.info( + "pypo is only compatible with API version: " + AIRTIME_API_VERSION + ) return False else: if verbose: - logger.info('Airtime API version found: ' + str(api_version)) - logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION) + logger.info("Airtime API version found: " + str(api_version)) + logger.info( + "pypo is only compatible with API version: " + AIRTIME_API_VERSION + ) return True - def get_schedule(self): # TODO : properly refactor this routine # For now the return type is a little messed up for compatibility reasons - try: return (True, self.services.export_url()) - except: return (False, None) + try: + return (True, self.services.export_url()) + except: + return (False, None) def notify_liquidsoap_started(self): try: @@ -132,9 +193,9 @@ class AirtimeApiClient(object): self.logger.exception(e) def notify_media_item_start_playing(self, media_id): - """ This is a callback from liquidsoap, we use this to notify + """This is a callback from liquidsoap, we use this to notify about the currently playing *song*. We get passed a JSON string - which we handed to liquidsoap in get_liquidsoap_data(). 
""" + which we handed to liquidsoap in get_liquidsoap_data().""" try: return self.services.update_start_playing_url(media_id=media_id) except Exception as e: @@ -150,7 +211,7 @@ class AirtimeApiClient(object): def upload_recorded_show(self, files, show_id): logger = self.logger - response = '' + response = "" retries = int(self.config["upload_retries"]) retries_wait = int(self.config["upload_wait"]) @@ -165,7 +226,9 @@ class AirtimeApiClient(object): logger.debug(ApiRequest.API_HTTP_REQUEST_TIMEOUT) try: - request = requests.post(url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT)) + request = requests.post( + url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT) + ) response = request.json() logger.debug(response) @@ -199,7 +262,7 @@ class AirtimeApiClient(object): except Exception as e: self.logger.exception(e) - #wait some time before next retry + # wait some time before next retry time.sleep(retries_wait) return response @@ -207,42 +270,49 @@ class AirtimeApiClient(object): def check_live_stream_auth(self, username, password, dj_type): try: return self.services.check_live_stream_auth( - username=username, password=password, djtype=dj_type) + username=username, password=password, djtype=dj_type + ) except Exception as e: self.logger.exception(e) return {} - def construct_url(self,config_action_key): + def construct_url(self, config_action_key): """Constructs the base url for every request""" # TODO : Make other methods in this class use this this method. 
if self.config["general"]["base_dir"].startswith("/"): self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:] protocol = get_protocol(self.config) - url = "%s://%s:%s/%s%s/%s" % \ - (protocol, - self.config["general"]["base_url"], str(self.config["general"]["base_port"]), - self.config["general"]["base_dir"], self.config["api_base"], - self.config[config_action_key]) + url = "%s://%s:%s/%s%s/%s" % ( + protocol, + self.config["general"]["base_url"], + str(self.config["general"]["base_port"]), + self.config["general"]["base_dir"], + self.config["api_base"], + self.config[config_action_key], + ) url = url.replace("%%api_key%%", self.config["general"]["api_key"]) return url - def construct_rest_url(self,config_action_key): + def construct_rest_url(self, config_action_key): """Constructs the base url for RESTful requests""" if self.config["general"]["base_dir"].startswith("/"): self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:] protocol = get_protocol(self.config) - url = "%s://%s:@%s:%s/%s/%s" % \ - (protocol, self.config["general"]["api_key"], - self.config["general"]["base_url"], str(self.config["general"]["base_port"]), - self.config["general"]["base_dir"], - self.config[config_action_key]) + url = "%s://%s:@%s:%s/%s/%s" % ( + protocol, + self.config["general"]["api_key"], + self.config["general"]["base_url"], + str(self.config["general"]["base_port"]), + self.config["general"]["base_dir"], + self.config[config_action_key], + ) return url - """ Caller of this method needs to catch any exceptions such as ValueError thrown by json.loads or URLError by urllib2.urlopen """ + def setup_media_monitor(self): return self.services.media_setup_url() @@ -264,49 +334,55 @@ class AirtimeApiClient(object): # filter but here we prefer a little more verbosity to help # debugging for action in action_list: - if not 'mode' in action: - self.logger.debug("Warning: Trying to send a request element without a 'mode'") - 
self.logger.debug("Here is the the request: '%s'" % str(action) ) + if not "mode" in action: + self.logger.debug( + "Warning: Trying to send a request element without a 'mode'" + ) + self.logger.debug("Here is the the request: '%s'" % str(action)) else: # We alias the value of is_record to true or false no # matter what it is based on if it's absent in the action - if 'is_record' not in action: - action['is_record'] = 0 + if "is_record" not in action: + action["is_record"] = 0 valid_actions.append(action) # Note that we must prefix every key with: mdX where x is a number # Is there a way to format the next line a little better? The # parenthesis make the code almost unreadable - md_list = dict((("md%d" % i), json.dumps(md)) \ - for i,md in enumerate(valid_actions)) + md_list = dict( + (("md%d" % i), json.dumps(md)) for i, md in enumerate(valid_actions) + ) # For testing we add the following "dry" parameter to tell the # controller not to actually do any changes - if dry: md_list['dry'] = 1 + if dry: + md_list["dry"] = 1 self.logger.info("Pumping out %d requests..." % len(valid_actions)) return self.services.reload_metadata_group(_post_data=md_list) - #returns a list of all db files for a given directory in JSON format: - #{"files":["path/to/file1", "path/to/file2"]} - #Note that these are relative paths to the given directory. The full - #path is not returned. + # returns a list of all db files for a given directory in JSON format: + # {"files":["path/to/file1", "path/to/file2"]} + # Note that these are relative paths to the given directory. The full + # path is not returned. 
def list_all_db_files(self, dir_id, all_files=True): logger = self.logger try: all_files = "1" if all_files else "0" - response = self.services.list_all_db_files(dir_id=dir_id, - all=all_files) + response = self.services.list_all_db_files(dir_id=dir_id, all=all_files) except Exception as e: response = {} logger.error("Exception: %s", e) try: return response["files"] except KeyError: - self.logger.error("Could not find index 'files' in dictionary: %s", - str(response)) + self.logger.error( + "Could not find index 'files' in dictionary: %s", str(response) + ) return [] + """ Caller of this method needs to catch any exceptions such as ValueError thrown by json.loads or URLError by urllib2.urlopen """ + def list_all_watched_dirs(self): return self.services.list_all_watched_dirs() @@ -314,6 +390,7 @@ class AirtimeApiClient(object): Caller of this method needs to catch any exceptions such as ValueError thrown by json.loads or URLError by urllib2.urlopen """ + def add_watched_dir(self, path): return self.services.add_watched_dir(path=base64.b64encode(path)) @@ -321,6 +398,7 @@ class AirtimeApiClient(object): Caller of this method needs to catch any exceptions such as ValueError thrown by json.loads or URLError by urllib2.urlopen """ + def remove_watched_dir(self, path): return self.services.remove_watched_dir(path=base64.b64encode(path)) @@ -328,6 +406,7 @@ class AirtimeApiClient(object): Caller of this method needs to catch any exceptions such as ValueError thrown by json.loads or URLError by urllib2.urlopen """ + def set_storage_dir(self, path): return self.services.set_storage_dir(path=base64.b64encode(path)) @@ -335,15 +414,16 @@ class AirtimeApiClient(object): Caller of this method needs to catch any exceptions such as ValueError thrown by json.loads or URLError by urllib2.urlopen """ + def get_stream_setting(self): return self.services.get_stream_setting() def register_component(self, component): - """ Purpose of this method is to contact the server with a "Hey its 
+ """Purpose of this method is to contact the server with a "Hey its me!" message. This will allow the server to register the component's (component = media-monitor, pypo etc.) ip address, and later use it to query monit via monit's http service, or download log files via a - http server. """ + http server.""" return self.services.register_component(component=component) def notify_liquidsoap_status(self, msg, stream_id, time): @@ -351,24 +431,24 @@ class AirtimeApiClient(object): try: post_data = {"msg_post": msg} - #encoded_msg is no longer used server_side!! - encoded_msg = urllib.parse.quote('dummy') - self.services.update_liquidsoap_status.req(post_data, - msg=encoded_msg, - stream_id=stream_id, - boot_time=time).retry(5) + # encoded_msg is no longer used server_side!! + encoded_msg = urllib.parse.quote("dummy") + self.services.update_liquidsoap_status.req( + post_data, msg=encoded_msg, stream_id=stream_id, boot_time=time + ).retry(5) except Exception as e: self.logger.exception(e) def notify_source_status(self, sourcename, status): try: - return self.services.update_source_status.req(sourcename=sourcename, - status=status).retry(5) + return self.services.update_source_status.req( + sourcename=sourcename, status=status + ).retry(5) except Exception as e: self.logger.exception(e) def get_bootstrap_info(self): - """ Retrieve infomations needed on bootstrap time """ + """Retrieve infomations needed on bootstrap time""" return self.services.get_bootstrap_info() def get_files_without_replay_gain_value(self, dir_id): @@ -377,7 +457,7 @@ class AirtimeApiClient(object): calculated. This list of files is downloaded into a file and the path to this file is the return value. 
""" - #http://localhost/api/get-files-without-replay-gain/dir_id/1 + # http://localhost/api/get-files-without-replay-gain/dir_id/1 try: return self.services.get_files_without_replay_gain(dir_id=dir_id) except Exception as e: @@ -401,25 +481,31 @@ class AirtimeApiClient(object): 'pairs' is a list of pairs in (x, y), where x is the file's database row id and y is the file's replay_gain value in dB """ - self.logger.debug(self.services.update_replay_gain_value( - _post_data={'data': json.dumps(pairs)})) - + self.logger.debug( + self.services.update_replay_gain_value( + _post_data={"data": json.dumps(pairs)} + ) + ) def update_cue_values_by_silan(self, pairs): """ 'pairs' is a list of pairs in (x, y), where x is the file's database row id and y is the file's cue values in dB """ - return self.services.update_cue_values_by_silan(_post_data={'data': json.dumps(pairs)}) - + return self.services.update_cue_values_by_silan( + _post_data={"data": json.dumps(pairs)} + ) def notify_webstream_data(self, data, media_id): """ Update the server with the latest metadata we've received from the external webstream """ - self.logger.info( self.services.notify_webstream_data.req( - _post_data={'data':data}, media_id=str(media_id)).retry(5)) + self.logger.info( + self.services.notify_webstream_data.req( + _post_data={"data": data}, media_id=str(media_id) + ).retry(5) + ) def get_stream_parameters(self): response = self.services.get_stream_parameters() @@ -428,12 +514,16 @@ class AirtimeApiClient(object): def push_stream_stats(self, data): # TODO : users of this method should do their own error handling - response = self.services.push_stream_stats(_post_data={'data': json.dumps(data)}) + response = self.services.push_stream_stats( + _post_data={"data": json.dumps(data)} + ) return response def update_stream_setting_table(self, data): try: - response = self.services.update_stream_setting_table(_post_data={'data': json.dumps(data)}) + response = self.services.update_stream_setting_table( + 
_post_data={"data": json.dumps(data)} + ) return response except Exception as e: self.logger.exception(e) diff --git a/python_apps/api_clients/api_clients/version2.py b/python_apps/api_clients/api_clients/version2.py index c927f1f10..983fba487 100644 --- a/python_apps/api_clients/api_clients/version2.py +++ b/python_apps/api_clients/api_clients/version2.py @@ -18,17 +18,18 @@ LIBRETIME_API_VERSION = "2.0" api_config = {} api_endpoints = {} -api_endpoints['version_url'] = 'version/' -api_endpoints['schedule_url'] = 'schedule/' -api_endpoints['webstream_url'] = 'webstreams/{id}/' -api_endpoints['show_instance_url'] = 'show-instances/{id}/' -api_endpoints['show_url'] = 'shows/{id}/' -api_endpoints['file_url'] = 'files/{id}/' -api_endpoints['file_download_url'] = 'files/{id}/download/' -api_config['api_base'] = 'api/v2' +api_endpoints["version_url"] = "version/" +api_endpoints["schedule_url"] = "schedule/" +api_endpoints["webstream_url"] = "webstreams/{id}/" +api_endpoints["show_instance_url"] = "show-instances/{id}/" +api_endpoints["show_url"] = "shows/{id}/" +api_endpoints["file_url"] = "files/{id}/" +api_endpoints["file_download_url"] = "files/{id}/download/" +api_config["api_base"] = "api/v2" + class AirtimeApiClient: - def __init__(self, logger=None, config_path='/etc/airtime/airtime.conf'): + def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"): if logger is None: self.logger = logging else: @@ -39,87 +40,89 @@ class AirtimeApiClient: self.config.update(api_config) self.services = RequestProvider(self.config, api_endpoints) except Exception as e: - self.logger.exception('Error loading config file: %s', config_path) + self.logger.exception("Error loading config file: %s", config_path) sys.exit(1) def get_schedule(self): current_time = datetime.datetime.utcnow() end_time = current_time + datetime.timedelta(hours=1) - str_current = current_time.isoformat(timespec='seconds') - str_end = end_time.isoformat(timespec='seconds') - data = 
self.services.schedule_url(params={ - 'ends__range': ('{}Z,{}Z'.format(str_current, str_end)), - }) - result = {'media': {} } - for item in data: - start = isoparse(item['starts']) - key = start.strftime('%YYYY-%mm-%dd-%HH-%MM-%SS') - end = isoparse(item['ends']) - - show_instance = self.services.show_instance_url(id=item['instance_id']) - show = self.services.show_url(id=show_instance['show_id']) - - result['media'][key] = { - 'start': start.strftime('%Y-%m-%d-%H-%M-%S'), - 'end': end.strftime('%Y-%m-%d-%H-%M-%S'), - 'row_id': item['id'] + str_current = current_time.isoformat(timespec="seconds") + str_end = end_time.isoformat(timespec="seconds") + data = self.services.schedule_url( + params={ + "ends__range": ("{}Z,{}Z".format(str_current, str_end)), } - current = result['media'][key] - if item['file']: - current['independent_event'] = False - current['type'] = 'file' - current['id'] = item['file_id'] + ) + result = {"media": {}} + for item in data: + start = isoparse(item["starts"]) + key = start.strftime("%YYYY-%mm-%dd-%HH-%MM-%SS") + end = isoparse(item["ends"]) - fade_in = time_in_milliseconds(fromisoformat(item['fade_in'])) - fade_out = time_in_milliseconds(fromisoformat(item['fade_out'])) + show_instance = self.services.show_instance_url(id=item["instance_id"]) + show = self.services.show_url(id=show_instance["show_id"]) - cue_in = time_in_seconds(fromisoformat(item['cue_in'])) - cue_out = time_in_seconds(fromisoformat(item['cue_out'])) + result["media"][key] = { + "start": start.strftime("%Y-%m-%d-%H-%M-%S"), + "end": end.strftime("%Y-%m-%d-%H-%M-%S"), + "row_id": item["id"], + } + current = result["media"][key] + if item["file"]: + current["independent_event"] = False + current["type"] = "file" + current["id"] = item["file_id"] - current['fade_in'] = fade_in - current['fade_out'] = fade_out - current['cue_in'] = cue_in - current['cue_out'] = cue_out + fade_in = time_in_milliseconds(fromisoformat(item["fade_in"])) + fade_out = 
time_in_milliseconds(fromisoformat(item["fade_out"])) - info = self.services.file_url(id=item['file_id']) - current['metadata'] = info - current['uri'] = item['file'] - current['filesize'] = info['filesize'] - elif item['stream']: - current['independent_event'] = True - current['id'] = item['stream_id'] - info = self.services.webstream_url(id=item['stream_id']) - current['uri'] = info['url'] - current['type'] = 'stream_buffer_start' + cue_in = time_in_seconds(fromisoformat(item["cue_in"])) + cue_out = time_in_seconds(fromisoformat(item["cue_out"])) + + current["fade_in"] = fade_in + current["fade_out"] = fade_out + current["cue_in"] = cue_in + current["cue_out"] = cue_out + + info = self.services.file_url(id=item["file_id"]) + current["metadata"] = info + current["uri"] = item["file"] + current["filesize"] = info["filesize"] + elif item["stream"]: + current["independent_event"] = True + current["id"] = item["stream_id"] + info = self.services.webstream_url(id=item["stream_id"]) + current["uri"] = info["url"] + current["type"] = "stream_buffer_start" # Stream events are instantaneous - current['end'] = current['start'] + current["end"] = current["start"] - result['{}_0'.format(key)] = { - 'id': current['id'], - 'type': 'stream_output_start', - 'start': current['start'], - 'end': current['start'], - 'uri': current['uri'], - 'row_id': current['row_id'], - 'independent_event': current['independent_event'], + result["{}_0".format(key)] = { + "id": current["id"], + "type": "stream_output_start", + "start": current["start"], + "end": current["start"], + "uri": current["uri"], + "row_id": current["row_id"], + "independent_event": current["independent_event"], } result[end.isoformat()] = { - 'type': 'stream_buffer_end', - 'start': current['end'], - 'end': current['end'], - 'uri': current['uri'], - 'row_id': current['row_id'], - 'independent_event': current['independent_event'], + "type": "stream_buffer_end", + "start": current["end"], + "end": current["end"], + "uri": 
current["uri"], + "row_id": current["row_id"], + "independent_event": current["independent_event"], } - result['{}_0'.format(end.isoformat())] = { - 'type': 'stream_output_end', - 'start': current['end'], - 'end': current['end'], - 'uri': current['uri'], - 'row_id': current['row_id'], - 'independent_event': current['independent_event'], + result["{}_0".format(end.isoformat())] = { + "type": "stream_output_end", + "start": current["end"], + "end": current["end"], + "uri": current["uri"], + "row_id": current["row_id"], + "independent_event": current["independent_event"], } return result diff --git a/python_apps/api_clients/setup.py b/python_apps/api_clients/setup.py index cfcefd462..78638cc9d 100644 --- a/python_apps/api_clients/setup.py +++ b/python_apps/api_clients/setup.py @@ -9,17 +9,19 @@ script_path = os.path.dirname(os.path.realpath(__file__)) print(script_path) os.chdir(script_path) -setup(name='api_clients', - version='2.0.0', - description='LibreTime API Client', - url='http://github.com/LibreTime/Libretime', - author='LibreTime Contributors', - license='AGPLv3', - packages=['api_clients'], - scripts=[], - install_requires=[ - 'configobj', - 'python-dateutil', - ], - zip_safe=False, - data_files=[]) +setup( + name="api_clients", + version="2.0.0", + description="LibreTime API Client", + url="http://github.com/LibreTime/Libretime", + author="LibreTime Contributors", + license="AGPLv3", + packages=["api_clients"], + scripts=[], + install_requires=[ + "configobj", + "python-dateutil", + ], + zip_safe=False, + data_files=[], +) diff --git a/python_apps/api_clients/tests/test_apcurl.py b/python_apps/api_clients/tests/test_apcurl.py index a6b2a3366..8ed31eb1d 100644 --- a/python_apps/api_clients/tests/test_apcurl.py +++ b/python_apps/api_clients/tests/test_apcurl.py @@ -2,6 +2,7 @@ import unittest from api_clients.utils import ApcUrl, UrlBadParam, IncompleteUrl + class TestApcUrl(unittest.TestCase): def test_init(self): url = "/testing" @@ -10,22 +11,23 @@ class 
TestApcUrl(unittest.TestCase): def test_params_1(self): u = ApcUrl("/testing/{key}") - self.assertEqual(u.params(key='val').url(), '/testing/val') + self.assertEqual(u.params(key="val").url(), "/testing/val") def test_params_2(self): - u = ApcUrl('/testing/{key}/{api}/more_testing') - full_url = u.params(key="AAA",api="BBB").url() - self.assertEqual(full_url, '/testing/AAA/BBB/more_testing') + u = ApcUrl("/testing/{key}/{api}/more_testing") + full_url = u.params(key="AAA", api="BBB").url() + self.assertEqual(full_url, "/testing/AAA/BBB/more_testing") def test_params_ex(self): u = ApcUrl("/testing/{key}") with self.assertRaises(UrlBadParam): - u.params(bad_key='testing') + u.params(bad_key="testing") def test_url(self): u = "one/two/three" - self.assertEqual( ApcUrl(u).url(), u ) + self.assertEqual(ApcUrl(u).url(), u) def test_url_ex(self): - u = ApcUrl('/{one}/{two}/three').params(two='testing') - with self.assertRaises(IncompleteUrl): u.url() + u = ApcUrl("/{one}/{two}/three").params(two="testing") + with self.assertRaises(IncompleteUrl): + u.url() diff --git a/python_apps/api_clients/tests/test_apirequest.py b/python_apps/api_clients/tests/test_apirequest.py index 4f37766c7..fdf1a051e 100644 --- a/python_apps/api_clients/tests/test_apirequest.py +++ b/python_apps/api_clients/tests/test_apirequest.py @@ -4,39 +4,43 @@ import json from mock import MagicMock, patch from api_clients.utils import ApcUrl, ApiRequest + class ResponseInfo: @property def headers(self): - return {'content-type': 'application/json'} + return {"content-type": "application/json"} def json(self): - return {'ok', 'ok'} + return {"ok", "ok"} + class TestApiRequest(unittest.TestCase): def test_init(self): - u = ApiRequest('request_name', ApcUrl('/test/ing')) + u = ApiRequest("request_name", ApcUrl("/test/ing")) self.assertEqual(u.name, "request_name") def test_call_json(self): - ret = {'ok':'ok'} + ret = {"ok": "ok"} read = MagicMock() - read.headers = {'content-type': 'application/json'} + 
read.headers = {"content-type": "application/json"} read.json = MagicMock(return_value=ret) - u = 'http://localhost/testing' - with patch('requests.get') as mock_method: + u = "http://localhost/testing" + with patch("requests.get") as mock_method: mock_method.return_value = read - request = ApiRequest('mm', ApcUrl(u))() + request = ApiRequest("mm", ApcUrl(u))() self.assertEqual(request, ret) def test_call_html(self): - ret = '' + ret = "" read = MagicMock() - read.headers = {'content-type': 'application/html'} + read.headers = {"content-type": "application/html"} read.text = MagicMock(return_value=ret) - u = 'http://localhost/testing' - with patch('requests.get') as mock_method: + u = "http://localhost/testing" + with patch("requests.get") as mock_method: mock_method.return_value = read - request = ApiRequest('mm', ApcUrl(u))() + request = ApiRequest("mm", ApcUrl(u))() self.assertEqual(request.text(), ret) -if __name__ == '__main__': unittest.main() + +if __name__ == "__main__": + unittest.main() diff --git a/python_apps/api_clients/tests/test_requestprovider.py b/python_apps/api_clients/tests/test_requestprovider.py index c3592d549..24be93a29 100644 --- a/python_apps/api_clients/tests/test_requestprovider.py +++ b/python_apps/api_clients/tests/test_requestprovider.py @@ -6,18 +6,19 @@ from configobj import ConfigObj from api_clients.version1 import api_config from api_clients.utils import RequestProvider + class TestRequestProvider(unittest.TestCase): def setUp(self): self.cfg = api_config - self.cfg['general'] = {} - self.cfg['general']['base_dir'] = '/test' - self.cfg['general']['base_port'] = 80 - self.cfg['general']['base_url'] = 'localhost' - self.cfg['general']['api_key'] = 'TEST_KEY' - self.cfg['api_base'] = 'api' + self.cfg["general"] = {} + self.cfg["general"]["base_dir"] = "/test" + self.cfg["general"]["base_port"] = 80 + self.cfg["general"]["base_url"] = "localhost" + self.cfg["general"]["api_key"] = "TEST_KEY" + self.cfg["api_base"] = "api" def 
test_test(self): - self.assertTrue('general' in self.cfg) + self.assertTrue("general" in self.cfg) def test_init(self): rp = RequestProvider(self.cfg, {}) @@ -25,12 +26,14 @@ class TestRequestProvider(unittest.TestCase): def test_contains(self): methods = { - 'upload_recorded': '/1/', - 'update_media_url': '/2/', - 'list_all_db_files': '/3/', + "upload_recorded": "/1/", + "update_media_url": "/2/", + "list_all_db_files": "/3/", } rp = RequestProvider(self.cfg, methods) for meth in methods: self.assertTrue(meth in rp.requests) -if __name__ == '__main__': unittest.main() + +if __name__ == "__main__": + unittest.main() diff --git a/python_apps/api_clients/tests/test_utils.py b/python_apps/api_clients/tests/test_utils.py index 525b1210c..125fe9d35 100644 --- a/python_apps/api_clients/tests/test_utils.py +++ b/python_apps/api_clients/tests/test_utils.py @@ -4,13 +4,14 @@ import configparser import unittest from api_clients import utils + def get_force_ssl(value, useConfigParser): config = {} if useConfigParser: config = configparser.ConfigParser() - config['general'] = { - 'base_port': 80, - 'force_ssl': value, + config["general"] = { + "base_port": 80, + "force_ssl": value, } return utils.get_protocol(config) @@ -27,65 +28,65 @@ class TestTime(unittest.TestCase): class TestGetProtocol(unittest.TestCase): def test_dict_config_empty_http(self): - config = {'general': {}} + config = {"general": {}} protocol = utils.get_protocol(config) - self.assertEqual(protocol, 'http') + self.assertEqual(protocol, "http") def test_dict_config_http(self): config = { - 'general': { - 'base_port': 80, + "general": { + "base_port": 80, }, } protocol = utils.get_protocol(config) - self.assertEqual(protocol, 'http') + self.assertEqual(protocol, "http") def test_dict_config_https(self): config = { - 'general': { - 'base_port': 443, + "general": { + "base_port": 443, }, } protocol = utils.get_protocol(config) - self.assertEqual(protocol, 'https') + self.assertEqual(protocol, "https") def 
test_dict_config_force_https(self): - postive_values = ['yes', 'Yes', 'True', 'true', True] - negative_values = ['no', 'No', 'False', 'false', False] + postive_values = ["yes", "Yes", "True", "true", True] + negative_values = ["no", "No", "False", "false", False] for value in postive_values: - self.assertEqual(get_force_ssl(value, False), 'https') + self.assertEqual(get_force_ssl(value, False), "https") for value in negative_values: - self.assertEqual(get_force_ssl(value, False), 'http') + self.assertEqual(get_force_ssl(value, False), "http") def test_configparser_config_empty_http(self): config = configparser.ConfigParser() - config['general'] = {} + config["general"] = {} protocol = utils.get_protocol(config) - self.assertEqual(protocol, 'http') + self.assertEqual(protocol, "http") def test_configparser_config_http(self): config = configparser.ConfigParser() - config['general'] = { - 'base_port': 80, + config["general"] = { + "base_port": 80, } protocol = utils.get_protocol(config) - self.assertEqual(protocol, 'http') + self.assertEqual(protocol, "http") def test_configparser_config_https(self): config = configparser.ConfigParser() - config['general'] = { - 'base_port': 443, + config["general"] = { + "base_port": 443, } protocol = utils.get_protocol(config) - self.assertEqual(protocol, 'https') + self.assertEqual(protocol, "https") def test_configparser_config_force_https(self): - postive_values = ['yes', 'Yes', 'True', 'true', True] - negative_values = ['no', 'No', 'False', 'false', False] + postive_values = ["yes", "Yes", "True", "true", True] + negative_values = ["no", "No", "False", "false", False] for value in postive_values: - self.assertEqual(get_force_ssl(value, True), 'https') + self.assertEqual(get_force_ssl(value, True), "https") for value in negative_values: - self.assertEqual(get_force_ssl(value, True), 'http') + self.assertEqual(get_force_ssl(value, True), "http") def test_fromisoformat(self): time = { @@ -96,4 +97,6 @@ class 
TestGetProtocol(unittest.TestCase): result = utils.fromisoformat(time_string) self.assertEqual(result, expected) -if __name__ == '__main__': unittest.main() + +if __name__ == "__main__": + unittest.main() diff --git a/python_apps/icecast2/install/icecast2-install.py b/python_apps/icecast2/install/icecast2-install.py index d12588ab7..09e3b0695 100644 --- a/python_apps/icecast2/install/icecast2-install.py +++ b/python_apps/icecast2/install/icecast2-install.py @@ -9,14 +9,18 @@ if os.geteuid() != 0: print("Please run this as root.") sys.exit(1) + def get_current_script_dir(): - current_script_dir = os.path.realpath(__file__) - index = current_script_dir.rindex('/') - return current_script_dir[0:index] + current_script_dir = os.path.realpath(__file__) + index = current_script_dir.rindex("/") + return current_script_dir[0:index] + try: current_script_dir = get_current_script_dir() - shutil.copy(current_script_dir+"/../airtime-icecast-status.xsl", "/usr/share/icecast2/web") + shutil.copy( + current_script_dir + "/../airtime-icecast-status.xsl", "/usr/share/icecast2/web" + ) except Exception as e: print("exception: {}".format(e)) diff --git a/python_apps/pypo/bin/airtime-liquidsoap b/python_apps/pypo/bin/airtime-liquidsoap index 7d5864022..74e58620e 100755 --- a/python_apps/pypo/bin/airtime-liquidsoap +++ b/python_apps/pypo/bin/airtime-liquidsoap @@ -2,5 +2,5 @@ # -*- coding: utf-8 -*- import runpy -# Run the liquidsoap python module -runpy.run_module('liquidsoap') +# Run the liquidsoap python module +runpy.run_module("liquidsoap") diff --git a/python_apps/pypo/bin/airtime-playout b/python_apps/pypo/bin/airtime-playout index 9fe0ae5e8..dad5f9480 100755 --- a/python_apps/pypo/bin/airtime-playout +++ b/python_apps/pypo/bin/airtime-playout @@ -3,4 +3,3 @@ import runpy runpy.run_module("pypo", run_name="__main__") - diff --git a/python_apps/pypo/bin/pyponotify b/python_apps/pypo/bin/pyponotify index ff1feaaa5..fcf60e3a9 100755 --- a/python_apps/pypo/bin/pyponotify +++ 
b/python_apps/pypo/bin/pyponotify @@ -27,27 +27,75 @@ import json from configobj import ConfigObj # custom imports -#from util import * +# from util import * from api_clients import version1 as api_client LOG_LEVEL = logging.INFO -LOG_PATH = '/var/log/airtime/pypo/notify.log' +LOG_PATH = "/var/log/airtime/pypo/notify.log" # help screeen / info usage = "%prog [options]" + " - notification gateway" parser = OptionParser(usage=usage) # Options -parser.add_option("-d", "--data", help="Pass JSON data from Liquidsoap into this script.", metavar="data") -parser.add_option("-m", "--media-id", help="ID of the file that is currently playing.", metavar="media_id") -parser.add_option("-e", "--error", action="store", dest="error", type="string", help="Liquidsoap error msg.", metavar="error_msg") +parser.add_option( + "-d", + "--data", + help="Pass JSON data from Liquidsoap into this script.", + metavar="data", +) +parser.add_option( + "-m", + "--media-id", + help="ID of the file that is currently playing.", + metavar="media_id", +) +parser.add_option( + "-e", + "--error", + action="store", + dest="error", + type="string", + help="Liquidsoap error msg.", + metavar="error_msg", +) parser.add_option("-s", "--stream-id", help="ID stream", metavar="stream_id") -parser.add_option("-c", "--connect", help="Liquidsoap connected", action="store_true", metavar="connect") -parser.add_option("-t", "--time", help="Liquidsoap boot up time", action="store", dest="time", metavar="time", type="string") -parser.add_option("-x", "--source-name", help="source connection name", metavar="source_name") -parser.add_option("-y", "--source-status", help="source connection status", metavar="source_status") -parser.add_option("-w", "--webstream", help="JSON metadata associated with webstream", metavar="json_data") -parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started", metavar="json_data", action="store_true", default=False) +parser.add_option( + "-c", + "--connect", + 
help="Liquidsoap connected", + action="store_true", + metavar="connect", +) +parser.add_option( + "-t", + "--time", + help="Liquidsoap boot up time", + action="store", + dest="time", + metavar="time", + type="string", +) +parser.add_option( + "-x", "--source-name", help="source connection name", metavar="source_name" +) +parser.add_option( + "-y", "--source-status", help="source connection status", metavar="source_status" +) +parser.add_option( + "-w", + "--webstream", + help="JSON metadata associated with webstream", + metavar="json_data", +) +parser.add_option( + "-n", + "--liquidsoap-started", + help="notify liquidsoap started", + metavar="json_data", + action="store_true", + default=False, +) # parse options @@ -55,12 +103,15 @@ parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started" # Set up logging logging.captureWarnings(True) -logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s") +logFormatter = logging.Formatter( + "%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s" +) rootLogger = logging.getLogger() rootLogger.setLevel(LOG_LEVEL) -fileHandler = logging.handlers.RotatingFileHandler(filename=LOG_PATH, maxBytes=1024*1024*30, - backupCount=8) +fileHandler = logging.handlers.RotatingFileHandler( + filename=LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8 +) fileHandler.setFormatter(logFormatter) rootLogger.addHandler(fileHandler) @@ -69,15 +120,15 @@ consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) logger = rootLogger -#need to wait for Python 2.7 for this.. -#logging.captureWarnings(True) +# need to wait for Python 2.7 for this.. 
+# logging.captureWarnings(True) # loading config file try: - config = ConfigObj('/etc/airtime/airtime.conf') + config = ConfigObj("/etc/airtime/airtime.conf") except Exception as e: - logger.error('Error loading config file: %s', e) + logger.error("Error loading config file: %s", e) sys.exit() @@ -90,39 +141,41 @@ class Notify: self.api_client.notify_liquidsoap_started() def notify_media_start_playing(self, media_id): - logger.debug('#################################################') - logger.debug('# Calling server to update about what\'s playing #') - logger.debug('#################################################') + logger.debug("#################################################") + logger.debug("# Calling server to update about what's playing #") + logger.debug("#################################################") response = self.api_client.notify_media_item_start_playing(media_id) logger.debug("Response: " + json.dumps(response)) # @pram time: time that LS started def notify_liquidsoap_status(self, msg, stream_id, time): - logger.info('#################################################') - logger.info('# Calling server to update liquidsoap status #') - logger.info('#################################################') - logger.info('msg = ' + str(msg)) + logger.info("#################################################") + logger.info("# Calling server to update liquidsoap status #") + logger.info("#################################################") + logger.info("msg = " + str(msg)) response = self.api_client.notify_liquidsoap_status(msg, stream_id, time) logger.info("Response: " + json.dumps(response)) def notify_source_status(self, source_name, status): - logger.debug('#################################################') - logger.debug('# Calling server to update source status #') - logger.debug('#################################################') - logger.debug('msg = ' + str(source_name) + ' : ' + str(status)) + 
logger.debug("#################################################") + logger.debug("# Calling server to update source status #") + logger.debug("#################################################") + logger.debug("msg = " + str(source_name) + " : " + str(status)) response = self.api_client.notify_source_status(source_name, status) logger.debug("Response: " + json.dumps(response)) def notify_webstream_data(self, data, media_id): - logger.debug('#################################################') - logger.debug('# Calling server to update webstream data #') - logger.debug('#################################################') + logger.debug("#################################################") + logger.debug("# Calling server to update webstream data #") + logger.debug("#################################################") response = self.api_client.notify_webstream_data(data, media_id) logger.debug("Response: " + json.dumps(response)) def run_with_options(self, options): if options.error and options.stream_id: - self.notify_liquidsoap_status(options.error, options.stream_id, options.time) + self.notify_liquidsoap_status( + options.error, options.stream_id, options.time + ) elif options.connect and options.stream_id: self.notify_liquidsoap_status("OK", options.stream_id, options.time) elif options.source_name and options.source_status: @@ -134,15 +187,17 @@ class Notify: elif options.liquidsoap_started: self.notify_liquidsoap_started() else: - logger.debug("Unrecognized option in options({}). Doing nothing".format(options)) + logger.debug( + "Unrecognized option in options({}). 
Doing nothing".format(options) + ) -if __name__ == '__main__': +if __name__ == "__main__": print() - print('#########################################') - print('# *** pypo *** #') - print('# pypo notification gateway #') - print('#########################################') + print("#########################################") + print("# *** pypo *** #") + print("# pypo notification gateway #") + print("#########################################") # initialize try: @@ -150,4 +205,3 @@ if __name__ == '__main__': n.run_with_options(options) except Exception as e: print(traceback.format_exc()) - diff --git a/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py b/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py index ca5dd764f..447a3310b 100644 --- a/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py +++ b/python_apps/pypo/liquidsoap/generate_liquidsoap_cfg.py @@ -7,9 +7,10 @@ import time import traceback from api_clients.version1 import AirtimeApiClient + def generate_liquidsoap_config(ss): - data = ss['msg'] - fh = open('/etc/airtime/liquidsoap.cfg', 'w') + data = ss["msg"] + fh = open("/etc/airtime/liquidsoap.cfg", "w") fh.write("################################################\n") fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n") fh.write("################################################\n") @@ -17,17 +18,17 @@ def generate_liquidsoap_config(ss): for key, value in data.items(): try: - if not "port" in key and not "bitrate" in key: # Stupid hack + if not "port" in key and not "bitrate" in key: # Stupid hack raise ValueError() str_buffer = "%s = %s\n" % (key, int(value)) except ValueError: - try: # Is it a boolean? - if value=="true" or value=="false": + try: # Is it a boolean? 
+ if value == "true" or value == "false": str_buffer = "%s = %s\n" % (key, value.lower()) else: - raise ValueError() # Just drop into the except below - except: #Everything else is a string - str_buffer = "%s = \"%s\"\n" % (key, value) + raise ValueError() # Just drop into the except below + except: # Everything else is a string + str_buffer = '%s = "%s"\n' % (key, value) fh.write(str_buffer) # ignore squashes unused variable errors from Liquidsoap @@ -38,8 +39,9 @@ def generate_liquidsoap_config(ss): fh.write('auth_path = "%s/liquidsoap_auth.py"\n' % auth_path) fh.close() + def run(): - logging.basicConfig(format='%(message)s') + logging.basicConfig(format="%(message)s") attempts = 0 max_attempts = 10 successful = False diff --git a/python_apps/pypo/liquidsoap/liquidsoap_auth.py b/python_apps/pypo/liquidsoap/liquidsoap_auth.py index efa087e98..df55c6844 100644 --- a/python_apps/pypo/liquidsoap/liquidsoap_auth.py +++ b/python_apps/pypo/liquidsoap/liquidsoap_auth.py @@ -9,16 +9,16 @@ dj_type = sys.argv[1] username = sys.argv[2] password = sys.argv[3] -source_type = '' -if dj_type == '--master': - source_type = 'master' -elif dj_type == '--dj': - source_type = 'dj' +source_type = "" +if dj_type == "--master": + source_type = "master" +elif dj_type == "--dj": + source_type = "dj" response = api_clients.check_live_stream_auth(username, password, source_type) -if 'msg' in response and response['msg'] == True: - print(response['msg']) +if "msg" in response and response["msg"] == True: + print(response["msg"]) sys.exit(0) else: print(False) diff --git a/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py b/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py index 18a3a5dac..9f9d8ffaa 100644 --- a/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py +++ b/python_apps/pypo/liquidsoap/liquidsoap_prepare_terminate.py @@ -4,17 +4,16 @@ import telnetlib import sys try: - config = ConfigObj('/etc/airtime/airtime.conf') - LS_HOST = config['pypo']['ls_host'] 
- LS_PORT = config['pypo']['ls_port'] + config = ConfigObj("/etc/airtime/airtime.conf") + LS_HOST = config["pypo"]["ls_host"] + LS_PORT = config["pypo"]["ls_port"] tn = telnetlib.Telnet(LS_HOST, LS_PORT) tn.write("master_harbor.stop\n") tn.write("live_dj_harbor.stop\n") - tn.write('exit\n') + tn.write("exit\n") tn.read_all() except Exception as e: print("Error loading config file: {}".format(e)) sys.exit() - diff --git a/python_apps/pypo/pypo/__main__.py b/python_apps/pypo/pypo/__main__.py index f8c40a7bf..ff2b02666 100644 --- a/python_apps/pypo/pypo/__main__.py +++ b/python_apps/pypo/pypo/__main__.py @@ -18,6 +18,7 @@ from configobj import ConfigObj from datetime import datetime from optparse import OptionParser import importlib + try: from queue import Queue except ImportError: # Python 2.7.5 (CentOS 7) diff --git a/python_apps/pypo/pypo/listenerstat.py b/python_apps/pypo/pypo/listenerstat.py index 2df8dc031..bd00d318b 100644 --- a/python_apps/pypo/pypo/listenerstat.py +++ b/python_apps/pypo/pypo/listenerstat.py @@ -10,9 +10,10 @@ import time from api_clients import version1 as api_client + class ListenerStat(Thread): - HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout + HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout def __init__(self, config, logger=None): Thread.__init__(self) @@ -28,50 +29,49 @@ class ListenerStat(Thread): for node in nodelist: if node.nodeType == node.TEXT_NODE: rc.append(node.data) - return ''.join(rc) + return "".join(rc) def get_stream_parameters(self): - #[{"user":"", "password":"", "url":"", "port":""},{},{}] + # [{"user":"", "password":"", "url":"", "port":""},{},{}] return self.api_client.get_stream_parameters() - def get_stream_server_xml(self, ip, url, is_shoutcast=False): auth_string = "%(admin_user)s:%(admin_pass)s" % ip - encoded = base64.b64encode(auth_string.encode('utf-8')) + encoded = base64.b64encode(auth_string.encode("utf-8")) - header = {"Authorization":"Basic %s" % encoded.decode('ascii')} + header = 
{"Authorization": "Basic %s" % encoded.decode("ascii")} if is_shoutcast: - #user agent is required for shoutcast auth, otherwise it returns 404. + # user agent is required for shoutcast auth, otherwise it returns 404. user_agent = "Mozilla/5.0 (Linux; rv:22.0) Gecko/20130405 Firefox/22.0" header["User-Agent"] = user_agent req = urllib.request.Request( - #assuming that the icecast stats path is /admin/stats.xml - #need to fix this + # assuming that the icecast stats path is /admin/stats.xml + # need to fix this url=url, - headers=header) + headers=header, + ) f = urllib.request.urlopen(req, timeout=ListenerStat.HTTP_REQUEST_TIMEOUT) document = f.read() return document - def get_icecast_stats(self, ip): document = None if "airtime.pro" in ip["host"].lower(): - url = 'http://%(host)s:%(port)s/stats.xsl' % ip + url = "http://%(host)s:%(port)s/stats.xsl" % ip document = self.get_stream_server_xml(ip, url) else: - url = 'http://%(host)s:%(port)s/admin/stats.xml' % ip + url = "http://%(host)s:%(port)s/admin/stats.xml" % ip document = self.get_stream_server_xml(ip, url) dom = defusedxml.minidom.parseString(document) sources = dom.getElementsByTagName("source") mount_stats = None for s in sources: - #drop the leading '/' character + # drop the leading '/' character mount_name = s.getAttribute("mount")[1:] if mount_name == ip["mount"]: timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") @@ -80,14 +80,16 @@ class ListenerStat(Thread): if len(listeners): num_listeners = self.get_node_text(listeners[0].childNodes) - mount_stats = {"timestamp":timestamp, \ - "num_listeners": num_listeners, \ - "mount_name": mount_name} + mount_stats = { + "timestamp": timestamp, + "num_listeners": num_listeners, + "mount_name": mount_name, + } return mount_stats def get_shoutcast_stats(self, ip): - url = 'http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml' % ip + url = "http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml" % ip document = self.get_stream_server_xml(ip, url, 
is_shoutcast=True) dom = defusedxml.minidom.parseString(document) current_listeners = dom.getElementsByTagName("CURRENTLISTENERS") @@ -97,34 +99,37 @@ class ListenerStat(Thread): if len(current_listeners): num_listeners = self.get_node_text(current_listeners[0].childNodes) - mount_stats = {"timestamp":timestamp, \ - "num_listeners": num_listeners, \ - "mount_name": "shoutcast"} + mount_stats = { + "timestamp": timestamp, + "num_listeners": num_listeners, + "mount_name": "shoutcast", + } return mount_stats def get_stream_stats(self, stream_parameters): stats = [] - #iterate over stream_parameters which is a list of dicts. Each dict - #represents one Airtime stream (currently this limit is 3). - #Note that there can be optimizations done, since if all three - #streams are the same server, we will still initiate 3 separate - #connections + # iterate over stream_parameters which is a list of dicts. Each dict + # represents one Airtime stream (currently this limit is 3). + # Note that there can be optimizations done, since if all three + # streams are the same server, we will still initiate 3 separate + # connections for k, v in stream_parameters.items(): - if v["enable"] == 'true': + if v["enable"] == "true": try: if v["output"] == "icecast": mount_stats = self.get_icecast_stats(v) - if mount_stats: stats.append(mount_stats) + if mount_stats: + stats.append(mount_stats) else: stats.append(self.get_shoutcast_stats(v)) - self.update_listener_stat_error(v["mount"], 'OK') + self.update_listener_stat_error(v["mount"], "OK") except Exception as e: try: self.update_listener_stat_error(v["mount"], str(e)) except Exception as e: - self.logger.error('Exception: %s', e) + self.logger.error("Exception: %s", e) return stats @@ -132,15 +137,15 @@ class ListenerStat(Thread): self.api_client.push_stream_stats(stats) def update_listener_stat_error(self, stream_id, error): - keyname = '%s_listener_stat_error' % stream_id + keyname = "%s_listener_stat_error" % stream_id data = {keyname: 
error} self.api_client.update_stream_setting_table(data) def run(self): - #Wake up every 120 seconds and gather icecast statistics. Note that we - #are currently querying the server every 2 minutes for list of - #mountpoints as well. We could remove this query if we hooked into - #rabbitmq events, and listened for these changes instead. + # Wake up every 120 seconds and gather icecast statistics. Note that we + # are currently querying the server every 2 minutes for list of + # mountpoints as well. We could remove this query if we hooked into + # rabbitmq events, and listened for these changes instead. while True: try: stream_parameters = self.get_stream_parameters() @@ -149,25 +154,27 @@ class ListenerStat(Thread): if stats: self.push_stream_stats(stats) except Exception as e: - self.logger.error('Exception: %s', e) + self.logger.error("Exception: %s", e) time.sleep(120) - self.logger.info('ListenerStat thread exiting') + self.logger.info("ListenerStat thread exiting") if __name__ == "__main__": # create logger - logger = logging.getLogger('std_out') + logger = logging.getLogger("std_out") logger.setLevel(logging.DEBUG) # create console handler and set level to debug - #ch = logging.StreamHandler() - #ch.setLevel(logging.DEBUG) + # ch = logging.StreamHandler() + # ch.setLevel(logging.DEBUG) # create formatter - formatter = logging.Formatter('%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s') + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s" + ) # add formatter to ch - #ch.setFormatter(formatter) + # ch.setFormatter(formatter) # add ch to logger - #logger.addHandler(ch) + # logger.addHandler(ch) - #ls = ListenerStat(logger=logger) - #ls.run() + # ls = ListenerStat(logger=logger) + # ls.run() diff --git a/python_apps/pypo/pypo/pure.py b/python_apps/pypo/pypo/pure.py index 2f9d62a44..1a5e755a6 100644 --- a/python_apps/pypo/pypo/pure.py +++ b/python_apps/pypo/pypo/pure.py @@ -2,6 +2,7 @@ import re 
from packaging.version import Version, parse + def version_cmp(version1, version2): version1 = parse(version1) version2 = parse(version2) @@ -11,12 +12,14 @@ def version_cmp(version1, version2): return 0 return -1 + def date_interval_to_seconds(interval): """ Convert timedelta object into int representing the number of seconds. If number of seconds is less than 0, then return 0. """ - seconds = (interval.microseconds + \ - (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) + seconds = ( + interval.microseconds + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6 + ) / float(10 ** 6) return seconds diff --git a/python_apps/pypo/pypo/pypofetch.py b/python_apps/pypo/pypo/pypofetch.py index d7aa7c493..ee9026860 100644 --- a/python_apps/pypo/pypo/pypofetch.py +++ b/python_apps/pypo/pypo/pypofetch.py @@ -23,20 +23,24 @@ from .timeout import ls_timeout def keyboardInterruptHandler(signum, frame): logger = logging.getLogger() - logger.info('\nKeyboard Interrupt\n') + logger.info("\nKeyboard Interrupt\n") sys.exit(0) + + signal.signal(signal.SIGINT, keyboardInterruptHandler) logging.captureWarnings(True) POLL_INTERVAL = 400 -class PypoFetch(Thread): - def __init__(self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config): +class PypoFetch(Thread): + def __init__( + self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config + ): Thread.__init__(self) - #Hacky... + # Hacky... PypoFetch.ref = self self.v1_api_client = v1_api_client.AirtimeApiClient() @@ -76,6 +80,7 @@ class PypoFetch(Thread): Handle a message from RabbitMQ, put it into our yucky global var. Hopefully there is a better way to do this. 
""" + def handle_message(self, message): try: self.logger.info("Received event from Pypo Message Handler: %s" % message) @@ -85,50 +90,52 @@ class PypoFetch(Thread): except (UnicodeDecodeError, AttributeError): pass m = json.loads(message) - command = m['event_type'] + command = m["event_type"] self.logger.info("Handling command: " + command) - if command == 'update_schedule': - self.schedule_data = m['schedule'] + if command == "update_schedule": + self.schedule_data = m["schedule"] self.process_schedule(self.schedule_data) - elif command == 'reset_liquidsoap_bootstrap': + elif command == "reset_liquidsoap_bootstrap": self.set_bootstrap_variables() - elif command == 'update_stream_setting': + elif command == "update_stream_setting": self.logger.info("Updating stream setting...") - self.regenerate_liquidsoap_conf(m['setting']) - elif command == 'update_stream_format': + self.regenerate_liquidsoap_conf(m["setting"]) + elif command == "update_stream_format": self.logger.info("Updating stream format...") - self.update_liquidsoap_stream_format(m['stream_format']) - elif command == 'update_station_name': + self.update_liquidsoap_stream_format(m["stream_format"]) + elif command == "update_station_name": self.logger.info("Updating station name...") - self.update_liquidsoap_station_name(m['station_name']) - elif command == 'update_transition_fade': + self.update_liquidsoap_station_name(m["station_name"]) + elif command == "update_transition_fade": self.logger.info("Updating transition_fade...") - self.update_liquidsoap_transition_fade(m['transition_fade']) - elif command == 'switch_source': + self.update_liquidsoap_transition_fade(m["transition_fade"]) + elif command == "switch_source": self.logger.info("switch_on_source show command received...") - self.pypo_liquidsoap.\ - get_telnet_dispatcher().\ - switch_source(m['sourcename'], m['status']) - elif command == 'disconnect_source': + self.pypo_liquidsoap.get_telnet_dispatcher().switch_source( + m["sourcename"], 
m["status"] + ) + elif command == "disconnect_source": self.logger.info("disconnect_on_source show command received...") - self.pypo_liquidsoap.get_telnet_dispatcher().\ - disconnect_source(m['sourcename']) + self.pypo_liquidsoap.get_telnet_dispatcher().disconnect_source( + m["sourcename"] + ) else: self.logger.info("Unknown command: %s" % command) # update timeout value - if command == 'update_schedule': + if command == "update_schedule": self.listener_timeout = POLL_INTERVAL else: - self.listener_timeout = self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL + self.listener_timeout = ( + self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL + ) if self.listener_timeout < 0: self.listener_timeout = 0 self.logger.info("New timeout: %s" % self.listener_timeout) except Exception as e: self.logger.exception("Exception in handling Message Handler message") - def switch_source_temp(self, sourcename, status): self.logger.debug('Switching source: %s to "%s" status', sourcename, status) command = "streams." @@ -149,25 +156,28 @@ class PypoFetch(Thread): """ Initialize Liquidsoap environment """ + def set_bootstrap_variables(self): - self.logger.debug('Getting information needed on bootstrap from Airtime') + self.logger.debug("Getting information needed on bootstrap from Airtime") try: info = self.v1_api_client.get_bootstrap_info() except Exception as e: - self.logger.exception('Unable to get bootstrap info.. Exiting pypo...') + self.logger.exception("Unable to get bootstrap info.. 
Exiting pypo...") - self.logger.debug('info:%s', info) + self.logger.debug("info:%s", info) commands = [] - for k, v in info['switch_status'].items(): + for k, v in info["switch_status"].items(): commands.append(self.switch_source_temp(k, v)) - stream_format = info['stream_label'] - station_name = info['station_name'] - fade = info['transition_fade'] + stream_format = info["stream_label"] + station_name = info["station_name"] + fade = info["transition_fade"] - commands.append(('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8')) - commands.append(('vars.station_name %s\n' % station_name).encode('utf-8')) - commands.append(('vars.default_dj_fade %s\n' % fade).encode('utf-8')) + commands.append( + ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8") + ) + commands.append(("vars.station_name %s\n" % station_name).encode("utf-8")) + commands.append(("vars.default_dj_fade %s\n" % fade).encode("utf-8")) self.pypo_liquidsoap.get_telnet_dispatcher().telnet_send(commands) self.pypo_liquidsoap.clear_all_queues() @@ -182,21 +192,24 @@ class PypoFetch(Thread): will be thrown.""" self.telnet_lock.acquire(False) - self.logger.info("Restarting Liquidsoap") - subprocess.call('kill -9 `pidof airtime-liquidsoap`', shell=True, close_fds=True) + subprocess.call( + "kill -9 `pidof airtime-liquidsoap`", shell=True, close_fds=True + ) - #Wait here and poll Liquidsoap until it has started up + # Wait here and poll Liquidsoap until it has started up self.logger.info("Waiting for Liquidsoap to start") while True: try: - tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) - tn.write('exit\n'.encode('utf-8')) + tn = telnetlib.Telnet( + self.config["ls_host"], self.config["ls_port"] + ) + tn.write("exit\n".encode("utf-8")) tn.read_all() self.logger.info("Liquidsoap is up and running") break except Exception as e: - #sleep 0.5 seconds and try again + # sleep 0.5 seconds and try again time.sleep(0.5) except Exception as e: @@ -208,11 +221,11 @@ class 
PypoFetch(Thread): """ NOTE: This function is quite short after it was refactored. """ + def regenerate_liquidsoap_conf(self, setting): self.restart_liquidsoap() self.update_liquidsoap_connection_status() - @ls_timeout def update_liquidsoap_connection_status(self): """ @@ -222,20 +235,22 @@ class PypoFetch(Thread): try: self.telnet_lock.acquire() - tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) + tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"]) # update the boot up time of Liquidsoap. Since Liquidsoap is not restarting, # we are manually adjusting the bootup time variable so the status msg will get # updated. current_time = time.time() - boot_up_time_command = ("vars.bootup_time " + str(current_time) + "\n").encode('utf-8') + boot_up_time_command = ( + "vars.bootup_time " + str(current_time) + "\n" + ).encode("utf-8") self.logger.info(boot_up_time_command) tn.write(boot_up_time_command) - connection_status = ("streams.connection_status\n").encode('utf-8') + connection_status = ("streams.connection_status\n").encode("utf-8") self.logger.info(connection_status) tn.write(connection_status) - tn.write('exit\n'.encode('utf-8')) + tn.write("exit\n".encode("utf-8")) output = tn.read_all() except Exception as e: @@ -253,12 +268,13 @@ class PypoFetch(Thread): fake_time = current_time + 1 for s in streams: - info = s.split(':') + info = s.split(":") stream_id = info[0] status = info[1] - if(status == "true"): - self.v1_api_client.notify_liquidsoap_status("OK", stream_id, str(fake_time)) - + if status == "true": + self.v1_api_client.notify_liquidsoap_status( + "OK", stream_id, str(fake_time) + ) @ls_timeout def update_liquidsoap_stream_format(self, stream_format): @@ -266,11 +282,11 @@ class PypoFetch(Thread): # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!! 
try: self.telnet_lock.acquire() - tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) - command = ('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8') + tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"]) + command = ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8") self.logger.info(command) tn.write(command) - tn.write('exit\n'.encode('utf-8')) + tn.write("exit\n".encode("utf-8")) tn.read_all() except Exception as e: self.logger.exception(e) @@ -283,11 +299,11 @@ class PypoFetch(Thread): # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!! try: self.telnet_lock.acquire() - tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) - command = ('vars.default_dj_fade %s\n' % fade).encode('utf-8') + tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"]) + command = ("vars.default_dj_fade %s\n" % fade).encode("utf-8") self.logger.info(command) tn.write(command) - tn.write('exit\n'.encode('utf-8')) + tn.write("exit\n".encode("utf-8")) tn.read_all() except Exception as e: self.logger.exception(e) @@ -301,11 +317,11 @@ class PypoFetch(Thread): try: try: self.telnet_lock.acquire() - tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) - command = ('vars.station_name %s\n' % station_name).encode('utf-8') + tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"]) + command = ("vars.station_name %s\n" % station_name).encode("utf-8") self.logger.info(command) tn.write(command) - tn.write('exit\n'.encode('utf-8')) + tn.write("exit\n".encode("utf-8")) tn.read_all() except Exception as e: self.logger.exception(e) @@ -322,6 +338,7 @@ class PypoFetch(Thread): to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss) - runs the cleanup routine, to get rid of unused cached files """ + def process_schedule(self, schedule_data): self.last_update_schedule_timestamp = time.time() self.logger.debug(schedule_data) @@ -343,20 +360,21 @@ class 
PypoFetch(Thread): media_copy = {} for key in media: media_item = media[key] - if (media_item['type'] == 'file'): + if media_item["type"] == "file": fileExt = self.sanity_check_media_item(media_item) dst = os.path.join(download_dir, f'{media_item["id"]}{fileExt}') - media_item['dst'] = dst - media_item['file_ready'] = False + media_item["dst"] = dst + media_item["file_ready"] = False media_filtered[key] = media_item - media_item['start'] = datetime.strptime(media_item['start'], - "%Y-%m-%d-%H-%M-%S") - media_item['end'] = datetime.strptime(media_item['end'], - "%Y-%m-%d-%H-%M-%S") + media_item["start"] = datetime.strptime( + media_item["start"], "%Y-%m-%d-%H-%M-%S" + ) + media_item["end"] = datetime.strptime( + media_item["end"], "%Y-%m-%d-%H-%M-%S" + ) media_copy[key] = media_item - self.media_prepare_queue.put(copy.copy(media_filtered)) except Exception as e: self.logger.exception(e) @@ -365,37 +383,36 @@ class PypoFetch(Thread): self.logger.debug("Pushing to pypo-push") self.push_queue.put(media_copy) - # cleanup try: self.cache_cleanup(media) except Exception as e: self.logger.exception(e) - #do basic validation of file parameters. Useful for debugging - #purposes + # do basic validation of file parameters. 
Useful for debugging + # purposes def sanity_check_media_item(self, media_item): - start = datetime.strptime(media_item['start'], "%Y-%m-%d-%H-%M-%S") - end = datetime.strptime(media_item['end'], "%Y-%m-%d-%H-%M-%S") + start = datetime.strptime(media_item["start"], "%Y-%m-%d-%H-%M-%S") + end = datetime.strptime(media_item["end"], "%Y-%m-%d-%H-%M-%S") - mime = media_item['metadata']['mime'] + mime = media_item["metadata"]["mime"] mimetypes.init(["%s/mime.types" % os.path.dirname(os.path.realpath(__file__))]) mime_ext = mimetypes.guess_extension(mime, strict=False) length1 = pure.date_interval_to_seconds(end - start) - length2 = media_item['cue_out'] - media_item['cue_in'] + length2 = media_item["cue_out"] - media_item["cue_in"] if abs(length2 - length1) > 1: self.logger.error("end - start length: %s", length1) self.logger.error("cue_out - cue_in length: %s", length2) self.logger.error("Two lengths are not equal!!!") - media_item['file_ext'] = mime_ext + media_item["file_ext"] = mime_ext return mime_ext def is_file_opened(self, path): - #Capture stderr to avoid polluting py-interpreter.log + # Capture stderr to avoid polluting py-interpreter.log proc = Popen(["lsof", path], stdout=PIPE, stderr=PIPE) out = proc.communicate()[0].strip() return bool(out) @@ -411,10 +428,14 @@ class PypoFetch(Thread): for mkey in media: media_item = media[mkey] - if media_item['type'] == 'file': + if media_item["type"] == "file": if "file_ext" not in media_item.keys(): - media_item["file_ext"] = mimetypes.guess_extension(media_item['metadata']['mime'], strict=False) - scheduled_file_set.add("{}{}".format(media_item["id"], media_item["file_ext"])) + media_item["file_ext"] = mimetypes.guess_extension( + media_item["metadata"]["mime"], strict=False + ) + scheduled_file_set.add( + "{}{}".format(media_item["id"], media_item["file_ext"]) + ) expired_files = cached_file_set - scheduled_file_set @@ -424,9 +445,9 @@ class PypoFetch(Thread): path = os.path.join(self.cache_dir, f) 
self.logger.debug("Removing %s" % path) - #check if this file is opened (sometimes Liquidsoap is still - #playing the file due to our knowledge of the track length - #being incorrect!) + # check if this file is opened (sometimes Liquidsoap is still + # playing the file due to our knowledge of the track length + # being incorrect!) if not self.is_file_opened(path): os.remove(path) self.logger.info("File '%s' removed" % path) @@ -441,7 +462,7 @@ class PypoFetch(Thread): self.process_schedule(self.schedule_data) return True except Exception as e: - self.logger.error('Unable to fetch schedule') + self.logger.error("Unable to fetch schedule") self.logger.exception(e) return False @@ -462,11 +483,11 @@ class PypoFetch(Thread): Timer(120, self.update_metadata_on_tunein).start() def main(self): - #Make sure all Liquidsoap queues are empty. This is important in the - #case where we've just restarted the pypo scheduler, but Liquidsoap still - #is playing tracks. In this case let's just restart everything from scratch - #so that we can repopulate our dictionary that keeps track of what - #Liquidsoap is playing much more easily. + # Make sure all Liquidsoap queues are empty. This is important in the + # case where we've just restarted the pypo scheduler, but Liquidsoap still + # is playing tracks. In this case let's just restart everything from scratch + # so that we can repopulate our dictionary that keeps track of what + # Liquidsoap is playing much more easily. 
self.pypo_liquidsoap.clear_all_queues() self.set_bootstrap_variables() @@ -500,7 +521,9 @@ class PypoFetch(Thread): Currently we are checking every POLL_INTERVAL seconds """ - message = self.fetch_queue.get(block=True, timeout=self.listener_timeout) + message = self.fetch_queue.get( + block=True, timeout=self.listener_timeout + ) manual_fetch_needed = False self.handle_message(message) except Empty as e: @@ -513,7 +536,7 @@ class PypoFetch(Thread): if manual_fetch_needed: self.persistent_manual_schedule_fetch(max_attempts=5) except Exception as e: - self.logger.exception('Failed to manually fetch the schedule.') + self.logger.exception("Failed to manually fetch the schedule.") loops += 1 @@ -522,4 +545,4 @@ class PypoFetch(Thread): Entry point of the thread """ self.main() - self.logger.info('PypoFetch thread exiting') + self.logger.info("PypoFetch thread exiting") diff --git a/python_apps/pypo/pypo/pypofile.py b/python_apps/pypo/pypo/pypofile.py index 8dd28bfff..449f4c9fd 100644 --- a/python_apps/pypo/pypo/pypofile.py +++ b/python_apps/pypo/pypo/pypofile.py @@ -18,13 +18,12 @@ import hashlib from requests.exceptions import ConnectionError, HTTPError, Timeout from api_clients import version2 as api_client -CONFIG_PATH = '/etc/airtime/airtime.conf' +CONFIG_PATH = "/etc/airtime/airtime.conf" logging.captureWarnings(True) class PypoFile(Thread): - def __init__(self, schedule_queue, config): Thread.__init__(self) self.logger = logging.getLogger() @@ -38,10 +37,10 @@ class PypoFile(Thread): """ Copy media_item from local library directory to local cache directory. """ - src = media_item['uri'] - dst = media_item['dst'] + src = media_item["uri"] + dst = media_item["dst"] - src_size = media_item['filesize'] + src_size = media_item["filesize"] dst_exists = True try: @@ -59,34 +58,44 @@ class PypoFile(Thread): # become an issue here... This needs proper cache management. 
# https://github.com/LibreTime/libretime/issues/756#issuecomment-477853018 # https://github.com/LibreTime/libretime/pull/845 - self.logger.debug("file %s already exists in local cache as %s, skipping copying..." % (src, dst)) + self.logger.debug( + "file %s already exists in local cache as %s, skipping copying..." + % (src, dst) + ) else: do_copy = True - media_item['file_ready'] = not do_copy + media_item["file_ready"] = not do_copy if do_copy: self.logger.info("copying from %s to local cache %s" % (src, dst)) try: with open(dst, "wb") as handle: self.logger.info(media_item) - response = self.api_client.services.file_download_url(id=media_item['id']) + response = self.api_client.services.file_download_url( + id=media_item["id"] + ) if not response.ok: self.logger.error(response) - raise Exception("%s - Error occurred downloading file" % response.status_code) + raise Exception( + "%s - Error occurred downloading file" + % response.status_code + ) for chunk in response.iter_content(chunk_size=1024): handle.write(chunk) - #make file world readable and owner writable + # make file world readable and owner writable os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) - if media_item['filesize'] == 0: - file_size = self.report_file_size_and_md5_to_airtime(dst, media_item["id"], host, username) + if media_item["filesize"] == 0: + file_size = self.report_file_size_and_md5_to_airtime( + dst, media_item["id"], host, username + ) media_item["filesize"] = file_size - media_item['file_ready'] = True + media_item["file_ready"] = True except Exception as e: self.logger.error("Could not copy from %s to %s" % (src, dst)) self.logger.error(e) @@ -95,7 +104,7 @@ class PypoFile(Thread): try: file_size = os.path.getsize(file_path) - with open(file_path, 'rb') as fh: + with open(file_path, "rb") as fh: m = hashlib.md5() while True: data = fh.read(8192) @@ -105,15 +114,21 @@ class PypoFile(Thread): md5_hash = m.hexdigest() except (OSError, IOError) as e: file_size = 
0 - self.logger.error("Error getting file size and md5 hash for file id %s" % file_id) + self.logger.error( + "Error getting file size and md5 hash for file id %s" % file_id + ) self.logger.error(e) # Make PUT request to Airtime to update the file size and hash - error_msg = "Could not update media file %s with file size and md5 hash" % file_id + error_msg = ( + "Could not update media file %s with file size and md5 hash" % file_id + ) try: put_url = "%s://%s:%s/rest/media/%s" % (host[0], host[1], host[2], file_id) - payload = json.dumps({'filesize': file_size, 'md5': md5_hash}) - response = requests.put(put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, '')) + payload = json.dumps({"filesize": file_size, "md5": md5_hash}) + response = requests.put( + put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, "") + ) if not response.ok: self.logger.error(error_msg) except (ConnectionError, Timeout): @@ -160,7 +175,9 @@ class PypoFile(Thread): try: config.readfp(open(config_path)) except IOError as e: - logging.debug("Failed to open config file at %s: %s" % (config_path, e.strerror)) + logging.debug( + "Failed to open config file at %s: %s" % (config_path, e.strerror) + ) sys.exit() except Exception as e: logging.debug(e.strerror) @@ -189,12 +206,12 @@ class PypoFile(Thread): except Empty as e: pass - media_item = self.get_highest_priority_media_item(self.media) if media_item is not None: self.copy_file(media_item) except Exception as e: import traceback + top = traceback.format_exc() self.logger.error(str(e)) self.logger.error(top) @@ -204,9 +221,10 @@ class PypoFile(Thread): """ Entry point of the thread """ - try: self.main() + try: + self.main() except Exception as e: top = traceback.format_exc() - self.logger.error('PypoFile Exception: %s', top) + self.logger.error("PypoFile Exception: %s", top) time.sleep(5) - self.logger.info('PypoFile thread exiting') + self.logger.info("PypoFile thread exiting") diff --git 
a/python_apps/pypo/pypo/pypoliqqueue.py b/python_apps/pypo/pypo/pypoliqqueue.py index e41266316..74b2bf8f2 100644 --- a/python_apps/pypo/pypo/pypoliqqueue.py +++ b/python_apps/pypo/pypo/pypoliqqueue.py @@ -11,12 +11,17 @@ import time from queue import Empty import signal + + def keyboardInterruptHandler(signum, frame): logger = logging.getLogger() - logger.info('\nKeyboard Interrupt\n') + logger.info("\nKeyboard Interrupt\n") sys.exit(0) + + signal.signal(signal.SIGINT, keyboardInterruptHandler) + class PypoLiqQueue(Thread): def __init__(self, q, pypo_liquidsoap, logger): Thread.__init__(self) @@ -35,18 +40,20 @@ class PypoLiqQueue(Thread): self.logger.info("waiting indefinitely for schedule") media_schedule = self.queue.get(block=True) else: - self.logger.info("waiting %ss until next scheduled item" % \ - time_until_next_play) - media_schedule = self.queue.get(block=True, \ - timeout=time_until_next_play) + self.logger.info( + "waiting %ss until next scheduled item" % time_until_next_play + ) + media_schedule = self.queue.get( + block=True, timeout=time_until_next_play + ) except Empty as e: - #Time to push a scheduled item. + # Time to push a scheduled item. media_item = schedule_deque.popleft() self.pypo_liquidsoap.play(media_item) if len(schedule_deque): - time_until_next_play = \ - self.date_interval_to_seconds( - schedule_deque[0]['start'] - datetime.utcnow()) + time_until_next_play = self.date_interval_to_seconds( + schedule_deque[0]["start"] - datetime.utcnow() + ) if time_until_next_play < 0: time_until_next_play = 0 else: @@ -54,7 +61,7 @@ class PypoLiqQueue(Thread): else: self.logger.info("New schedule received: %s", media_schedule) - #new schedule received. Replace old one with this. + # new schedule received. Replace old one with this. 
schedule_deque.clear() keys = sorted(media_schedule.keys()) @@ -63,28 +70,28 @@ class PypoLiqQueue(Thread): if len(keys): time_until_next_play = self.date_interval_to_seconds( - media_schedule[keys[0]]['start'] - - datetime.utcnow()) + media_schedule[keys[0]]["start"] - datetime.utcnow() + ) else: time_until_next_play = None - def date_interval_to_seconds(self, interval): """ Convert timedelta object into int representing the number of seconds. If number of seconds is less than 0, then return 0. """ - seconds = (interval.microseconds + \ - (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) - if seconds < 0: seconds = 0 + seconds = ( + interval.microseconds + + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6 + ) / float(10 ** 6) + if seconds < 0: + seconds = 0 return seconds def run(self): - try: self.main() + try: + self.main() except Exception as e: - self.logger.error('PypoLiqQueue Exception: %s', traceback.format_exc()) - - - + self.logger.error("PypoLiqQueue Exception: %s", traceback.format_exc()) diff --git a/python_apps/pypo/pypo/pypoliquidsoap.py b/python_apps/pypo/pypo/pypoliquidsoap.py index 7d7854a87..151fc73bd 100644 --- a/python_apps/pypo/pypo/pypoliquidsoap.py +++ b/python_apps/pypo/pypo/pypoliquidsoap.py @@ -8,27 +8,25 @@ from datetime import timedelta from . 
import eventtypes import time -class PypoLiquidsoap(): + +class PypoLiquidsoap: def __init__(self, logger, telnet_lock, host, port): self.logger = logger self.liq_queue_tracker = { - "s0": None, - "s1": None, - "s2": None, - "s3": None, - "s4": None, - } + "s0": None, + "s1": None, + "s2": None, + "s3": None, + "s4": None, + } - self.telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, \ - logger,\ - host,\ - port,\ - list(self.liq_queue_tracker.keys())) + self.telnet_liquidsoap = TelnetLiquidsoap( + telnet_lock, logger, host, port, list(self.liq_queue_tracker.keys()) + ) def get_telnet_dispatcher(self): return self.telnet_liquidsoap - def play(self, media_item): if media_item["type"] == eventtypes.FILE: self.handle_file_type(media_item) @@ -37,28 +35,32 @@ class PypoLiquidsoap(): elif media_item["type"] == eventtypes.STREAM_BUFFER_START: self.telnet_liquidsoap.start_web_stream_buffer(media_item) elif media_item["type"] == eventtypes.STREAM_OUTPUT_START: - if media_item['row_id'] != self.telnet_liquidsoap.current_prebuffering_stream_id: - #this is called if the stream wasn't scheduled sufficiently ahead of time - #so that the prebuffering stage could take effect. Let's do the prebuffering now. + if ( + media_item["row_id"] + != self.telnet_liquidsoap.current_prebuffering_stream_id + ): + # this is called if the stream wasn't scheduled sufficiently ahead of time + # so that the prebuffering stage could take effect. Let's do the prebuffering now. 
self.telnet_liquidsoap.start_web_stream_buffer(media_item) self.telnet_liquidsoap.start_web_stream(media_item) - elif media_item['type'] == eventtypes.STREAM_BUFFER_END: + elif media_item["type"] == eventtypes.STREAM_BUFFER_END: self.telnet_liquidsoap.stop_web_stream_buffer() - elif media_item['type'] == eventtypes.STREAM_OUTPUT_END: + elif media_item["type"] == eventtypes.STREAM_OUTPUT_END: self.telnet_liquidsoap.stop_web_stream_output() - else: raise UnknownMediaItemType(str(media_item)) + else: + raise UnknownMediaItemType(str(media_item)) def handle_file_type(self, media_item): """ - Wait 200 seconds (2000 iterations) for file to become ready, + Wait 200 seconds (2000 iterations) for file to become ready, otherwise give up on it. """ iter_num = 0 - while not media_item['file_ready'] and iter_num < 2000: + while not media_item["file_ready"] and iter_num < 2000: time.sleep(0.1) iter_num += 1 - if media_item['file_ready']: + if media_item["file_ready"]: available_queue = self.find_available_queue() try: @@ -68,27 +70,29 @@ class PypoLiquidsoap(): self.logger.error(e) raise else: - self.logger.warn("File %s did not become ready in less than 5 seconds. Skipping...", media_item['dst']) + self.logger.warn( + "File %s did not become ready in less than 5 seconds. 
Skipping...", + media_item["dst"], + ) def handle_event_type(self, media_item): - if media_item['event_type'] == "kick_out": + if media_item["event_type"] == "kick_out": self.telnet_liquidsoap.disconnect_source("live_dj") - elif media_item['event_type'] == "switch_off": + elif media_item["event_type"] == "switch_off": self.telnet_liquidsoap.switch_source("live_dj", "off") - def is_media_item_finished(self, media_item): if media_item is None: return True else: - return datetime.utcnow() > media_item['end'] + return datetime.utcnow() > media_item["end"] def find_available_queue(self): available_queue = None for i in self.liq_queue_tracker: mi = self.liq_queue_tracker[i] if mi == None or self.is_media_item_finished(mi): - #queue "i" is available. Push to this queue + # queue "i" is available. Push to this queue available_queue = i if available_queue == None: @@ -96,7 +100,6 @@ class PypoLiquidsoap(): return available_queue - def verify_correct_present_media(self, scheduled_now): """ verify whether Liquidsoap is currently playing the correct files. @@ -122,11 +125,13 @@ class PypoLiquidsoap(): """ try: - scheduled_now_files = \ - [x for x in scheduled_now if x["type"] == eventtypes.FILE] + scheduled_now_files = [ + x for x in scheduled_now if x["type"] == eventtypes.FILE + ] - scheduled_now_webstream = \ - [x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START] + scheduled_now_webstream = [ + x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START + ] schedule_ids = set([x["row_id"] for x in scheduled_now_files]) @@ -141,19 +146,21 @@ class PypoLiquidsoap(): to_be_removed = set() to_be_added = set() - #Iterate over the new files, and compare them to currently scheduled - #tracks. 
If already in liquidsoap queue still need to make sure they don't - #have different attributes - #if replay gain changes, it shouldn't change the amplification of the currently playing song + # Iterate over the new files, and compare them to currently scheduled + # tracks. If already in liquidsoap queue still need to make sure they don't + # have different attributes + # if replay gain changes, it shouldn't change the amplification of the currently playing song for i in scheduled_now_files: if i["row_id"] in row_id_map: mi = row_id_map[i["row_id"]] - correct = mi['start'] == i['start'] and \ - mi['end'] == i['end'] and \ - mi['row_id'] == i['row_id'] + correct = ( + mi["start"] == i["start"] + and mi["end"] == i["end"] + and mi["row_id"] == i["row_id"] + ) if not correct: - #need to re-add + # need to re-add self.logger.info("Track %s found to have new attr." % i) to_be_removed.add(i["row_id"]) to_be_added.add(i["row_id"]) @@ -162,37 +169,38 @@ class PypoLiquidsoap(): to_be_added.update(schedule_ids - liq_queue_ids) if to_be_removed: - self.logger.info("Need to remove items from Liquidsoap: %s" % \ - to_be_removed) + self.logger.info( + "Need to remove items from Liquidsoap: %s" % to_be_removed + ) - #remove files from Liquidsoap's queue + # remove files from Liquidsoap's queue for i in self.liq_queue_tracker: mi = self.liq_queue_tracker[i] if mi is not None and mi["row_id"] in to_be_removed: self.stop(i) if to_be_added: - self.logger.info("Need to add items to Liquidsoap *now*: %s" % \ - to_be_added) + self.logger.info( + "Need to add items to Liquidsoap *now*: %s" % to_be_added + ) for i in scheduled_now_files: if i["row_id"] in to_be_added: self.modify_cue_point(i) self.play(i) - #handle webstreams + # handle webstreams current_stream_id = self.telnet_liquidsoap.get_current_stream_id() if scheduled_now_webstream: if int(current_stream_id) != int(scheduled_now_webstream[0]["row_id"]): self.play(scheduled_now_webstream[0]) elif current_stream_id != "-1": - 
#something is playing and it shouldn't be. + # something is playing and it shouldn't be. self.telnet_liquidsoap.stop_web_stream_buffer() self.telnet_liquidsoap.stop_web_stream_output() except KeyError as e: self.logger.error("Error: Malformed event in schedule. " + str(e)) - def stop(self, queue): self.telnet_liquidsoap.queue_remove(queue) self.liq_queue_tracker[queue] = None @@ -209,24 +217,32 @@ class PypoLiquidsoap(): tnow = datetime.utcnow() - link_start = link['start'] + link_start = link["start"] diff_td = tnow - link_start diff_sec = self.date_interval_to_seconds(diff_td) if diff_sec > 0: - self.logger.debug("media item was supposed to start %s ago. Preparing to start..", diff_sec) - original_cue_in_td = timedelta(seconds=float(link['cue_in'])) - link['cue_in'] = self.date_interval_to_seconds(original_cue_in_td) + diff_sec + self.logger.debug( + "media item was supposed to start %s ago. Preparing to start..", + diff_sec, + ) + original_cue_in_td = timedelta(seconds=float(link["cue_in"])) + link["cue_in"] = ( + self.date_interval_to_seconds(original_cue_in_td) + diff_sec + ) def date_interval_to_seconds(self, interval): """ Convert timedelta object into int representing the number of seconds. If number of seconds is less than 0, then return 0. 
""" - seconds = (interval.microseconds + \ - (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) - if seconds < 0: seconds = 0 + seconds = ( + interval.microseconds + + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6 + ) / float(10 ** 6) + if seconds < 0: + seconds = 0 return seconds @@ -237,5 +253,6 @@ class PypoLiquidsoap(): class UnknownMediaItemType(Exception): pass + class NoQueueAvailableException(Exception): pass diff --git a/python_apps/pypo/pypo/pypomessagehandler.py b/python_apps/pypo/pypo/pypomessagehandler.py index 9c8b8e28a..927fc33d9 100644 --- a/python_apps/pypo/pypo/pypomessagehandler.py +++ b/python_apps/pypo/pypo/pypomessagehandler.py @@ -6,6 +6,7 @@ import os import sys from threading import Thread import time + # For RabbitMQ from kombu.connection import Connection from kombu.messaging import Exchange, Queue @@ -26,17 +27,18 @@ class RabbitConsumer(ConsumerMixin): def get_consumers(self, Consumer, channel): return [ - Consumer(self.queues, callbacks=[self.on_message], accept=['text/plain']), + Consumer(self.queues, callbacks=[self.on_message], accept=["text/plain"]), ] def on_message(self, body, message): self.handler.handle_message(message.payload) message.ack() + class PypoMessageHandler(Thread): def __init__(self, pq, rq, config): Thread.__init__(self) - self.logger = logging.getLogger('message_h') + self.logger = logging.getLogger("message_h") self.pypo_queue = pq self.recorder_queue = rq self.config = config @@ -44,13 +46,17 @@ class PypoMessageHandler(Thread): def init_rabbit_mq(self): self.logger.info("Initializing RabbitMQ stuff") try: - schedule_exchange = Exchange("airtime-pypo", "direct", durable=True, auto_delete=True) + schedule_exchange = Exchange( + "airtime-pypo", "direct", durable=True, auto_delete=True + ) schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange, key="foo") - with Connection(self.config["host"], \ - self.config["user"], \ - self.config["password"], \ - 
self.config["vhost"], \ - heartbeat = 5) as connection: + with Connection( + self.config["host"], + self.config["user"], + self.config["password"], + self.config["vhost"], + heartbeat=5, + ) as connection: rabbit = RabbitConsumer(connection, [schedule_queue], self) rabbit.run() except Exception as e: @@ -60,6 +66,7 @@ class PypoMessageHandler(Thread): Handle a message from RabbitMQ, put it into our yucky global var. Hopefully there is a better way to do this. """ + def handle_message(self, message): try: self.logger.info("Received event from RabbitMQ: %s" % message) @@ -69,36 +76,36 @@ class PypoMessageHandler(Thread): except (UnicodeDecodeError, AttributeError): pass m = json.loads(message) - command = m['event_type'] + command = m["event_type"] self.logger.info("Handling command: " + command) - if command == 'update_schedule': + if command == "update_schedule": self.logger.info("Updating schedule...") self.pypo_queue.put(message) - elif command == 'reset_liquidsoap_bootstrap': + elif command == "reset_liquidsoap_bootstrap": self.logger.info("Resetting bootstrap vars...") self.pypo_queue.put(message) - elif command == 'update_stream_setting': + elif command == "update_stream_setting": self.logger.info("Updating stream setting...") self.pypo_queue.put(message) - elif command == 'update_stream_format': + elif command == "update_stream_format": self.logger.info("Updating stream format...") self.pypo_queue.put(message) - elif command == 'update_station_name': + elif command == "update_station_name": self.logger.info("Updating station name...") self.pypo_queue.put(message) - elif command == 'switch_source': + elif command == "switch_source": self.logger.info("switch_source command received...") self.pypo_queue.put(message) - elif command == 'update_transition_fade': + elif command == "update_transition_fade": self.logger.info("Updating trasition fade...") self.pypo_queue.put(message) - elif command == 'disconnect_source': + elif command == "disconnect_source": 
self.logger.info("disconnect_source command received...") self.pypo_queue.put(message) - elif command == 'update_recorder_schedule': + elif command == "update_recorder_schedule": self.recorder_queue.put(message) - elif command == 'cancel_recording': + elif command == "cancel_recording": self.recorder_queue.put(message) else: self.logger.info("Unknown command: %s" % command) @@ -109,9 +116,11 @@ class PypoMessageHandler(Thread): try: self.init_rabbit_mq() except Exception as e: - self.logger.error('Exception: %s', e) + self.logger.error("Exception: %s", e) self.logger.error("traceback: %s", traceback.format_exc()) - self.logger.error("Error connecting to RabbitMQ Server. Trying again in few seconds") + self.logger.error( + "Error connecting to RabbitMQ Server. Trying again in few seconds" + ) time.sleep(5) """ @@ -119,7 +128,7 @@ class PypoMessageHandler(Thread): Wait for schedule updates from RabbitMQ, but in case there aren't any, poll the server to get the upcoming schedule. """ + def run(self): while True: self.main() - diff --git a/python_apps/pypo/pypo/pypopush.py b/python_apps/pypo/pypo/pypopush.py index 52dd1e9fd..f38b03c79 100644 --- a/python_apps/pypo/pypo/pypopush.py +++ b/python_apps/pypo/pypo/pypopush.py @@ -29,10 +29,12 @@ PUSH_INTERVAL = 2 def is_stream(media_item): - return media_item['type'] == 'stream_output_start' + return media_item["type"] == "stream_output_start" + def is_file(media_item): - return media_item['type'] == 'file' + return media_item["type"] == "file" + class PypoPush(Thread): def __init__(self, q, telnet_lock, pypo_liquidsoap, config): @@ -44,20 +46,19 @@ class PypoPush(Thread): self.config = config self.pushed_objects = {} - self.logger = logging.getLogger('push') + self.logger = logging.getLogger("push") self.current_prebuffering_stream_id = None self.queue_id = 0 self.future_scheduled_queue = Queue() self.pypo_liquidsoap = pypo_liquidsoap - self.plq = PypoLiqQueue(self.future_scheduled_queue, \ - self.pypo_liquidsoap, \ - 
self.logger) + self.plq = PypoLiqQueue( + self.future_scheduled_queue, self.pypo_liquidsoap, self.logger + ) self.plq.daemon = True self.plq.start() - def main(self): loops = 0 heartbeat_period = math.floor(30 / PUSH_INTERVAL) @@ -72,10 +73,11 @@ class PypoPush(Thread): raise else: self.logger.debug(media_schedule) - #separate media_schedule list into currently_playing and - #scheduled_for_future lists - currently_playing, scheduled_for_future = \ - self.separate_present_future(media_schedule) + # separate media_schedule list into currently_playing and + # scheduled_for_future lists + currently_playing, scheduled_for_future = self.separate_present_future( + media_schedule + ) self.pypo_liquidsoap.verify_correct_present_media(currently_playing) self.future_scheduled_queue.put(scheduled_for_future) @@ -85,7 +87,6 @@ class PypoPush(Thread): loops = 0 loops += 1 - def separate_present_future(self, media_schedule): tnow = datetime.utcnow() @@ -96,7 +97,7 @@ class PypoPush(Thread): for mkey in sorted_keys: media_item = media_schedule[mkey] - diff_td = tnow - media_item['start'] + diff_td = tnow - media_item["start"] diff_sec = self.date_interval_to_seconds(diff_td) if diff_sec >= 0: @@ -111,8 +112,10 @@ class PypoPush(Thread): Convert timedelta object into int representing the number of seconds. If number of seconds is less than 0, then return 0. 
""" - seconds = (interval.microseconds + \ - (interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) + seconds = ( + interval.microseconds + + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6 + ) / float(10 ** 6) return seconds @@ -120,18 +123,18 @@ class PypoPush(Thread): def stop_web_stream_all(self): try: self.telnet_lock.acquire() - tn = telnetlib.Telnet(self.config['LS_HOST'], self.config['LS_PORT']) + tn = telnetlib.Telnet(self.config["LS_HOST"], self.config["LS_PORT"]) - #msg = 'dynamic_source.read_stop_all xxx\n' - msg = 'http.stop\n' + # msg = 'dynamic_source.read_stop_all xxx\n' + msg = "http.stop\n" self.logger.debug(msg) tn.write(msg) - msg = 'dynamic_source.output_stop\n' + msg = "dynamic_source.output_stop\n" self.logger.debug(msg) tn.write(msg) - msg = 'dynamic_source.id -1\n' + msg = "dynamic_source.id -1\n" self.logger.debug(msg) tn.write(msg) @@ -145,10 +148,10 @@ class PypoPush(Thread): def run(self): while True: - try: self.main() + try: + self.main() except Exception as e: top = traceback.format_exc() - self.logger.error('Pypo Push Exception: %s', top) + self.logger.error("Pypo Push Exception: %s", top) time.sleep(5) - self.logger.info('PypoPush thread exiting') - + self.logger.info("PypoPush thread exiting") diff --git a/python_apps/pypo/pypo/recorder.py b/python_apps/pypo/pypo/recorder.py index 3e22d6443..2778d0091 100644 --- a/python_apps/pypo/pypo/recorder.py +++ b/python_apps/pypo/pypo/recorder.py @@ -24,6 +24,7 @@ import mutagen from api_clients import version1 as v1_api_client from api_clients import version2 as api_client + def api_client(logger): """ api_client returns the correct instance of AirtimeApiClient. 
Although there is only one @@ -31,15 +32,17 @@ def api_client(logger): """ return v1_api_client.AirtimeApiClient(logger) + # loading config file try: - config = ConfigObj('/etc/airtime/airtime.conf') + config = ConfigObj("/etc/airtime/airtime.conf") except Exception as e: print("Error loading config file: {}".format(e)) sys.exit() # TODO : add docstrings everywhere in this module + def getDateTimeObj(time): # TODO : clean up for this function later. # - use tuples to parse result from split (instead of indices) @@ -49,17 +52,20 @@ def getDateTimeObj(time): # shadowed # - add docstring to document all behaviour of this function timeinfo = time.split(" ") - date = [ int(x) for x in timeinfo[0].split("-") ] - my_time = [ int(x) for x in timeinfo[1].split(":") ] - return datetime.datetime(date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None) + date = [int(x) for x in timeinfo[0].split("-")] + my_time = [int(x) for x in timeinfo[1].split(":")] + return datetime.datetime( + date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None + ) + PUSH_INTERVAL = 2 -class ShowRecorder(Thread): - def __init__ (self, show_instance, show_name, filelength, start_time): +class ShowRecorder(Thread): + def __init__(self, show_instance, show_name, filelength, start_time): Thread.__init__(self) - self.logger = logging.getLogger('recorder') + self.logger = logging.getLogger("recorder") self.api_client = api_client(self.logger) self.filelength = filelength self.start_time = start_time @@ -75,35 +81,41 @@ class ShowRecorder(Thread): if config["pypo"]["record_file_type"] in ["mp3", "ogg"]: filetype = config["pypo"]["record_file_type"] else: - filetype = "ogg"; + filetype = "ogg" joined_path = os.path.join(config["pypo"]["base_recorded_files"], filename) filepath = "%s.%s" % (joined_path, filetype) br = config["pypo"]["record_bitrate"] sr = config["pypo"]["record_samplerate"] - c = config["pypo"]["record_channels"] + c = config["pypo"]["record_channels"] ss = 
config["pypo"]["record_sample_size"] - #-f:16,2,44100 - #-b:256 - command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % \ - (ss, c, sr, filepath, br, length) + # -f:16,2,44100 + # -b:256 + command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % ( + ss, + c, + sr, + filepath, + br, + length, + ) args = command.split(" ") self.logger.info("starting record") self.logger.info("command " + command) - self.p = Popen(args,stdout=PIPE,stderr=PIPE) + self.p = Popen(args, stdout=PIPE, stderr=PIPE) - #blocks at the following line until the child process - #quits + # blocks at the following line until the child process + # quits self.p.wait() outmsgs = self.p.stdout.readlines() for msg in outmsgs: - m = re.search('^ERROR',msg) + m = re.search("^ERROR", msg) if not m == None: - self.logger.info('Recording error is found: %s', outmsgs) + self.logger.info("Recording error is found: %s", outmsgs) self.logger.info("finishing record, return code %s", self.p.returncode) code = self.p.returncode @@ -112,21 +124,25 @@ class ShowRecorder(Thread): return code, filepath def cancel_recording(self): - #send signal interrupt (2) + # send signal interrupt (2) self.logger.info("Show manually cancelled!") - if (self.p is not None): + if self.p is not None: self.p.send_signal(signal.SIGINT) - #if self.p is defined, then the child process ecasound is recording + # if self.p is defined, then the child process ecasound is recording def is_recording(self): - return (self.p is not None) + return self.p is not None def upload_file(self, filepath): filename = os.path.split(filepath)[1] # files is what requests actually expects - files = {'file': open(filepath, "rb"), 'name': filename, 'show_instance': self.show_instance} + files = { + "file": open(filepath, "rb"), + "name": filename, + "show_instance": self.show_instance, + } self.api_client.upload_recorded_show(files, self.show_instance) @@ -136,27 +152,25 @@ class ShowRecorder(Thread): self.start_time, self.show_name, self.show_instance 
""" try: - full_date, full_time = self.start_time.split(" ",1) + full_date, full_time = self.start_time.split(" ", 1) # No idea why we translated - to : before - #full_time = full_time.replace(":","-") + # full_time = full_time.replace(":","-") self.logger.info("time: %s" % full_time) artist = "Airtime Show Recorder" - #set some metadata for our file daemon - recorded_file = mutagen.File(filepath, easy = True) - recorded_file['artist'] = artist - recorded_file['date'] = full_date - recorded_file['title'] = "%s-%s-%s" % (self.show_name, - full_date, full_time) - #You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string - recorded_file['tracknumber'] = self.show_instance + # set some metadata for our file daemon + recorded_file = mutagen.File(filepath, easy=True) + recorded_file["artist"] = artist + recorded_file["date"] = full_date + recorded_file["title"] = "%s-%s-%s" % (self.show_name, full_date, full_time) + # You cannot pass ints into the metadata of a file. 
Even tracknumber needs to be a string + recorded_file["tracknumber"] = self.show_instance recorded_file.save() except Exception as e: top = traceback.format_exc() - self.logger.error('Exception: %s', e) + self.logger.error("Exception: %s", e) self.logger.error("traceback: %s", top) - def run(self): code, filepath = self.record_show() @@ -174,14 +188,15 @@ class ShowRecorder(Thread): self.logger.info("problem recording show") os.remove(filepath) + class Recorder(Thread): def __init__(self, q): Thread.__init__(self) - self.logger = logging.getLogger('recorder') + self.logger = logging.getLogger("recorder") self.api_client = api_client(self.logger) self.sr = None self.shows_to_record = {} - self.server_timezone = '' + self.server_timezone = "" self.queue = q self.loops = 0 self.logger.info("RecorderFetch: init complete") @@ -189,7 +204,7 @@ class Recorder(Thread): success = False while not success: try: - self.api_client.register_component('show-recorder') + self.api_client.register_component("show-recorder") success = True except Exception as e: self.logger.error(str(e)) @@ -205,7 +220,7 @@ class Recorder(Thread): msg = json.loads(message) command = msg["event_type"] self.logger.info("Received msg from Pypo Message Handler: %s", msg) - if command == 'cancel_recording': + if command == "cancel_recording": if self.currently_recording(): self.cancel_recording() else: @@ -218,14 +233,18 @@ class Recorder(Thread): def process_recorder_schedule(self, m): self.logger.info("Parsing recording show schedules...") temp_shows_to_record = {} - shows = m['shows'] + shows = m["shows"] for show in shows: - show_starts = getDateTimeObj(show['starts']) - show_end = getDateTimeObj(show['ends']) + show_starts = getDateTimeObj(show["starts"]) + show_end = getDateTimeObj(show["ends"]) time_delta = show_end - show_starts - temp_shows_to_record[show['starts']] = [time_delta, - show['instance_id'], show['name'], m['server_timezone']] + temp_shows_to_record[show["starts"]] = [ + time_delta, + 
show["instance_id"], + show["name"], + m["server_timezone"], + ] self.shows_to_record = temp_shows_to_record def get_time_till_next_show(self): @@ -237,7 +256,7 @@ class Recorder(Thread): next_show = getDateTimeObj(start_time) delta = next_show - tnow - s = '%s.%s' % (delta.seconds, delta.microseconds) + s = "%s.%s" % (delta.seconds, delta.microseconds) out = float(s) if out < 5: @@ -257,7 +276,8 @@ class Recorder(Thread): return False def start_record(self): - if len(self.shows_to_record) == 0: return None + if len(self.shows_to_record) == 0: + return None try: delta = self.get_time_till_next_show() if delta < 5: @@ -273,16 +293,25 @@ class Recorder(Thread): T = pytz.timezone(server_timezone) start_time_on_UTC = getDateTimeObj(start_time) - start_time_on_server = start_time_on_UTC.replace(tzinfo=pytz.utc).astimezone(T) - start_time_formatted = '%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d' % \ - {'year': start_time_on_server.year, 'month': start_time_on_server.month, 'day': start_time_on_server.day, \ - 'hour': start_time_on_server.hour, 'min': start_time_on_server.minute, 'sec': start_time_on_server.second} - + start_time_on_server = start_time_on_UTC.replace( + tzinfo=pytz.utc + ).astimezone(T) + start_time_formatted = ( + "%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d" + % { + "year": start_time_on_server.year, + "month": start_time_on_server.month, + "day": start_time_on_server.day, + "hour": start_time_on_server.hour, + "min": start_time_on_server.minute, + "sec": start_time_on_server.second, + } + ) seconds_waiting = 0 - #avoiding CC-5299 - while(True): + # avoiding CC-5299 + while True: if self.currently_recording(): self.logger.info("Previous record not finished, sleeping 100ms") seconds_waiting = seconds_waiting + 0.1 @@ -290,16 +319,21 @@ class Recorder(Thread): else: show_length_seconds = show_length.seconds - seconds_waiting - self.sr = ShowRecorder(show_instance, show_name, show_length_seconds, start_time_formatted) + 
self.sr = ShowRecorder( + show_instance, + show_name, + show_length_seconds, + start_time_formatted, + ) self.sr.start() break - #remove show from shows to record. + # remove show from shows to record. del self.shows_to_record[start_time] - #self.time_till_next_show = self.get_time_till_next_show() - except Exception as e : + # self.time_till_next_show = self.get_time_till_next_show() + except Exception as e: top = traceback.format_exc() - self.logger.error('Exception: %s', e) + self.logger.error("Exception: %s", e) self.logger.error("traceback: %s", top) def run(self): @@ -318,7 +352,7 @@ class Recorder(Thread): self.process_recorder_schedule(temp) self.logger.info("Bootstrap recorder schedule received: %s", temp) except Exception as e: - self.logger.error( traceback.format_exc() ) + self.logger.error(traceback.format_exc()) self.logger.error(e) self.logger.info("Bootstrap complete: got initial copy of the schedule") @@ -338,16 +372,16 @@ class Recorder(Thread): self.process_recorder_schedule(temp) self.logger.info("updated recorder schedule received: %s", temp) except Exception as e: - self.logger.error( traceback.format_exc() ) + self.logger.error(traceback.format_exc()) self.logger.error(e) - try: self.handle_message() + try: + self.handle_message() except Exception as e: - self.logger.error( traceback.format_exc() ) - self.logger.error('Pypo Recorder Exception: %s', e) + self.logger.error(traceback.format_exc()) + self.logger.error("Pypo Recorder Exception: %s", e) time.sleep(PUSH_INTERVAL) self.loops += 1 - except Exception as e : + except Exception as e: top = traceback.format_exc() - self.logger.error('Exception: %s', e) + self.logger.error("Exception: %s", e) self.logger.error("traceback: %s", top) - diff --git a/python_apps/pypo/pypo/telnetliquidsoap.py b/python_apps/pypo/pypo/telnetliquidsoap.py index 65d6bb168..15184b2e4 100644 --- a/python_apps/pypo/pypo/telnetliquidsoap.py +++ b/python_apps/pypo/pypo/telnetliquidsoap.py @@ -4,32 +4,36 @@ import 
telnetlib from .timeout import ls_timeout import traceback + def create_liquidsoap_annotation(media): # We need liq_start_next value in the annotate. That is the value that controls overlap duration of crossfade. - filename = media['dst'] - annotation = ('annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",' + \ - 'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",' + \ - 'schedule_table_id="%s",replay_gain="%s dB"') % \ - (media['id'], - float(media['fade_in']) / 1000, - float(media['fade_out']) / 1000, - float(media['cue_in']), - float(media['cue_out']), - media['row_id'], - media['replay_gain']) + filename = media["dst"] + annotation = ( + 'annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",' + + 'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",' + + 'schedule_table_id="%s",replay_gain="%s dB"' + ) % ( + media["id"], + float(media["fade_in"]) / 1000, + float(media["fade_out"]) / 1000, + float(media["cue_in"]), + float(media["cue_out"]), + media["row_id"], + media["replay_gain"], + ) # Override the the artist/title that Liquidsoap extracts from a file's metadata # with the metadata we get from Airtime. (You can modify metadata in Airtime's library, # which doesn't get saved back to the file.) 
- if 'metadata' in media: + if "metadata" in media: - if 'artist_name' in media['metadata']: - artist_name = media['metadata']['artist_name'] + if "artist_name" in media["metadata"]: + artist_name = media["metadata"]["artist_name"] if isinstance(artist_name, str): annotation += ',artist="%s"' % (artist_name.replace('"', '\\"')) - if 'track_title' in media['metadata']: - track_title = media['metadata']['track_title'] + if "track_title" in media["metadata"]: + track_title = media["metadata"]["track_title"] if isinstance(track_title, str): annotation += ',title="%s"' % (track_title.replace('"', '\\"')) @@ -37,8 +41,8 @@ def create_liquidsoap_annotation(media): return annotation -class TelnetLiquidsoap: +class TelnetLiquidsoap: def __init__(self, telnet_lock, logger, ls_host, ls_port, queues): self.telnet_lock = telnet_lock self.ls_host = ls_host @@ -53,9 +57,9 @@ class TelnetLiquidsoap: def __is_empty(self, queue_id): return True tn = self.__connect() - msg = '%s.queue\nexit\n' % queue_id - tn.write(msg.encode('utf-8')) - output = tn.read_all().decode('utf-8').splitlines() + msg = "%s.queue\nexit\n" % queue_id + tn.write(msg.encode("utf-8")) + output = tn.read_all().decode("utf-8").splitlines() if len(output) == 3: return len(output[0]) == 0 else: @@ -68,12 +72,12 @@ class TelnetLiquidsoap: tn = self.__connect() for i in self.queues: - msg = 'queues.%s_skip\n' % i + msg = "queues.%s_skip\n" % i self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) except Exception: raise finally: @@ -85,18 +89,17 @@ class TelnetLiquidsoap: self.telnet_lock.acquire() tn = self.__connect() - msg = 'queues.%s_skip\n' % queue_id + msg = "queues.%s_skip\n" % queue_id self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - 
tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) except Exception: raise finally: self.telnet_lock.release() - @ls_timeout def queue_push(self, queue_id, media_item): try: @@ -107,40 +110,39 @@ class TelnetLiquidsoap: tn = self.__connect() annotation = create_liquidsoap_annotation(media_item) - msg = '%s.push %s\n' % (queue_id, annotation) + msg = "%s.push %s\n" % (queue_id, annotation) self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - show_name = media_item['show_name'] - msg = 'vars.show_name %s\n' % show_name - tn.write(msg.encode('utf-8')) + show_name = media_item["show_name"] + msg = "vars.show_name %s\n" % show_name + tn.write(msg.encode("utf-8")) self.logger.debug(msg) - tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) except Exception: raise finally: self.telnet_lock.release() - @ls_timeout def stop_web_stream_buffer(self): try: self.telnet_lock.acquire() tn = telnetlib.Telnet(self.ls_host, self.ls_port) - #dynamic_source.stop http://87.230.101.24:80/top100station.mp3 + # dynamic_source.stop http://87.230.101.24:80/top100station.mp3 - msg = 'http.stop\n' + msg = "http.stop\n" self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - msg = 'dynamic_source.id -1\n' + msg = "dynamic_source.id -1\n" self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) except Exception as e: self.logger.error(str(e)) @@ -153,14 +155,14 @@ class TelnetLiquidsoap: try: self.telnet_lock.acquire() tn = telnetlib.Telnet(self.ls_host, self.ls_port) - 
#dynamic_source.stop http://87.230.101.24:80/top100station.mp3 + # dynamic_source.stop http://87.230.101.24:80/top100station.mp3 - msg = 'dynamic_source.output_stop\n' + msg = "dynamic_source.output_stop\n" self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) except Exception as e: self.logger.error(str(e)) @@ -174,16 +176,16 @@ class TelnetLiquidsoap: self.telnet_lock.acquire() tn = telnetlib.Telnet(self.ls_host, self.ls_port) - #TODO: DO we need this? - msg = 'streams.scheduled_play_start\n' - tn.write(msg.encode('utf-8')) + # TODO: DO we need this? + msg = "streams.scheduled_play_start\n" + tn.write(msg.encode("utf-8")) - msg = 'dynamic_source.output_start\n' + msg = "dynamic_source.output_start\n" self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) self.current_prebuffering_stream_id = None except Exception as e: @@ -198,18 +200,18 @@ class TelnetLiquidsoap: self.telnet_lock.acquire() tn = telnetlib.Telnet(self.ls_host, self.ls_port) - msg = 'dynamic_source.id %s\n' % media_item['row_id'] + msg = "dynamic_source.id %s\n" % media_item["row_id"] self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - msg = 'http.restart %s\n' % media_item['uri'] + msg = "http.restart %s\n" % media_item["uri"] self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - tn.write("exit\n".encode('utf-8')) - self.logger.debug(tn.read_all().decode('utf-8')) + tn.write("exit\n".encode("utf-8")) + self.logger.debug(tn.read_all().decode("utf-8")) - self.current_prebuffering_stream_id = media_item['row_id'] + 
self.current_prebuffering_stream_id = media_item["row_id"] except Exception as e: self.logger.error(str(e)) self.logger.error(traceback.format_exc()) @@ -222,12 +224,12 @@ class TelnetLiquidsoap: self.telnet_lock.acquire() tn = telnetlib.Telnet(self.ls_host, self.ls_port) - msg = 'dynamic_source.get_id\n' + msg = "dynamic_source.get_id\n" self.logger.debug(msg) - tn.write(msg.encode('utf-8')) + tn.write(msg.encode("utf-8")) - tn.write("exit\n".encode('utf-8')) - stream_id = tn.read_all().decode('utf-8').splitlines()[0] + tn.write("exit\n".encode("utf-8")) + stream_id = tn.read_all().decode("utf-8").splitlines()[0] self.logger.debug("stream_id: %s" % stream_id) return stream_id @@ -239,20 +241,20 @@ class TelnetLiquidsoap: @ls_timeout def disconnect_source(self, sourcename): - self.logger.debug('Disconnecting source: %s', sourcename) + self.logger.debug("Disconnecting source: %s", sourcename) command = "" - if(sourcename == "master_dj"): + if sourcename == "master_dj": command += "master_harbor.stop\n" - elif(sourcename == "live_dj"): + elif sourcename == "live_dj": command += "live_dj_harbor.stop\n" try: self.telnet_lock.acquire() tn = telnetlib.Telnet(self.ls_host, self.ls_port) self.logger.info(command) - tn.write(command.encode('utf-8')) - tn.write('exit\n'.encode('utf-8')) - tn.read_all().decode('utf-8') + tn.write(command.encode("utf-8")) + tn.write("exit\n".encode("utf-8")) + tn.read_all().decode("utf-8") except Exception as e: self.logger.error(traceback.format_exc()) finally: @@ -267,18 +269,17 @@ class TelnetLiquidsoap: for i in commands: self.logger.info(i) if type(i) is str: - i = i.encode('utf-8') + i = i.encode("utf-8") tn.write(i) - tn.write('exit\n'.encode('utf-8')) - tn.read_all().decode('utf-8') + tn.write("exit\n".encode("utf-8")) + tn.read_all().decode("utf-8") except Exception as e: self.logger.error(str(e)) self.logger.error(traceback.format_exc()) finally: self.telnet_lock.release() - def switch_source(self, sourcename, status): 
self.logger.debug('Switching source: %s to "%s" status', sourcename, status) command = "streams." @@ -296,15 +297,15 @@ class TelnetLiquidsoap: self.telnet_send([command]) -class DummyTelnetLiquidsoap: +class DummyTelnetLiquidsoap: def __init__(self, telnet_lock, logger): self.telnet_lock = telnet_lock self.liquidsoap_mock_queues = {} self.logger = logger for i in range(4): - self.liquidsoap_mock_queues["s"+str(i)] = [] + self.liquidsoap_mock_queues["s" + str(i)] = [] @ls_timeout def queue_push(self, queue_id, media_item): @@ -313,6 +314,7 @@ class DummyTelnetLiquidsoap: self.logger.info("Pushing %s to queue %s" % (media_item, queue_id)) from datetime import datetime + print("Time now: {:s}".format(datetime.utcnow())) annotation = create_liquidsoap_annotation(media_item) @@ -329,6 +331,7 @@ class DummyTelnetLiquidsoap: self.logger.info("Purging queue %s" % queue_id) from datetime import datetime + print("Time now: {:s}".format(datetime.utcnow())) except Exception: @@ -336,5 +339,6 @@ class DummyTelnetLiquidsoap: finally: self.telnet_lock.release() + class QueueNotEmptyException(Exception): pass diff --git a/python_apps/pypo/pypo/testpypoliqqueue.py b/python_apps/pypo/pypo/testpypoliqqueue.py index 15897737e..50f1b107b 100644 --- a/python_apps/pypo/pypo/testpypoliqqueue.py +++ b/python_apps/pypo/pypo/testpypoliqqueue.py @@ -13,14 +13,17 @@ import logging from datetime import datetime from datetime import timedelta + def keyboardInterruptHandler(signum, frame): logger = logging.getLogger() - logger.info('\nKeyboard Interrupt\n') + logger.info("\nKeyboard Interrupt\n") sys.exit(0) + + signal.signal(signal.SIGINT, keyboardInterruptHandler) # configure logging -format = '%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s' +format = "%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s" logging.basicConfig(level=logging.DEBUG, format=format) logging.captureWarnings(True) @@ -30,19 +33,18 @@ pypoPush_q = Queue() pypoLiq_q = Queue() 
liq_queue_tracker = { - "s0": None, - "s1": None, - "s2": None, - "s3": None, - } + "s0": None, + "s1": None, + "s2": None, + "s3": None, +} -#dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging) -dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, \ - "localhost", \ - 1234) +# dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging) +dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, "localhost", 1234) -plq = PypoLiqQueue(pypoLiq_q, telnet_lock, logging, liq_queue_tracker, \ - dummy_telnet_liquidsoap) +plq = PypoLiqQueue( + pypoLiq_q, telnet_lock, logging, liq_queue_tracker, dummy_telnet_liquidsoap +) plq.daemon = True plq.start() @@ -54,47 +56,43 @@ media_schedule = {} start_dt = datetime.utcnow() + timedelta(seconds=1) end_dt = datetime.utcnow() + timedelta(seconds=6) -media_schedule[start_dt] = {"id": 5, \ - "type":"file", \ - "row_id":9, \ - "uri":"", \ - "dst":"/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3", \ - "fade_in":0, \ - "fade_out":0, \ - "cue_in":0, \ - "cue_out":300, \ - "start": start_dt, \ - "end": end_dt, \ - "show_name":"Untitled", \ - "replay_gain": 0, \ - "independent_event": True \ - } - +media_schedule[start_dt] = { + "id": 5, + "type": "file", + "row_id": 9, + "uri": "", + "dst": "/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3", + "fade_in": 0, + "fade_out": 0, + "cue_in": 0, + "cue_out": 300, + "start": start_dt, + "end": end_dt, + "show_name": "Untitled", + "replay_gain": 0, + "independent_event": True, +} start_dt = datetime.utcnow() + timedelta(seconds=2) end_dt = datetime.utcnow() + timedelta(seconds=6) -media_schedule[start_dt] = {"id": 5, \ - "type":"file", \ - "row_id":9, \ - "uri":"", \ - "dst":"/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3", \ - "fade_in":0, \ - "fade_out":0, \ - "cue_in":0, \ - "cue_out":300, \ - "start": start_dt, \ - "end": end_dt, \ - "show_name":"Untitled", \ - "replay_gain": 0, \ - "independent_event": True \ - 
} +media_schedule[start_dt] = { + "id": 5, + "type": "file", + "row_id": 9, + "uri": "", + "dst": "/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3", + "fade_in": 0, + "fade_out": 0, + "cue_in": 0, + "cue_out": 300, + "start": start_dt, + "end": end_dt, + "show_name": "Untitled", + "replay_gain": 0, + "independent_event": True, +} pypoLiq_q.put(media_schedule) plq.join() - - - - - diff --git a/python_apps/pypo/pypo/timeout.py b/python_apps/pypo/pypo/timeout.py index 2520b8e21..9cc5bcbc4 100644 --- a/python_apps/pypo/pypo/timeout.py +++ b/python_apps/pypo/pypo/timeout.py @@ -2,12 +2,13 @@ import threading from . import pypofetch -def __timeout(func, timeout_duration, default, args, kwargs): +def __timeout(func, timeout_duration, default, args, kwargs): class InterruptableThread(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.result = default + def run(self): self.result = func(*args, **kwargs) @@ -21,10 +22,10 @@ def __timeout(func, timeout_duration, default, args, kwargs): it.join(timeout_duration) if it.isAlive(): - """Restart Liquidsoap and try the command one more time. If it + """Restart Liquidsoap and try the command one more time. 
If it fails again then there is something critically wrong...""" if first_attempt: - #restart liquidsoap + # restart liquidsoap pypofetch.PypoFetch.ref.restart_liquidsoap() else: raise Exception("Thread did not terminate") @@ -33,7 +34,9 @@ def __timeout(func, timeout_duration, default, args, kwargs): first_attempt = False + def ls_timeout(f, timeout=15, default=None): def new_f(*args, **kwargs): return __timeout(f, timeout, default, args, kwargs) + return new_f diff --git a/python_apps/pypo/setup.py b/python_apps/pypo/setup.py index e2b9873c4..1469cfbb6 100644 --- a/python_apps/pypo/setup.py +++ b/python_apps/pypo/setup.py @@ -10,64 +10,63 @@ print(script_path) os.chdir(script_path) # Allows us to avoid installing the upstart init script when deploying on Airtime Pro: -if '--no-init-script' in sys.argv: +if "--no-init-script" in sys.argv: data_files = [] - sys.argv.remove('--no-init-script') # super hax + sys.argv.remove("--no-init-script") # super hax else: pypo_files = [] - for root, dirnames, filenames in os.walk('pypo'): + for root, dirnames, filenames in os.walk("pypo"): for filename in filenames: pypo_files.append(os.path.join(root, filename)) data_files = [ - ('/etc/init', ['install/upstart/airtime-playout.conf.template']), - ('/etc/init', ['install/upstart/airtime-liquidsoap.conf.template']), - ('/etc/init.d', ['install/sysvinit/airtime-playout']), - ('/etc/init.d', ['install/sysvinit/airtime-liquidsoap']), - ('/var/log/airtime/pypo', []), - ('/var/log/airtime/pypo-liquidsoap', []), - ('/var/tmp/airtime/pypo', []), - ('/var/tmp/airtime/pypo/cache', []), - ('/var/tmp/airtime/pypo/files', []), - ('/var/tmp/airtime/pypo/tmp', []), - ] + ("/etc/init", ["install/upstart/airtime-playout.conf.template"]), + ("/etc/init", ["install/upstart/airtime-liquidsoap.conf.template"]), + ("/etc/init.d", ["install/sysvinit/airtime-playout"]), + ("/etc/init.d", ["install/sysvinit/airtime-liquidsoap"]), + ("/var/log/airtime/pypo", []), + ("/var/log/airtime/pypo-liquidsoap", 
[]), + ("/var/tmp/airtime/pypo", []), + ("/var/tmp/airtime/pypo/cache", []), + ("/var/tmp/airtime/pypo/files", []), + ("/var/tmp/airtime/pypo/tmp", []), + ] print(data_files) -setup(name='airtime-playout', - version='1.0', - description='Airtime Playout Engine', - url='http://github.com/sourcefabric/Airtime', - author='sourcefabric', - license='AGPLv3', - packages=['pypo', 'pypo.media', 'pypo.media.update', - 'liquidsoap'], - package_data={'': ['**/*.liq', '*.cfg', '*.types']}, - scripts=[ - 'bin/airtime-playout', - 'bin/airtime-liquidsoap', - 'bin/pyponotify' - ], - install_requires=[ - 'amqplib', - 'anyjson', - 'argparse', - 'configobj', - 'docopt', - 'future', - 'kombu', - 'mutagen', - 'PyDispatcher', - 'pyinotify', - 'pytz', - 'requests', - 'defusedxml', - 'packaging', - ], - zip_safe=False, - data_files=data_files) +setup( + name="airtime-playout", + version="1.0", + description="Airtime Playout Engine", + url="http://github.com/sourcefabric/Airtime", + author="sourcefabric", + license="AGPLv3", + packages=["pypo", "pypo.media", "pypo.media.update", "liquidsoap"], + package_data={"": ["**/*.liq", "*.cfg", "*.types"]}, + scripts=["bin/airtime-playout", "bin/airtime-liquidsoap", "bin/pyponotify"], + install_requires=[ + "amqplib", + "anyjson", + "argparse", + "configobj", + "docopt", + "future", + "kombu", + "mutagen", + "PyDispatcher", + "pyinotify", + "pytz", + "requests", + "defusedxml", + "packaging", + ], + zip_safe=False, + data_files=data_files, +) # Reload the initctl config so that playout services works if data_files: print("Reloading initctl configuration") - #call(['initctl', 'reload-configuration']) - print("Run \"sudo service airtime-playout start\" and \"sudo service airtime-liquidsoap start\"") + # call(['initctl', 'reload-configuration']) + print( + 'Run "sudo service airtime-playout start" and "sudo service airtime-liquidsoap start"' + ) diff --git a/utils/airtime-import/airtime-import b/utils/airtime-import/airtime-import index 
c3c2500d1..74a7340b0 100755 --- a/utils/airtime-import/airtime-import +++ b/utils/airtime-import/airtime-import @@ -9,7 +9,7 @@ import json import shutil import commands -#sys.path.append('/usr/lib/airtime/media-monitor/mm2/') +# sys.path.append('/usr/lib/airtime/media-monitor/mm2/') from mm2.media.monitor.pure import is_file_supported # create logger @@ -22,86 +22,97 @@ logging.disable(50) # add ch to logger logger.addHandler(ch) -if (os.geteuid() != 0): - print 'Must be a root user.' +if os.geteuid() != 0: + print "Must be a root user." sys.exit() # loading config file try: - config = ConfigObj('/etc/airtime/airtime.conf') + config = ConfigObj("/etc/airtime/airtime.conf") except Exception, e: - print('Error loading config file: %s', e) + print ("Error loading config file: %s", e) sys.exit() api_client = apc.AirtimeApiClient(config) -#helper functions +# helper functions # copy or move files # flag should be 'copy' or 'move' def copy_or_move_files_to(paths, dest, flag): try: for path in paths: - if (path[0] == "/" or path[0] == "~"): + if path[0] == "/" or path[0] == "~": path = os.path.realpath(path) else: - path = currentDir+path - path = apc.encode_to(path, 'utf-8') - dest = apc.encode_to(dest, 'utf-8') - if(os.path.exists(path)): - if(os.path.isdir(path)): + path = currentDir + path + path = apc.encode_to(path, "utf-8") + dest = apc.encode_to(dest, "utf-8") + if os.path.exists(path): + if os.path.isdir(path): path = format_dir_string(path) - #construct full path + # construct full path sub_path = [] for temp in os.listdir(path): - sub_path.append(path+temp) + sub_path.append(path + temp) copy_or_move_files_to(sub_path, dest, flag) - elif(os.path.isfile(path)): - #copy file to dest - if(is_file_supported(path)): - destfile = dest+os.path.basename(path) - if(flag == 'copy'): - print "Copying %(src)s to %(dest)s..." 
% {'src':path, 'dest':destfile} + elif os.path.isfile(path): + # copy file to dest + if is_file_supported(path): + destfile = dest + os.path.basename(path) + if flag == "copy": + print "Copying %(src)s to %(dest)s..." % { + "src": path, + "dest": destfile, + } shutil.copyfile(path, destfile) - elif(flag == 'move'): - print "Moving %(src)s to %(dest)s..." % {'src':path, 'dest':destfile} + elif flag == "move": + print "Moving %(src)s to %(dest)s..." % { + "src": path, + "dest": destfile, + } shutil.move(path, destfile) else: print "Cannot find file or path: %s" % path except Exception as e: - print "Error: ", e + print "Error: ", e + def format_dir_string(path): - if(path[-1] != '/'): - path = path+'/' + if path[-1] != "/": + path = path + "/" return path + def helper_get_stor_dir(): try: res = api_client.list_all_watched_dirs() except Exception, e: return res - if(res['dirs']['1'][-1] != '/'): - out = res['dirs']['1']+'/' + if res["dirs"]["1"][-1] != "/": + out = res["dirs"]["1"] + "/" return out else: - return res['dirs']['1'] + return res["dirs"]["1"] + def checkOtherOption(args): for i in args: - if(i[0] == '-'): + if i[0] == "-": return True -def errorIfMultipleOption(args, msg=''): - if(checkOtherOption(args)): - if(msg != ''): + +def errorIfMultipleOption(args, msg=""): + if checkOtherOption(args): + if msg != "": raise OptionValueError(msg) else: raise OptionValueError("This option cannot be combined with other options") + def printHelp(): storage_dir = helper_get_stor_dir() - if(storage_dir is None): + if storage_dir is None: storage_dir = "Unknown" else: storage_dir += "imported/" @@ -129,58 +140,70 @@ There are two ways to import audio files into Airtime: parser.print_help() print "" + def CopyAction(option, opt, value, parser): errorIfMultipleOption(parser.rargs) - if(len(parser.rargs) == 0 ): - raise OptionValueError("No argument found. 
This option requires at least one argument.") + if len(parser.rargs) == 0: + raise OptionValueError( + "No argument found. This option requires at least one argument." + ) stor = helper_get_stor_dir() - if(stor is None): + if stor is None: print "Unable to connect to the Airtime server." return - dest = stor+"organize/" - copy_or_move_files_to(parser.rargs, dest, 'copy') + dest = stor + "organize/" + copy_or_move_files_to(parser.rargs, dest, "copy") + def MoveAction(option, opt, value, parser): errorIfMultipleOption(parser.rargs) - if(len(parser.rargs) == 0 ): - raise OptionValueError("No argument found. This option requires at least one argument.") + if len(parser.rargs) == 0: + raise OptionValueError( + "No argument found. This option requires at least one argument." + ) stor = helper_get_stor_dir() - if(stor is None): + if stor is None: exit("Unable to connect to the Airtime server.") - dest = stor+"organize/" - copy_or_move_files_to(parser.rargs, dest, 'move') + dest = stor + "organize/" + copy_or_move_files_to(parser.rargs, dest, "move") + def WatchAddAction(option, opt, value, parser): errorIfMultipleOption(parser.rargs) - if(len(parser.rargs) > 1): - raise OptionValueError("Too many arguments. This option requires exactly one argument.") - elif(len(parser.rargs) == 0 ): - raise OptionValueError("No argument found. This option requires exactly one argument.") + if len(parser.rargs) > 1: + raise OptionValueError( + "Too many arguments. This option requires exactly one argument." + ) + elif len(parser.rargs) == 0: + raise OptionValueError( + "No argument found. This option requires exactly one argument." 
+ ) path = parser.rargs[0] - if (path[0] == "/" or path[0] == "~"): + if path[0] == "/" or path[0] == "~": path = os.path.realpath(path) else: - path = currentDir+path - path = apc.encode_to(path, 'utf-8') - if(os.path.isdir(path)): - #os.chmod(path, 0765) + path = currentDir + path + path = apc.encode_to(path, "utf-8") + if os.path.isdir(path): + # os.chmod(path, 0765) try: res = api_client.add_watched_dir(path) except Exception, e: exit("Unable to connect to the server.") # success - if(res['msg']['code'] == 0): + if res["msg"]["code"] == 0: print "%s added to watched folder list successfully" % path else: - print "Adding a watched folder failed: %s" % res['msg']['error'] + print "Adding a watched folder failed: %s" % res["msg"]["error"] print "This error most likely caused by wrong permissions" print "Try fixing this error by chmodding the parent directory(ies)" else: print "Given path is not a directory: %s" % path + def WatchListAction(option, opt, value, parser): errorIfMultipleOption(parser.rargs) - if(len(parser.rargs) > 0): + if len(parser.rargs) > 0: raise OptionValueError("This option doesn't take any arguments.") try: res = api_client.list_all_watched_dirs() @@ -188,120 +211,184 @@ def WatchListAction(option, opt, value, parser): exit("Unable to connect to the Airtime server.") dirs = res["dirs"].items() # there will be always 1 which is storage folder - if(len(dirs) == 1): - print "No watch folders found" + if len(dirs) == 1: + print "No watch folders found" else: for key, v in dirs: - if(key != '1'): + if key != "1": print v + def WatchRemoveAction(option, opt, value, parser): errorIfMultipleOption(parser.rargs) - if(len(parser.rargs) > 1): - raise OptionValueError("Too many arguments. This option requires exactly one argument.") - elif(len(parser.rargs) == 0 ): - raise OptionValueError("No argument found. This option requires exactly one argument.") + if len(parser.rargs) > 1: + raise OptionValueError( + "Too many arguments. 
This option requires exactly one argument." + ) + elif len(parser.rargs) == 0: + raise OptionValueError( + "No argument found. This option requires exactly one argument." + ) path = parser.rargs[0] - if (path[0] == "/" or path[0] == "~"): + if path[0] == "/" or path[0] == "~": path = os.path.realpath(path) else: - path = currentDir+path - path = apc.encode_to(path, 'utf-8') - if(os.path.isdir(path)): + path = currentDir + path + path = apc.encode_to(path, "utf-8") + if os.path.isdir(path): try: res = api_client.remove_watched_dir(path) except Exception, e: exit("Unable to connect to the Airtime server.") # success - if(res['msg']['code'] == 0): + if res["msg"]["code"] == 0: print "%s removed from watch folder list successfully." % path else: - print "Removing the watch folder failed: %s" % res['msg']['error'] + print "Removing the watch folder failed: %s" % res["msg"]["error"] else: print "The given path is not a directory: %s" % path + def StorageSetAction(option, opt, value, parser): bypass = False - isF = '-f' in parser.rargs - isForce = '--force' in parser.rargs - if(isF or isForce ): + isF = "-f" in parser.rargs + isForce = "--force" in parser.rargs + if isF or isForce: bypass = True - if(isF): - parser.rargs.remove('-f') - if(isForce): - parser.rargs.remove('--force') - if(not bypass): - errorIfMultipleOption(parser.rargs, "Only [-f] and [--force] option is allowed with this option.") - possibleInput = ['y','Y','n','N'] - confirm = raw_input("Are you sure you want to change the storage directory? (y/N)") - confirm = confirm or 'N' - while(confirm not in possibleInput): + if isF: + parser.rargs.remove("-f") + if isForce: + parser.rargs.remove("--force") + if not bypass: + errorIfMultipleOption( + parser.rargs, "Only [-f] and [--force] option is allowed with this option." + ) + possibleInput = ["y", "Y", "n", "N"] + confirm = raw_input( + "Are you sure you want to change the storage directory? 
(y/N)" + ) + confirm = confirm or "N" + while confirm not in possibleInput: print "Not an acceptable input: %s\n" % confirm - confirm = raw_input("Are you sure you want to change the storage directory? (y/N) ") - confirm = confirm or 'N' - if(confirm == 'n' or confirm =='N'): + confirm = raw_input( + "Are you sure you want to change the storage directory? (y/N) " + ) + confirm = confirm or "N" + if confirm == "n" or confirm == "N": sys.exit(1) - if(len(parser.rargs) > 1): - raise OptionValueError("Too many arguments. This option requires exactly one argument.") - elif(len(parser.rargs) == 0 ): - raise OptionValueError("No argument found. This option requires exactly one argument.") + if len(parser.rargs) > 1: + raise OptionValueError( + "Too many arguments. This option requires exactly one argument." + ) + elif len(parser.rargs) == 0: + raise OptionValueError( + "No argument found. This option requires exactly one argument." + ) path = parser.rargs[0] - if (path[0] == "/" or path[0] == "~"): + if path[0] == "/" or path[0] == "~": path = os.path.realpath(path) else: - path = currentDir+path - path = apc.encode_to(path, 'utf-8') - if(os.path.isdir(path)): + path = currentDir + path + path = apc.encode_to(path, "utf-8") + if os.path.isdir(path): try: res = api_client.set_storage_dir(path) except Exception, e: exit("Unable to connect to the Airtime server.") # success - if(res['msg']['code'] == 0): + if res["msg"]["code"] == 0: print "Successfully set storage folder to %s" % path else: - print "Setting storage folder failed: %s" % res['msg']['error'] + print "Setting storage folder failed: %s" % res["msg"]["error"] else: print "The given path is not a directory: %s" % path + def StorageGetAction(option, opt, value, parser): errorIfMultipleOption(parser.rargs) - if(len(parser.rargs) > 0): + if len(parser.rargs) > 0: raise OptionValueError("This option does not take any arguments.") print helper_get_stor_dir() + class OptionValueError(RuntimeError): def __init__(self, 
msg): self.msg = msg + usage = """[-c|--copy FILE/DIR [FILE/DIR...]] [-m|--move FILE/DIR [FILE/DIR...]] [--watch-add DIR] [--watch-list] [--watch-remove DIR] [--storage-dir-set DIR] [--storage-dir-get]""" parser = OptionParser(usage=usage, add_help_option=False) -parser.add_option('-c','--copy', action='callback', callback=CopyAction, metavar='FILE', help='Copy FILE(s) into the storage directory.\nYou can specify multiple files or directories.') -parser.add_option('-m','--move', action='callback', callback=MoveAction, metavar='FILE', help='Move FILE(s) into the storage directory.\nYou can specify multiple files or directories.') -parser.add_option('--watch-add', action='callback', callback=WatchAddAction, help='Add DIR to the watched folders list.') -parser.add_option('--watch-list', action='callback', callback=WatchListAction, help='Show the list of folders that are watched.') -parser.add_option('--watch-remove', action='callback', callback=WatchRemoveAction, help='Remove DIR from the watched folders list.') -parser.add_option('--storage-dir-set', action='callback', callback=StorageSetAction, help='Set storage dir to DIR.') -parser.add_option('--storage-dir-get', action='callback', callback=StorageGetAction, help='Show the current storage dir.') -parser.add_option('-h', '--help', dest='help', action='store_true', help='show this help message and exit') +parser.add_option( + "-c", + "--copy", + action="callback", + callback=CopyAction, + metavar="FILE", + help="Copy FILE(s) into the storage directory.\nYou can specify multiple files or directories.", +) +parser.add_option( + "-m", + "--move", + action="callback", + callback=MoveAction, + metavar="FILE", + help="Move FILE(s) into the storage directory.\nYou can specify multiple files or directories.", +) +parser.add_option( + "--watch-add", + action="callback", + callback=WatchAddAction, + help="Add DIR to the watched folders list.", +) +parser.add_option( + "--watch-list", + action="callback", + 
callback=WatchListAction, + help="Show the list of folders that are watched.", +) +parser.add_option( + "--watch-remove", + action="callback", + callback=WatchRemoveAction, + help="Remove DIR from the watched folders list.", +) +parser.add_option( + "--storage-dir-set", + action="callback", + callback=StorageSetAction, + help="Set storage dir to DIR.", +) +parser.add_option( + "--storage-dir-get", + action="callback", + callback=StorageGetAction, + help="Show the current storage dir.", +) +parser.add_option( + "-h", + "--help", + dest="help", + action="store_true", + help="show this help message and exit", +) # pop "--dir" -#sys.argv.pop(1) +# sys.argv.pop(1) # pop "invoked pwd" -currentDir = os.getcwd() #sys.argv.pop(1)+'/' +currentDir = os.getcwd() # sys.argv.pop(1)+'/' -if('-l' in sys.argv or '--link' in sys.argv): +if "-l" in sys.argv or "--link" in sys.argv: print "\nThe [-l][--link] option is deprecated. Please use the --watch-add option.\nTry 'airtime-import -h' for more detail.\n" sys.exit() -if('-h' in sys.argv): +if "-h" in sys.argv: printHelp() sys.exit() -if(len(sys.argv) == 1 or '-' not in sys.argv[1]): +if len(sys.argv) == 1 or "-" not in sys.argv[1]: printHelp() sys.exit() @@ -309,10 +396,10 @@ try: (option, args) = parser.parse_args() except Exception, e: printHelp() - if hasattr(e, 'msg'): - print "Error: "+e.msg + if hasattr(e, "msg"): + print "Error: " + e.msg else: - print "Error: ",e + print "Error: ", e sys.exit() except SystemExit: printHelp() @@ -321,7 +408,3 @@ except SystemExit: if option.help: printHelp() sys.exit() - - - - diff --git a/utils/airtime-silan b/utils/airtime-silan index 626777c63..f7598636f 100755 --- a/utils/airtime-silan +++ b/utils/airtime-silan @@ -21,14 +21,14 @@ logging.disable(50) logger.addHandler(ch) if os.geteuid() != 0: - print 'Must be a root user.' + print "Must be a root user." 
sys.exit(1) # loading config file try: - config = ConfigObj('/etc/airtime/airtime.conf') + config = ConfigObj("/etc/airtime/airtime.conf") except Exception, e: - print('Error loading config file: %s', e) + print ("Error loading config file: %s", e) sys.exit(1) api_client = apc.AirtimeApiClient(config) @@ -43,25 +43,29 @@ try: # filepath files = api_client.get_files_without_silan_value() total_files = len(files) - if total_files == 0: break + if total_files == 0: + break processed_data = [] total = 0 for f in files: - full_path = f['fp'] + full_path = f["fp"] # silence detect(set default queue in and out) try: - command = ['silan', '-b' '-f', 'JSON', full_path] + command = ["silan", "-b" "-f", "JSON", full_path] proc = subprocess.Popen(command, stdout=subprocess.PIPE) - out = proc.communicate()[0].strip('\r\n') + out = proc.communicate()[0].strip("\r\n") info = json.loads(out) data = {} - data['cuein'] = str('{0:f}'.format(info['sound'][0][0])) - data['cueout'] = str('{0:f}'.format(info['sound'][-1][1])) - data['length'] = str('{0:f}'.format(info['file duration'])) - processed_data.append((f['id'], data)) + data["cuein"] = str("{0:f}".format(info["sound"][0][0])) + data["cueout"] = str("{0:f}".format(info["sound"][-1][1])) + data["length"] = str("{0:f}".format(info["file duration"])) + processed_data.append((f["id"], data)) total += 1 if total % 5 == 0: - print "Total %s / %s files has been processed.." % (total, total_files) + print "Total %s / %s files has been processed.." 
% ( + total, + total_files, + ) except Exception, e: print e print traceback.format_exc() @@ -70,7 +74,7 @@ try: try: print api_client.update_cue_values_by_silan(processed_data) - except Exception ,e: + except Exception, e: print e print traceback.format_exc() print "Total %d songs Processed" % subtotal diff --git a/utils/airtime-test-soundcard.py b/utils/airtime-test-soundcard.py index 834999966..074d5cde1 100644 --- a/utils/airtime-test-soundcard.py +++ b/utils/airtime-test-soundcard.py @@ -16,32 +16,35 @@ if os.geteuid() == 0: print "Please run this program as non-root" sys.exit(1) + def printUsage(): print "airtime-test-soundcard [-v] [-o alsa | ao | oss | portaudio | pulseaudio ] [-h]" print " Where: " print " -v verbose mode" print " -o Linux Sound API (default: alsa)" print " -h show help menu " - + + def find_liquidsoap_binary(): """ Starting with Airtime 2.0, we don't know the exact location of the Liquidsoap binary because it may have been installed through a debian package. Let's find the location of this binary. """ - + rv = subprocess.call("which airtime-liquidsoap > /dev/null", shell=True) if rv == 0: return "airtime-liquidsoap" return None + try: - optlist, args = getopt.getopt(sys.argv[1:], 'hvo:') + optlist, args = getopt.getopt(sys.argv[1:], "hvo:") except getopt.GetoptError, g: printUsage() sys.exit(1) - + sound_api_types = set(["alsa", "ao", "oss", "portaudio", "pulseaudio"]) verbose = False @@ -63,26 +66,25 @@ for o, a in optlist: try: print "Sound API: %s" % sound_api print "Outputting to soundcard. You should be able to hear a monotonous tone. Press ctrl-c to quit." 
- + liquidsoap_exe = find_liquidsoap_binary() - + if liquidsoap_exe is None: raise Exception("Liquidsoap not found!") - + command = "%s 'output.%s(sine())'" % (liquidsoap_exe, sound_api) - + if not verbose: command += " > /dev/null" - - #print command + + # print command rv = subprocess.call(command, shell=True) - - #if we reach this point, it means that our subprocess exited without the user - #doing a keyboard interrupt. This means there was a problem outputting to the - #soundcard. Print appropriate message. - print "There was an error using the selected sound API. Please select a different API " + \ - "and run this program again. Use the -h option for help" - + + # if we reach this point, it means that our subprocess exited without the user + # doing a keyboard interrupt. This means there was a problem outputting to the + # soundcard. Print appropriate message. + print "There was an error using the selected sound API. Please select a different API " + "and run this program again. Use the -h option for help" + except KeyboardInterrupt, ki: print "\nExiting" except Exception, e: diff --git a/utils/airtime-test-stream.py b/utils/airtime-test-stream.py index 675f934ac..843e53153 100644 --- a/utils/airtime-test-stream.py +++ b/utils/airtime-test-stream.py @@ -16,6 +16,7 @@ if os.geteuid() == 0: print "Please run this program as non-root" sys.exit(1) + def printUsage(): print "airtime-test-stream [-v] [-o icecast | shoutcast ] [-H hostname] [-P port] [-u username] [-p password] [-m mount]" print " Where: " @@ -42,7 +43,8 @@ def find_liquidsoap_binary(): return None -optlist, args = getopt.getopt(sys.argv[1:], 'hvo:H:P:u:p:m:') + +optlist, args = getopt.getopt(sys.argv[1:], "hvo:H:P:u:p:m:") stream_types = set(["shoutcast", "icecast"]) verbose = False @@ -89,31 +91,38 @@ try: print "Mount: %s\n" % mount url = "http://%s:%s/%s" % (host, port, mount) - print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." 
% (stream_type, url) + print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." % ( + stream_type, + url, + ) liquidsoap_exe = find_liquidsoap_binary() if liquidsoap_exe is None: raise Exception("Liquidsoap not found!") if stream_type == "icecast": - command = "%s 'output.icecast(%%vorbis, host = \"%s\", port = %s, user= \"%s\", password = \"%s\", mount=\"%s\", sine())'" % (liquidsoap_exe, host, port, user, password, mount) + command = ( + '%s \'output.icecast(%%vorbis, host = "%s", port = %s, user= "%s", password = "%s", mount="%s", sine())\'' + % (liquidsoap_exe, host, port, user, password, mount) + ) else: - command = "%s 'output.shoutcast(%%mp3, host=\"%s\", port = %s, user= \"%s\", password = \"%s\", sine())'" \ - % (liquidsoap_exe, host, port, user, password) + command = ( + '%s \'output.shoutcast(%%mp3, host="%s", port = %s, user= "%s", password = "%s", sine())\'' + % (liquidsoap_exe, host, port, user, password) + ) if not verbose: - command += " 2>/dev/null | grep \"failed\"" + command += ' 2>/dev/null | grep "failed"' else: print command - #print command + # print command rv = subprocess.call(command, shell=True) - #if we reach this point, it means that our subprocess exited without the user - #doing a keyboard interrupt. This means there was a problem outputting to the - #stream server. Print appropriate message. - print "There was an error with your stream configuration. Please review your configuration " + \ - "and run this program again. Use the -h option for help" + # if we reach this point, it means that our subprocess exited without the user + # doing a keyboard interrupt. This means there was a problem outputting to the + # stream server. Print appropriate message. + print "There was an error with your stream configuration. Please review your configuration " + "and run this program again. 
Use the -h option for help" except KeyboardInterrupt, ki: print "\nExiting" diff --git a/utils/upgrade.py b/utils/upgrade.py index 6b196555e..23c9eacfe 100755 --- a/utils/upgrade.py +++ b/utils/upgrade.py @@ -7,39 +7,45 @@ import requests from urlparse import urlparse import sys -CONFIG_PATH='/etc/airtime/airtime.conf' +CONFIG_PATH = "/etc/airtime/airtime.conf" GENERAL_CONFIG_SECTION = "general" + def read_config_file(config_path): """Parse the application's config file located at config_path.""" config = ConfigParser.SafeConfigParser() try: config.readfp(open(config_path)) except IOError as e: - print "Failed to open config file at " + config_path + ": " + e.strerror + print "Failed to open config file at " + config_path + ": " + e.strerror exit(-1) except Exception: - print e.strerror + print e.strerror exit(-1) return config -if __name__ == '__main__': + +if __name__ == "__main__": config = read_config_file(CONFIG_PATH) - api_key = config.get(GENERAL_CONFIG_SECTION, 'api_key') - base_url = config.get(GENERAL_CONFIG_SECTION, 'base_url') - base_dir = config.get(GENERAL_CONFIG_SECTION, 'base_dir') - base_port = config.get(GENERAL_CONFIG_SECTION, 'base_port', 80) + api_key = config.get(GENERAL_CONFIG_SECTION, "api_key") + base_url = config.get(GENERAL_CONFIG_SECTION, "base_url") + base_dir = config.get(GENERAL_CONFIG_SECTION, "base_dir") + base_port = config.get(GENERAL_CONFIG_SECTION, "base_port", 80) action = "upgrade" station_url = "" default_url = "http://%s:%s%s" % (base_url, base_port, base_dir) parser = argparse.ArgumentParser() - parser.add_argument('--downgrade', help='Downgrade the station', action="store_true") - parser.add_argument('station_url', help='station URL', nargs='?', default=default_url) + parser.add_argument( + "--downgrade", help="Downgrade the station", action="store_true" + ) + parser.add_argument( + "station_url", help="station URL", nargs="?", default=default_url + ) args = parser.parse_args() - + if args.downgrade: action = "downgrade" 
@@ -47,12 +53,11 @@ if __name__ == '__main__': station_url = args.station_url # Add http:// if you were lazy and didn't pass a scheme to this script - url = urlparse(station_url) + url = urlparse(station_url) if not url.scheme: station_url = "http://%s" % station_url print "Requesting %s..." % action - r = requests.get("%s/%s" % (station_url, action), auth=(api_key, '')) + r = requests.get("%s/%s" % (station_url, action), auth=(api_key, "")) print r.text r.raise_for_status() - From c04daca03320190f64a2b713fd01431cb0dbc577 Mon Sep 17 00:00:00 2001 From: Jonas L Date: Thu, 27 May 2021 16:29:12 +0200 Subject: [PATCH 06/28] Pin pre-commit action version Co-authored-by: Kyle Robbertze --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b943e5ae8..841014ef1 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -11,4 +11,4 @@ jobs: steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - - uses: pre-commit/action@v2 + - uses: pre-commit/action@v2.0.2 From 1af089219ff7eab105fee0f01c1ae6c24b570526 Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 16:32:06 +0200 Subject: [PATCH 07/28] Bump pre-commit/actions version to 2.0.3 --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 841014ef1..ee25d33d9 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -11,4 +11,4 @@ jobs: steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - - uses: pre-commit/action@v2.0.2 + - uses: pre-commit/action@v2.0.3 From d8195f0fd89dd23e95e1b2276e4b940236a885df Mon Sep 17 00:00:00 2001 From: jo Date: Thu, 27 May 2021 16:33:20 +0200 Subject: [PATCH 08/28] Fix trailing whitespaces in files --- CREDITS | 88 +++++++++---------- changelog | 78 ++++++++-------- dev_tools/README_TRANSLATIONS | 2 +- 
dev_tools/auto_schedule_show.php | 46 +++++----- dev_tools/compare_cc_files_to_fs.py | 2 +- .../liquidsoap_compile/liquidsoap-compile.sh | 2 +- dev_tools/liquidsoap_compile/run.sh | 8 +- dev_tools/release/release.sh | 2 +- dev_tools/scripts/git-config-git-merge-po | 2 +- dev_tools/scripts/git-merge-po | 8 +- dev_tools/toggle-pypo-debug.sh | 4 +- docs/_includes/head.html | 6 +- docs/_includes/navbar.html | 2 +- docs/_includes/scripts.html | 7 +- docs/_layouts/article.html | 6 +- docs/_layouts/default.html | 8 +- docs/_layouts/home.html | 6 +- docs/css/creative.min.css | 50 +++++------ docs/docs.html | 2 +- docs/search.html | 2 +- installer/lxc-bootstrap | 2 +- installer/vagrant/centos.sh | 4 +- logo/logotype.ai | 8 +- .../airtime_analyzer/message_listener.py | 30 +++---- .../airtime_analyzer/status_reporter.py | 8 +- .../airtime_analyzer/tools/ftp-upload-hook.sh | 16 ++-- .../airtime_analyzer/tools/message_sender.php | 8 +- .../tools/test-hook-script.sh | 2 +- python_apps/pypo/liquidsoap/1.1/fdkaac.liq | 2 +- python_apps/pypo/liquidsoap/1.1/ls_lib.liq | 10 +-- python_apps/pypo/liquidsoap/1.1/ls_script.liq | 8 +- python_apps/pypo/liquidsoap/1.1/mp3.liq | 2 +- python_apps/pypo/liquidsoap/1.1/ogg.liq | 2 +- python_apps/pypo/liquidsoap/1.1/opus.liq | 2 +- python_apps/pypo/liquidsoap/1.3/fdkaac.liq | 2 +- python_apps/pypo/liquidsoap/1.3/ls_lib.liq | 10 +-- python_apps/pypo/liquidsoap/1.3/ls_script.liq | 8 +- python_apps/pypo/liquidsoap/1.3/mp3.liq | 2 +- python_apps/pypo/liquidsoap/1.3/ogg.liq | 2 +- python_apps/pypo/liquidsoap/1.3/opus.liq | 2 +- python_apps/pypo/liquidsoap/1.4/fdkaac.liq | 2 +- python_apps/pypo/liquidsoap/1.4/mp3.liq | 2 +- python_apps/pypo/liquidsoap/1.4/ogg.liq | 2 +- python_apps/pypo/liquidsoap/1.4/opus.liq | 2 +- utils/airtime-log.php | 10 +-- utils/rivendell-converter.sh | 2 +- 46 files changed, 240 insertions(+), 241 deletions(-) diff --git a/CREDITS b/CREDITS index ad4150f37..984f42413 100644 --- a/CREDITS +++ b/CREDITS @@ -92,7 +92,7 @@ Martin 
Konecny (martin.konecny@sourcefabric.org) James Moon (james.moon@sourcefabric.org) Role: Software Developer - + Denise Rigato (denise.rigato@sourcefabric.org) Role: Software Developer @@ -124,7 +124,7 @@ Martin Konecny (martin.konecny@sourcefabric.org) James Moon (james.moon@sourcefabric.org) Role: Software Developer - + Denise Rigato (denise.rigato@sourcefabric.org) Role: Software Developer @@ -147,7 +147,7 @@ Naomi Aro (naomi.aro@sourcefabric.org) James Moon (james.moon@sourcefabric.org) Role: Software Developer - + Denise Rigato (denise.rigato@sourcefabric.org) Role: Software Developer @@ -198,7 +198,7 @@ Naomi Aro (naomi.aro@sourcefabric.org) James Moon (james.moon@sourcefabric.org) Role: Software Developer - + Denise Rigato (denise.rigato@sourcefabric.org) Role: Software Developer @@ -267,7 +267,7 @@ Martin Konecny (martin.konecny@sourcefabric.org) James Moon (james.moon@sourcefabric.org) Role: Software Developer - + Yuchen Wang (yuchen.wang@sourcefabric.org) Role: Software Developer @@ -350,10 +350,10 @@ Version 1.6.1 ------------- Same as previous version. -Version 1.6.0 +Version 1.6.0 ------------- This version marks a major change to the project, completely replacing the -custom audio player with liquidsoap, dropping the custom desktop GUI, and +custom audio player with liquidsoap, dropping the custom desktop GUI, and completely rewriting the web interface. The project has also been renamed from "Campcaster" to "Airtime" for this release. @@ -361,11 +361,11 @@ Paul Baranowski (paul.baranowski@sourcefabric.org) Role: Project Lead / Software Developer Highlights: - Integration and development of liquidsoap scheduler - - Separation of playlists from the scheduler + - Separation of playlists from the scheduler Naomi Aro (naomi.aro@sourcefabric.org) Role: Software Developer - Highlights: + Highlights: - New User Interface - Conversion to Propel DB backend @@ -397,44 +397,44 @@ generated radio station based in Basel, Switzerland powered by Campcaster. 
We ar very grateful for their contributions, and specifically to Thomas Gilgen, Dirk Claes, Rigzen Latshang and Fabiano Sidler. -Douglas Arellanes +Douglas Arellanes - Tester and user feedback -Robin Gareus +Robin Gareus - Packaging -Ferenc Gerlits +Ferenc Gerlits - Studio GUI -Sebastian Göbel +Sebastian Göbel - Web interface, storage server -Nebojsa Grujic +Nebojsa Grujic - Scheduler, XML-RPC interface, Gstreamer plugins -Tomáš Hlava +Tomáš Hlava - Bug fixes -Sava Tatić +Sava Tatić - Manager Version 1.3.0 - "Dakar" ----------------------- -Douglas Arellanes +Douglas Arellanes - Tester and user feedback -Ferenc Gerlits +Ferenc Gerlits - Studio GUI, scheduler, packaging -Sebastian Göbel +Sebastian Göbel - Web interface -Tomáš Hlava +Tomáš Hlava - Bug fixes -Sava Tatić +Sava Tatić - Manager @@ -442,19 +442,19 @@ Version 1.2.0 - "Kotor" ----------------------- In alphabetical order: -Douglas Arellanes +Douglas Arellanes - Tester and user feedback -Paul Baranowski +Paul Baranowski - Project manager, HTML UI, storage server -Ferenc Gerlits +Ferenc Gerlits - Studio GUI, scheduler, packaging -Tomáš Hlava +Tomáš Hlava - Bug fixes -Robert Klajn - - Superuser feedback -Mark Kretschmann +Robert Klajn + - Superuser feedback +Mark Kretschmann - Audio player -Sava Tatić +Sava Tatić - Manager @@ -462,40 +462,40 @@ Version 1.1.X - "Freetown" -------------------------- In alphabetical order: -Douglas Arellanes +Douglas Arellanes - Tester and user feedback -Paul Baranowski +Paul Baranowski - Project manager, HTML UI, storage server, scheduler -János Csikós +János Csikós - HTML UI -Ferenc Gerlits +Ferenc Gerlits - Studio GUI, scheduler, packaging -Tomáš Hlava +Tomáš Hlava - Storage server, network hub -Mark Kretschmann +Mark Kretschmann - Audio player -Ákos Maróy +Ákos Maróy - Architecture design, scheduler, audio player Sava Tatić - Manager Version 1.0 ----------- -The original Campcaster (LiveSupport) concept was drafted by Micz Flor. 
It was -fully developed by Robert Klajn, Douglas Arellanes, Ákos Maróy, and Sava Tatić. -The user interface has been designed by Charles Truett, based on the initial work -done by a team of his then-fellow Parsons School of Design students Turi McKinley, -Catalin Lazia and Sangita Shah. The team was led by then-head of the school's +The original Campcaster (LiveSupport) concept was drafted by Micz Flor. It was +fully developed by Robert Klajn, Douglas Arellanes, Ákos Maróy, and Sava Tatić. +The user interface has been designed by Charles Truett, based on the initial work +done by a team of his then-fellow Parsons School of Design students Turi McKinley, +Catalin Lazia and Sangita Shah. The team was led by then-head of the school's Department of Digital Design Colleen Macklin, assisted by Kunal Jain. In alphabetical order: Douglas Arellanes -Michael Aschauer -Micz Flor +Michael Aschauer +Micz Flor Ferenc Gerlits Sebastian Göbel Tomáš Hlava Nadine Kokot -Ákos Maróy +Ákos Maróy Sava Tatić Charles Truett diff --git a/changelog b/changelog index c2b3edfb9..c3c28732c 100644 --- a/changelog +++ b/changelog @@ -49,14 +49,14 @@ * Much faster library import (Silan analyzer runs in background) * Fixed zombie process sometimes being created * Other - * Upgrade to Mutagen (tag reader) 1.21 + * Upgrade to Mutagen (tag reader) 1.21 2.3.0 - Jan 21st, 2013 * New features * Localization (Chinese, Czech, English, French, German, Italian, Korean, Portuguese, Russian, Spanish) * User management page for non-admin users - * Listener statistics (Icecast/Shoutcast) + * Listener statistics (Icecast/Shoutcast) * Airtime no longer requires Apache document root * Replay Gain offset in real-time * Enable/disable replay gain @@ -113,7 +113,7 @@ * Playlist Builder should remember your position instead of resetting to the first page every time an operation was performed * If Master or Live input source is disconnected, Airtime will no longer automatically switch off that source. 
This should allow the source to reconnect and continue playback. - + * Bug fixes * Fixed playout engine sometimes not receiving new schedule which could result in dead air * Fixed script timeout which caused Apache to become unresponsive @@ -174,7 +174,7 @@ * Fixed Airtime could stop automatically recording after 2 hours if the web interface isn't used. * Fixed upgrading from 1.8.2 when the stor directory was a symlink would cause filenames to not be preserved. * Fixed Day View in the Now Playing tab showed some items on incorrect days. - * Fixed problems with having an equal '=' sign as an icecast password + * Fixed problems with having an equal '=' sign as an icecast password * Other * Various optimizations to make Airtime feel snappier in the browser. Various views should load much quicker. @@ -295,11 +295,11 @@ - Fixed pypo hanging if web server is unavailable - Fixed items that were being dragged and dropped in the Playlist Builder being obscured by other UI elements. - + 1.9.3 - August 26th, 2011 * Improvements - It is now possible to upgrade your system while a show is playing. - Playout will be temporarily interrupted for about 5-10 seconds and then + Playout will be temporarily interrupted for about 5-10 seconds and then playout will resume. Previously playout would not resume until the next scheduled show. * Fixes @@ -324,7 +324,7 @@ - Prevent users from doing a manual install of Airtime if they already have the Debian package version installed * Changes - - Support Settings moved to a separate page accessible by Admin user only. + - Support Settings moved to a separate page accessible by Admin user only. 1.9.0 - August 9, 2011 @@ -333,20 +333,20 @@ The cool stuff: - Human-readable file structure. The directory structure and file names on disk are now human-readable. This means you can easily find files using your file browser on your server. - - Magic file synchronization. Edits to your files are automatically + - Magic file synchronization. 
Edits to your files are automatically noticed by Airtime. If you edit any files on disk, such as trimming the length of a track, Airtime will automatically notice this and adjust the playlist lengths and shows for that audio file. - - Auto-import and multiple-directory support. You can set any number of - directories to be watched by Airtime. Any new files you add to watched + - Auto-import and multiple-directory support. You can set any number of + directories to be watched by Airtime. Any new files you add to watched directories will be automatically imported into Airtime, and any deleted files will be automatically removed. - - The "airtime-import" command line tool can now set watched directories + - The "airtime-import" command line tool can now set watched directories and change the storage directory. - Graceful recovery from reboot. If the playout engine starts up and detects that a show should be playing at the current time, it will skip - to the right point in the track and start playing. Previously, Airtime - would not play anything until the next show started. This also fixes a + to the right point in the track and start playing. Previously, Airtime + would not play anything until the next show started. This also fixes a problem where the metadata on the stream was lost when a file had cue-in/out values set. Thanks to the Liquidsoap developers for implementing the ability to do all of this! @@ -354,28 +354,28 @@ The cool stuff: - A new "Program Manager" role. A program manager can create shows but can't change the preferences or modify users. - No more rebooting after install! Airtime now uses standard SystemV initd - scripts instead of non-standard daemontools. This also makes for a much + scripts instead of non-standard daemontools. This also makes for a much faster install. 
- - Frontend widgets are much easier to use and their theme can be modified + - Frontend widgets are much easier to use and their theme can be modified with CSS (Click here for more info and installation instructions). - Improved installation - only one command to install on Ubuntu! * Improvements: - - Cumulative time shown on playlists. The Playlist Builder now shows the + - Cumulative time shown on playlists. The Playlist Builder now shows the total time since the beginning of the playlist for each song. - - "End Time" instead of "Duration". In the Add/Edit Show dialog, we + - "End Time" instead of "Duration". In the Add/Edit Show dialog, we replaced the "Duration" field with "End Time". Users reported that this was a much more intuitive way to schedule the show. Duration is still shown as a read-only field. - Feedback & promotion system. Airtime now includes a way to send feedback - and promote your site on the Sourcefabric web page. This will greatly - enhance our ability to understand who is using the software, which in - turn will allow us to make appropriate features and receive grant + and promote your site on the Sourcefabric web page. This will greatly + enhance our ability to understand who is using the software, which in + turn will allow us to make appropriate features and receive grant funding. - The show recorder can now instantly cancel a show thanks to the use of RabbitMQ. - Only admins have the ability to delete files now. - - The playout engine now runs with a higher priority. This should help + - The playout engine now runs with a higher priority. This should help prevent any problems with audio skipping. - Airtime has been contained. It is now easier to run other apps on the same system with Airtime because it no longer messes with the system-wide @@ -386,12 +386,12 @@ The cool stuff: page( above the search box). 
* Bug fixes: - - Fixed bug where you couldn't import a file with a name longer than 255 + - Fixed bug where you couldn't import a file with a name longer than 255 characters. - Fixed bug where searching an audio archive of 15K+ files was slow. - Fixed bug where upgrading from more than one version back (e.g. 1.8.0 -> 1.9.0) did not work. - - Fixed bug where the wrong file length was reported for very large CBR + - Fixed bug where the wrong file length was reported for very large CBR mp3 files (thanks to mutagen developers for the patch!) 1.8.2 - June 8, 2011 @@ -456,30 +456,30 @@ Highlights: 1.8.0 - April 19, 2011 * The biggest feature of this release is the ability to edit shows. You can - change everything from ‘Name’, ‘Description’, and ‘URL’, to repeat and + change everything from ‘Name’, ‘Description’, and ‘URL’, to repeat and rebroadcast days. Show instances will be dynamically created or removed as - needed. Radio stations will be pleased to know they can now have up to + needed. Radio stations will be pleased to know they can now have up to ten rebroadcast shows too. * Airtime’s calendar now looks, feels and performs better than ever. Loading - a station schedule is now five to eight times faster. In our tests of 1.7, - if the month calendar had shows scheduled for every hour of every day, it + a station schedule is now five to eight times faster. In our tests of 1.7, + if the month calendar had shows scheduled for every hour of every day, it used to take 16 seconds to load. Now in 1.8 it takes two seconds. * It is possible to have up to ten rebroadcast shows now, in 1.7 it was only up to five. - * Airtime’s new installation script has two options for increased install - flexibility: --preserve to keep your existing config files, or --overwrite - to replace your existing config files with new ones. 
Uninstall no longer + * Airtime’s new installation script has two options for increased install + flexibility: --preserve to keep your existing config files, or --overwrite + to replace your existing config files with new ones. Uninstall no longer removes Airtime config files or the music storage directory. - * New improved look & feel of the calendar (thanks to the "FullCalendar" + * New improved look & feel of the calendar (thanks to the "FullCalendar" jQuery project). - * Installation now puts files in standard locations in the Linux file + * Installation now puts files in standard locations in the Linux file hierarchy, which prepares the project to be accepted into Ubuntu and Debian. - Also because of our wish to be part of those projects, the default output - stream type is now OGG instead of MP3 -- due to MP3 licensing issues. + Also because of our wish to be part of those projects, the default output + stream type is now OGG instead of MP3 -- due to MP3 licensing issues. This configuration can be changed in "/etc/airtime/liquidsoap.conf". - * You now have the ability to start and stop pypo and the show recorder from - the command line with the commands "airtime-pypo-start", - "airtime-pypo-stop", "airtime-show-recorder-start", and + * You now have the ability to start and stop pypo and the show recorder from + the command line with the commands "airtime-pypo-start", + "airtime-pypo-stop", "airtime-show-recorder-start", and "airtime-show-recorder-stop". * Bug fixes: - CC-2192 Schedule sent to pypo is not sorted by start time. 
@@ -520,7 +520,7 @@ Highlights: * Bug fixes: - CC-2082 OGG stream dies after every song when using MPlayer - CC-1894 Warn users about time zone differences or clock drift problems on - the server + the server - CC-2058 Utilities are not in the system $PATH - CC-2051 Unable to change user password - CC-2030 Icon needed for Cue In/Out @@ -531,7 +531,7 @@ Bug fixes: * CC-1973 Liquidsoap crashes after multi-day playout * CC-1970 API key fix (Security fix) - Each time you run the install scripts, a new API key is now generated. - * CC-1992 Editing metadata goes blank on 'submit' + * CC-1992 Editing metadata goes blank on 'submit' * CC-1993 ui start time and song time unsynchronized 1.6.0 - Feb 14, 2011 diff --git a/dev_tools/README_TRANSLATIONS b/dev_tools/README_TRANSLATIONS index d5ccfdce3..71adfb32f 100644 --- a/dev_tools/README_TRANSLATIONS +++ b/dev_tools/README_TRANSLATIONS @@ -7,5 +7,5 @@ To update the Airtime translations: - Commit the updated files. - Push to GitHub. - Transifex will then pick up the updated files in about 24 hours, and they'll be available for translation there. -- After translators have updated strings, they'll be automatically downloaded and committed to our git repo by +- After translators have updated strings, they'll be automatically downloaded and committed to our git repo by a script running here at Sourcefabric (contact Andrey). 
diff --git a/dev_tools/auto_schedule_show.php b/dev_tools/auto_schedule_show.php index d9f47bc44..82abe7eeb 100644 --- a/dev_tools/auto_schedule_show.php +++ b/dev_tools/auto_schedule_show.php @@ -1,13 +1,13 @@ format("Y-m-d H:i:s"); @@ -79,7 +79,7 @@ function insertIntoCcShowInstances($conn, $show_id, $starts, $ends, $files){ $values = "('$starts', '$ends', $show_id, 0, 0, NULL, NULL, TIMESTAMP '$ends' - TIMESTAMP '$starts', '$now', 'f')"; $query = "INSERT INTO cc_show_instances $columns values $values "; echo $query.PHP_EOL; - + $result = query($conn, $query); $query = "SELECT currval('cc_show_instances_id_seq');"; @@ -92,7 +92,7 @@ function insertIntoCcShowInstances($conn, $show_id, $starts, $ends, $files){ while ($row = pg_fetch_array($result)) { $show_instance_id = $row["currval"]; } - + return $show_instance_id; } @@ -102,9 +102,9 @@ function insertIntoCcShowInstances($conn, $show_id, $starts, $ends, $files){ */ function insertIntoCcSchedule($conn, $files, $show_instance_id, $p_starts, $p_ends){ $columns = "(starts, ends, file_id, clip_length, fade_in, fade_out, cue_in, cue_out, media_item_played, instance_id)"; - + $starts = $p_starts; - + foreach($files as $file){ $endsDateTime = new DateTime($starts, new DateTimeZone("UTC")); @@ -115,9 +115,9 @@ function insertIntoCcSchedule($conn, $files, $show_instance_id, $p_starts, $p_en $values = "('$starts', '$ends', $file[id], '$file[length]', '00:00:00', '00:00:00', '00:00:00', '$file[length]', 'f', $show_instance_id)"; $query = "INSERT INTO cc_schedule $columns VALUES $values"; echo $query.PHP_EOL; - + $starts = $ends; - $result = query($conn, $query); + $result = query($conn, $query); } } @@ -131,7 +131,7 @@ function getEndTime($startDateTime, $p_files){ foreach ($p_files as $file){ $startDateTime->add(getDateInterval($file['length'])); } - + return $startDateTime; } @@ -142,7 +142,7 @@ function rabbitMqNotify(){ echo "Contacting $url".PHP_EOL; $ch = curl_init($url); curl_exec($ch); - curl_close($ch); + 
curl_close($ch); } $conn = pg_connect("host=localhost port=5432 dbname=airtime user=airtime password=airtime"); @@ -152,9 +152,9 @@ if (!$conn) { } if (count($argv) > 1){ - if ($argv[1] == "--clean"){ + if ($argv[1] == "--clean"){ $tables = array("cc_schedule", "cc_show_instances", "cc_show"); - + foreach($tables as $table){ $query = "DELETE FROM $table"; echo $query.PHP_EOL; @@ -162,9 +162,9 @@ if (count($argv) > 1){ } rabbitMqNotify(); exit(0); - } else { + } else { $str = <<format("Y-m-d H:i:s"); //$ends = $endDateTime->format("Y-m-d H:i:s"); -$files = getFileFromCcFiles($conn); +$files = getFileFromCcFiles($conn); $show_id = insertIntoCcShow($conn); $endDateTime = getEndTime(clone $startDateTime, $files); diff --git a/dev_tools/compare_cc_files_to_fs.py b/dev_tools/compare_cc_files_to_fs.py index 5b83d27df..993778769 100644 --- a/dev_tools/compare_cc_files_to_fs.py +++ b/dev_tools/compare_cc_files_to_fs.py @@ -30,7 +30,7 @@ class AirtimeMediaMonitorBootstrap: config = ConfigObj("/etc/airtime/airtime.conf") self.api_client = apc.api_client_factory(config) - """ + """ try: logging.config.fileConfig("logging.cfg") except Exception, e: diff --git a/dev_tools/liquidsoap_compile/liquidsoap-compile.sh b/dev_tools/liquidsoap_compile/liquidsoap-compile.sh index 1f4a7d84a..67b394352 100755 --- a/dev_tools/liquidsoap_compile/liquidsoap-compile.sh +++ b/dev_tools/liquidsoap_compile/liquidsoap-compile.sh @@ -90,7 +90,7 @@ fi rm -rf liquidsoap-full git clone https://github.com/savonet/liquidsoap-full cd liquidsoap-full -git checkout master +git checkout master make init make update diff --git a/dev_tools/liquidsoap_compile/run.sh b/dev_tools/liquidsoap_compile/run.sh index 2e20ff22a..bf7cf4b98 100755 --- a/dev_tools/liquidsoap_compile/run.sh +++ b/dev_tools/liquidsoap_compile/run.sh @@ -26,7 +26,7 @@ build_env () { echo "Please use -u to assign sudo username before build environments." 
exit 1 fi - + echo "build_env $1" #exec > >(tee ./liquidsoap_compile_logs/build_env_$1.log) os=`echo $1 | awk '/(debian)/'` @@ -40,7 +40,7 @@ build_env () { useradd tmp echo "User tmp is created." fi - + apt-get update apt-get --force-yes -y install debootstrap dchroot echo [$1] > /etc/schroot/chroot.d/$1.conf @@ -87,7 +87,7 @@ compile_liq () { else mv ./liquidsoap-compile_logs/compile_liq_$1.log ./liquidsoap-compile_logs/fail_to_compile_liq_$1.log fi -} +} os_versions=("ubuntu_lucid_32" "ubuntu_lucid_64" "ubuntu_precise_32" "ubuntu_precise_64" "ubuntu_quantal_32" "ubuntu_quantal_64" "ubuntu_raring_32" "ubuntu_raring_64" "debian_squeeze_32" "debian_squeeze_64" "debian_wheezy_32" "debian_wheezy_64") @@ -147,7 +147,7 @@ do compile_liq ${os_versions[$i]} | tee ./liquidsoap-compile_logs/compile_liq_${os_versions[$i]}.log flag=0 fi - done + done if [ $flag = 1 ];then echo "Unsupported Platform from:" for k in "${os_versions[@]}" diff --git a/dev_tools/release/release.sh b/dev_tools/release/release.sh index 975145dd3..1f25f9147 100755 --- a/dev_tools/release/release.sh +++ b/dev_tools/release/release.sh @@ -74,7 +74,7 @@ tar -czf $target_file \ --exclude dev_tools \ --exclude vendor/phing \ --exclude vendor/simplepie/simplepie/tests \ - libretime-${suffix} + libretime-${suffix} echo " Done" popd diff --git a/dev_tools/scripts/git-config-git-merge-po b/dev_tools/scripts/git-config-git-merge-po index 6fe011f1b..a91926ce8 100644 --- a/dev_tools/scripts/git-config-git-merge-po +++ b/dev_tools/scripts/git-config-git-merge-po @@ -1,5 +1,5 @@ [merge "pofile"] name = Gettext merge driver driver = git merge-po %O %A %B - + diff --git a/dev_tools/scripts/git-merge-po b/dev_tools/scripts/git-merge-po index 820904236..e717fc75d 100644 --- a/dev_tools/scripts/git-merge-po +++ b/dev_tools/scripts/git-merge-po @@ -1,6 +1,6 @@ #!/bin/sh # -# https://gist.github.com/mezis/1605647 +# https://gist.github.com/mezis/1605647 # by Julien Letessier (mezis) # # Custom Git merge driver - merges 
PO files using msgcat(1) @@ -8,15 +8,15 @@ # - Install gettext # # - Place this script in your PATH -# +# # - Add this to your .git/config : # # [merge "pofile"] # name = Gettext merge driver # driver = git merge-po %O %A %B -# +# # - Add this to .gitattributes : -# +# # *.po merge=pofile # *.pot merge=pofile # diff --git a/dev_tools/toggle-pypo-debug.sh b/dev_tools/toggle-pypo-debug.sh index 6468066a1..b10c6b47e 100755 --- a/dev_tools/toggle-pypo-debug.sh +++ b/dev_tools/toggle-pypo-debug.sh @@ -5,7 +5,7 @@ if [[ $EUID -ne 0 ]]; then fi usage () { - echo "Use --enable or --disable flag. Enable is to set up environment" + echo "Use --enable or --disable flag. Enable is to set up environment" echo "for specified user. --disable is to reset it back to pypo user" } @@ -28,7 +28,7 @@ elif [ "$1" = "--disable" ]; then echo "Changing ownership to user $1" chmod 644 /etc/airtime/airtime.conf chown -Rv $user:$user /var/tmp/airtime/pypo/ - chmod -v a+r /etc/airtime/api_client.cfg + chmod -v a+r /etc/airtime/api_client.cfg /etc/init.d/airtime-playout stop-liquidsoap diff --git a/docs/_includes/head.html b/docs/_includes/head.html index 4c48eeda6..188aeae12 100644 --- a/docs/_includes/head.html +++ b/docs/_includes/head.html @@ -4,13 +4,13 @@ - + - + - + {{ site.title }} - {{ page.title }} \ No newline at end of file diff --git a/docs/_includes/navbar.html b/docs/_includes/navbar.html index 4a7ea4c1b..cc568c395 100644 --- a/docs/_includes/navbar.html +++ b/docs/_includes/navbar.html @@ -1,6 +1,6 @@