Format code using black

This commit is contained in:
jo 2021-05-27 16:23:02 +02:00
parent efe4fa027e
commit c27f020d73
85 changed files with 3238 additions and 2243 deletions

View File

@ -2,7 +2,8 @@
from django.apps import AppConfig from django.apps import AppConfig
from django.db.models.signals import pre_save from django.db.models.signals import pre_save
class LibreTimeAPIConfig(AppConfig): class LibreTimeAPIConfig(AppConfig):
name = 'libretimeapi' name = "libretimeapi"
verbose_name = 'LibreTime API' verbose_name = "LibreTime API"
default_auto_field = 'django.db.models.AutoField' default_auto_field = "django.db.models.AutoField"

View File

@ -1,21 +1,23 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from django.contrib.auth.models import BaseUserManager from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager): class UserManager(BaseUserManager):
def create_user(self, username, type, email, first_name, last_name, password): def create_user(self, username, type, email, first_name, last_name, password):
user = self.model(username=username, user = self.model(
type=type, username=username,
email=email, type=type,
first_name=first_name, email=email,
last_name=last_name) first_name=first_name,
last_name=last_name,
)
user.set_password(password) user.set_password(password)
user.save(using=self._db) user.save(using=self._db)
return user return user
def create_superuser(self, username, email, first_name, last_name, password): def create_superuser(self, username, email, first_name, last_name, password):
user = self.create_user(username, 'A', email, first_name, last_name, password) user = self.create_user(username, "A", email, first_name, last_name, password)
return user return user
def get_by_natural_key(self, username): def get_by_natural_key(self, username):
return self.get(username=username) return self.get(username=username)

View File

@ -15,18 +15,20 @@ class LoginAttempt(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_login_attempts' db_table = "cc_login_attempts"
class Session(models.Model): class Session(models.Model):
sessid = models.CharField(primary_key=True, max_length=32) sessid = models.CharField(primary_key=True, max_length=32)
userid = models.ForeignKey('User', models.DO_NOTHING, db_column='userid', blank=True, null=True) userid = models.ForeignKey(
"User", models.DO_NOTHING, db_column="userid", blank=True, null=True
)
login = models.CharField(max_length=255, blank=True, null=True) login = models.CharField(max_length=255, blank=True, null=True)
ts = models.DateTimeField(blank=True, null=True) ts = models.DateTimeField(blank=True, null=True)
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_sess' db_table = "cc_sess"
USER_TYPE_CHOICES = () USER_TYPE_CHOICES = ()
@ -35,12 +37,14 @@ for item in USER_TYPES.items():
class User(AbstractBaseUser): class User(AbstractBaseUser):
username = models.CharField(db_column='login', unique=True, max_length=255) username = models.CharField(db_column="login", unique=True, max_length=255)
password = models.CharField(db_column='pass', max_length=255) # Field renamed because it was a Python reserved word. password = models.CharField(
db_column="pass", max_length=255
) # Field renamed because it was a Python reserved word.
type = models.CharField(max_length=1, choices=USER_TYPE_CHOICES) type = models.CharField(max_length=1, choices=USER_TYPE_CHOICES)
first_name = models.CharField(max_length=255) first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255) last_name = models.CharField(max_length=255)
last_login = models.DateTimeField(db_column='lastlogin', blank=True, null=True) last_login = models.DateTimeField(db_column="lastlogin", blank=True, null=True)
lastfail = models.DateTimeField(blank=True, null=True) lastfail = models.DateTimeField(blank=True, null=True)
skype_contact = models.CharField(max_length=1024, blank=True, null=True) skype_contact = models.CharField(max_length=1024, blank=True, null=True)
jabber_contact = models.CharField(max_length=1024, blank=True, null=True) jabber_contact = models.CharField(max_length=1024, blank=True, null=True)
@ -48,13 +52,13 @@ class User(AbstractBaseUser):
cell_phone = models.CharField(max_length=1024, blank=True, null=True) cell_phone = models.CharField(max_length=1024, blank=True, null=True)
login_attempts = models.IntegerField(blank=True, null=True) login_attempts = models.IntegerField(blank=True, null=True)
USERNAME_FIELD = 'username' USERNAME_FIELD = "username"
EMAIL_FIELD = 'email' EMAIL_FIELD = "email"
REQUIRED_FIELDS = ['type', 'email', 'first_name', 'last_name'] REQUIRED_FIELDS = ["type", "email", "first_name", "last_name"]
objects = UserManager() objects = UserManager()
def get_full_name(self): def get_full_name(self):
return '{} {}'.format(self.first_name, self.last_name) return "{} {}".format(self.first_name, self.last_name)
def get_short_name(self): def get_short_name(self):
return self.first_name return self.first_name
@ -66,7 +70,7 @@ class User(AbstractBaseUser):
self.password = hashlib.md5(password.encode()).hexdigest() self.password = hashlib.md5(password.encode()).hexdigest()
def is_staff(self): def is_staff(self):
print('is_staff') print("is_staff")
return self.type == ADMIN return self.type == ADMIN
def check_password(self, password): def check_password(self, password):
@ -82,6 +86,7 @@ class User(AbstractBaseUser):
(managed = True), then this can be replaced with (managed = True), then this can be replaced with
django.contrib.auth.models.PermissionMixin. django.contrib.auth.models.PermissionMixin.
""" """
def is_superuser(self): def is_superuser(self):
return self.type == ADMIN return self.type == ADMIN
@ -125,7 +130,7 @@ class User(AbstractBaseUser):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_subjs' db_table = "cc_subjs"
class UserToken(models.Model): class UserToken(models.Model):
@ -139,4 +144,4 @@ class UserToken(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_subjs_token' db_table = "cc_subjs_token"

View File

@ -4,11 +4,13 @@ from django.db import models
class CeleryTask(models.Model): class CeleryTask(models.Model):
task_id = models.CharField(max_length=256) task_id = models.CharField(max_length=256)
track_reference = models.ForeignKey('ThirdPartyTrackReference', models.DO_NOTHING, db_column='track_reference') track_reference = models.ForeignKey(
"ThirdPartyTrackReference", models.DO_NOTHING, db_column="track_reference"
)
name = models.CharField(max_length=256, blank=True, null=True) name = models.CharField(max_length=256, blank=True, null=True)
dispatch_time = models.DateTimeField(blank=True, null=True) dispatch_time = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=256) status = models.CharField(max_length=256)
class Meta: class Meta:
managed = False managed = False
db_table = 'celery_tasks' db_table = "celery_tasks"

View File

@ -8,5 +8,4 @@ class Country(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_country' db_table = "cc_country"

View File

@ -6,11 +6,20 @@ class File(models.Model):
name = models.CharField(max_length=255) name = models.CharField(max_length=255)
mime = models.CharField(max_length=255) mime = models.CharField(max_length=255)
ftype = models.CharField(max_length=128) ftype = models.CharField(max_length=128)
directory = models.ForeignKey('MusicDir', models.DO_NOTHING, db_column='directory', blank=True, null=True) directory = models.ForeignKey(
"MusicDir", models.DO_NOTHING, db_column="directory", blank=True, null=True
)
filepath = models.TextField(blank=True, null=True) filepath = models.TextField(blank=True, null=True)
import_status = models.IntegerField() import_status = models.IntegerField()
currently_accessing = models.IntegerField(db_column='currentlyaccessing') currently_accessing = models.IntegerField(db_column="currentlyaccessing")
edited_by = models.ForeignKey('User', models.DO_NOTHING, db_column='editedby', blank=True, null=True, related_name='edited_files') edited_by = models.ForeignKey(
"User",
models.DO_NOTHING,
db_column="editedby",
blank=True,
null=True,
related_name="edited_files",
)
mtime = models.DateTimeField(blank=True, null=True) mtime = models.DateTimeField(blank=True, null=True)
utime = models.DateTimeField(blank=True, null=True) utime = models.DateTimeField(blank=True, null=True)
lptime = models.DateTimeField(blank=True, null=True) lptime = models.DateTimeField(blank=True, null=True)
@ -59,8 +68,10 @@ class File(models.Model):
contributor = models.CharField(max_length=512, blank=True, null=True) contributor = models.CharField(max_length=512, blank=True, null=True)
language = models.CharField(max_length=512, blank=True, null=True) language = models.CharField(max_length=512, blank=True, null=True)
file_exists = models.BooleanField(blank=True, null=True) file_exists = models.BooleanField(blank=True, null=True)
replay_gain = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True) replay_gain = models.DecimalField(
owner = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True) max_digits=8, decimal_places=2, blank=True, null=True
)
owner = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True)
cuein = models.DurationField(blank=True, null=True) cuein = models.DurationField(blank=True, null=True)
cueout = models.DurationField(blank=True, null=True) cueout = models.DurationField(blank=True, null=True)
silan_check = models.BooleanField(blank=True, null=True) silan_check = models.BooleanField(blank=True, null=True)
@ -77,10 +88,10 @@ class File(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_files' db_table = "cc_files"
permissions = [ permissions = [
('change_own_file', 'Change the files where they are the owner'), ("change_own_file", "Change the files where they are the owner"),
('delete_own_file', 'Delete the files where they are the owner'), ("delete_own_file", "Delete the files where they are the owner"),
] ]
@ -92,15 +103,16 @@ class MusicDir(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_music_dirs' db_table = "cc_music_dirs"
class CloudFile(models.Model): class CloudFile(models.Model):
storage_backend = models.CharField(max_length=512) storage_backend = models.CharField(max_length=512)
resource_id = models.TextField() resource_id = models.TextField()
filename = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True, filename = models.ForeignKey(
db_column='cc_file_id') File, models.DO_NOTHING, blank=True, null=True, db_column="cc_file_id"
)
class Meta: class Meta:
managed = False managed = False
db_table = 'cloud_file' db_table = "cloud_file"

View File

@ -8,7 +8,7 @@ class Playlist(models.Model):
name = models.CharField(max_length=255) name = models.CharField(max_length=255)
mtime = models.DateTimeField(blank=True, null=True) mtime = models.DateTimeField(blank=True, null=True)
utime = models.DateTimeField(blank=True, null=True) utime = models.DateTimeField(blank=True, null=True)
creator = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True) creator = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True)
description = models.CharField(max_length=512, blank=True, null=True) description = models.CharField(max_length=512, blank=True, null=True)
length = models.DurationField(blank=True, null=True) length = models.DurationField(blank=True, null=True)
@ -17,7 +17,7 @@ class Playlist(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_playlist' db_table = "cc_playlist"
class PlaylistContent(models.Model): class PlaylistContent(models.Model):
@ -39,4 +39,4 @@ class PlaylistContent(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_playlistcontents' db_table = "cc_playlistcontents"

View File

@ -4,13 +4,13 @@ from .files import File
class ListenerCount(models.Model): class ListenerCount(models.Model):
timestamp = models.ForeignKey('Timestamp', models.DO_NOTHING) timestamp = models.ForeignKey("Timestamp", models.DO_NOTHING)
mount_name = models.ForeignKey('MountName', models.DO_NOTHING) mount_name = models.ForeignKey("MountName", models.DO_NOTHING)
listener_count = models.IntegerField() listener_count = models.IntegerField()
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_listener_count' db_table = "cc_listener_count"
class LiveLog(models.Model): class LiveLog(models.Model):
@ -20,18 +20,20 @@ class LiveLog(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_live_log' db_table = "cc_live_log"
class PlayoutHistory(models.Model): class PlayoutHistory(models.Model):
file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True) file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True)
starts = models.DateTimeField() starts = models.DateTimeField()
ends = models.DateTimeField(blank=True, null=True) ends = models.DateTimeField(blank=True, null=True)
instance = models.ForeignKey('ShowInstance', models.DO_NOTHING, blank=True, null=True) instance = models.ForeignKey(
"ShowInstance", models.DO_NOTHING, blank=True, null=True
)
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_playout_history' db_table = "cc_playout_history"
class PlayoutHistoryMetadata(models.Model): class PlayoutHistoryMetadata(models.Model):
@ -41,7 +43,7 @@ class PlayoutHistoryMetadata(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_playout_history_metadata' db_table = "cc_playout_history_metadata"
class PlayoutHistoryTemplate(models.Model): class PlayoutHistoryTemplate(models.Model):
@ -50,7 +52,7 @@ class PlayoutHistoryTemplate(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_playout_history_template' db_table = "cc_playout_history_template"
class PlayoutHistoryTemplateField(models.Model): class PlayoutHistoryTemplateField(models.Model):
@ -63,7 +65,7 @@ class PlayoutHistoryTemplateField(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_playout_history_template_field' db_table = "cc_playout_history_template_field"
class Timestamp(models.Model): class Timestamp(models.Model):
@ -71,4 +73,4 @@ class Timestamp(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_timestamp' db_table = "cc_timestamp"

View File

@ -8,14 +8,14 @@ class ImportedPodcast(models.Model):
auto_ingest = models.BooleanField() auto_ingest = models.BooleanField()
auto_ingest_timestamp = models.DateTimeField(blank=True, null=True) auto_ingest_timestamp = models.DateTimeField(blank=True, null=True)
album_override = models.BooleanField() album_override = models.BooleanField()
podcast = models.ForeignKey('Podcast', models.DO_NOTHING) podcast = models.ForeignKey("Podcast", models.DO_NOTHING)
def get_owner(self): def get_owner(self):
return self.podcast.owner return self.podcast.owner
class Meta: class Meta:
managed = False managed = False
db_table = 'imported_podcast' db_table = "imported_podcast"
class Podcast(models.Model): class Podcast(models.Model):
@ -32,17 +32,19 @@ class Podcast(models.Model):
itunes_subtitle = models.CharField(max_length=4096, blank=True, null=True) itunes_subtitle = models.CharField(max_length=4096, blank=True, null=True)
itunes_category = models.CharField(max_length=4096, blank=True, null=True) itunes_category = models.CharField(max_length=4096, blank=True, null=True)
itunes_explicit = models.CharField(max_length=4096, blank=True, null=True) itunes_explicit = models.CharField(max_length=4096, blank=True, null=True)
owner = models.ForeignKey(User, models.DO_NOTHING, db_column='owner', blank=True, null=True) owner = models.ForeignKey(
User, models.DO_NOTHING, db_column="owner", blank=True, null=True
)
def get_owner(self): def get_owner(self):
return self.owner return self.owner
class Meta: class Meta:
managed = False managed = False
db_table = 'podcast' db_table = "podcast"
permissions = [ permissions = [
('change_own_podcast', 'Change the podcasts where they are the owner'), ("change_own_podcast", "Change the podcasts where they are the owner"),
('delete_own_podcast', 'Delete the podcasts where they are the owner'), ("delete_own_podcast", "Delete the podcasts where they are the owner"),
] ]
@ -60,10 +62,16 @@ class PodcastEpisode(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'podcast_episodes' db_table = "podcast_episodes"
permissions = [ permissions = [
('change_own_podcastepisode', 'Change the episodes of podcasts where they are the owner'), (
('delete_own_podcastepisode', 'Delete the episodes of podcasts where they are the owner'), "change_own_podcastepisode",
"Change the episodes of podcasts where they are the owner",
),
(
"delete_own_podcastepisode",
"Delete the episodes of podcasts where they are the owner",
),
] ]
@ -75,4 +83,4 @@ class StationPodcast(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'station_podcast' db_table = "station_podcast"

View File

@ -3,14 +3,16 @@ from django.db import models
class Preference(models.Model): class Preference(models.Model):
subjid = models.ForeignKey('User', models.DO_NOTHING, db_column='subjid', blank=True, null=True) subjid = models.ForeignKey(
"User", models.DO_NOTHING, db_column="subjid", blank=True, null=True
)
keystr = models.CharField(unique=True, max_length=255, blank=True, null=True) keystr = models.CharField(unique=True, max_length=255, blank=True, null=True)
valstr = models.TextField(blank=True, null=True) valstr = models.TextField(blank=True, null=True)
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_pref' db_table = "cc_pref"
unique_together = (('subjid', 'keystr'),) unique_together = (("subjid", "keystr"),)
class MountName(models.Model): class MountName(models.Model):
@ -18,7 +20,7 @@ class MountName(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_mount_name' db_table = "cc_mount_name"
class StreamSetting(models.Model): class StreamSetting(models.Model):
@ -28,4 +30,4 @@ class StreamSetting(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_stream_setting' db_table = "cc_stream_setting"

View File

@ -7,14 +7,14 @@ class Schedule(models.Model):
starts = models.DateTimeField() starts = models.DateTimeField()
ends = models.DateTimeField() ends = models.DateTimeField()
file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True) file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True)
stream = models.ForeignKey('Webstream', models.DO_NOTHING, blank=True, null=True) stream = models.ForeignKey("Webstream", models.DO_NOTHING, blank=True, null=True)
clip_length = models.DurationField(blank=True, null=True) clip_length = models.DurationField(blank=True, null=True)
fade_in = models.TimeField(blank=True, null=True) fade_in = models.TimeField(blank=True, null=True)
fade_out = models.TimeField(blank=True, null=True) fade_out = models.TimeField(blank=True, null=True)
cue_in = models.DurationField() cue_in = models.DurationField()
cue_out = models.DurationField() cue_out = models.DurationField()
media_item_played = models.BooleanField(blank=True, null=True) media_item_played = models.BooleanField(blank=True, null=True)
instance = models.ForeignKey('ShowInstance', models.DO_NOTHING) instance = models.ForeignKey("ShowInstance", models.DO_NOTHING)
playout_status = models.SmallIntegerField() playout_status = models.SmallIntegerField()
broadcasted = models.SmallIntegerField() broadcasted = models.SmallIntegerField()
position = models.IntegerField() position = models.IntegerField()
@ -24,8 +24,8 @@ class Schedule(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_schedule' db_table = "cc_schedule"
permissions = [ permissions = [
('change_own_schedule', 'Change the content on their shows'), ("change_own_schedule", "Change the content on their shows"),
('delete_own_schedule', 'Delete the content on their shows'), ("delete_own_schedule", "Delete the content on their shows"),
] ]

View File

@ -8,5 +8,4 @@ class ServiceRegister(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_service_register' db_table = "cc_service_register"

View File

@ -27,7 +27,7 @@ class Show(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_show' db_table = "cc_show"
class ShowDays(models.Model): class ShowDays(models.Model):
@ -47,16 +47,16 @@ class ShowDays(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_show_days' db_table = "cc_show_days"
class ShowHost(models.Model): class ShowHost(models.Model):
show = models.ForeignKey(Show, models.DO_NOTHING) show = models.ForeignKey(Show, models.DO_NOTHING)
subjs = models.ForeignKey('User', models.DO_NOTHING) subjs = models.ForeignKey("User", models.DO_NOTHING)
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_show_hosts' db_table = "cc_show_hosts"
class ShowInstance(models.Model): class ShowInstance(models.Model):
@ -66,7 +66,7 @@ class ShowInstance(models.Model):
show = models.ForeignKey(Show, models.DO_NOTHING) show = models.ForeignKey(Show, models.DO_NOTHING)
record = models.SmallIntegerField(blank=True, null=True) record = models.SmallIntegerField(blank=True, null=True)
rebroadcast = models.SmallIntegerField(blank=True, null=True) rebroadcast = models.SmallIntegerField(blank=True, null=True)
instance = models.ForeignKey('self', models.DO_NOTHING, blank=True, null=True) instance = models.ForeignKey("self", models.DO_NOTHING, blank=True, null=True)
file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True) file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True)
time_filled = models.DurationField(blank=True, null=True) time_filled = models.DurationField(blank=True, null=True)
created = models.DateTimeField() created = models.DateTimeField()
@ -79,7 +79,7 @@ class ShowInstance(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_show_instances' db_table = "cc_show_instances"
class ShowRebroadcast(models.Model): class ShowRebroadcast(models.Model):
@ -92,4 +92,4 @@ class ShowRebroadcast(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_show_rebroadcast' db_table = "cc_show_rebroadcast"

View File

@ -6,7 +6,7 @@ class SmartBlock(models.Model):
name = models.CharField(max_length=255) name = models.CharField(max_length=255)
mtime = models.DateTimeField(blank=True, null=True) mtime = models.DateTimeField(blank=True, null=True)
utime = models.DateTimeField(blank=True, null=True) utime = models.DateTimeField(blank=True, null=True)
creator = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True) creator = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True)
description = models.CharField(max_length=512, blank=True, null=True) description = models.CharField(max_length=512, blank=True, null=True)
length = models.DurationField(blank=True, null=True) length = models.DurationField(blank=True, null=True)
type = models.CharField(max_length=7, blank=True, null=True) type = models.CharField(max_length=7, blank=True, null=True)
@ -16,16 +16,22 @@ class SmartBlock(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_block' db_table = "cc_block"
permissions = [ permissions = [
('change_own_smartblock', 'Change the smartblocks where they are the owner'), (
('delete_own_smartblock', 'Delete the smartblocks where they are the owner'), "change_own_smartblock",
"Change the smartblocks where they are the owner",
),
(
"delete_own_smartblock",
"Delete the smartblocks where they are the owner",
),
] ]
class SmartBlockContent(models.Model): class SmartBlockContent(models.Model):
block = models.ForeignKey(SmartBlock, models.DO_NOTHING, blank=True, null=True) block = models.ForeignKey(SmartBlock, models.DO_NOTHING, blank=True, null=True)
file = models.ForeignKey('File', models.DO_NOTHING, blank=True, null=True) file = models.ForeignKey("File", models.DO_NOTHING, blank=True, null=True)
position = models.IntegerField(blank=True, null=True) position = models.IntegerField(blank=True, null=True)
trackoffset = models.FloatField() trackoffset = models.FloatField()
cliplength = models.DurationField(blank=True, null=True) cliplength = models.DurationField(blank=True, null=True)
@ -39,10 +45,16 @@ class SmartBlockContent(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_blockcontents' db_table = "cc_blockcontents"
permissions = [ permissions = [
('change_own_smartblockcontent', 'Change the content of smartblocks where they are the owner'), (
('delete_own_smartblockcontent', 'Delete the content of smartblocks where they are the owner'), "change_own_smartblockcontent",
"Change the content of smartblocks where they are the owner",
),
(
"delete_own_smartblockcontent",
"Delete the content of smartblocks where they are the owner",
),
] ]
@ -59,9 +71,14 @@ class SmartBlockCriteria(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_blockcriteria' db_table = "cc_blockcriteria"
permissions = [ permissions = [
('change_own_smartblockcriteria', 'Change the criteria of smartblocks where they are the owner'), (
('delete_own_smartblockcriteria', 'Delete the criteria of smartblocks where they are the owner'), "change_own_smartblockcriteria",
"Change the criteria of smartblocks where they are the owner",
),
(
"delete_own_smartblockcriteria",
"Delete the criteria of smartblocks where they are the owner",
),
] ]

View File

@ -12,7 +12,8 @@ class ThirdPartyTrackReference(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'third_party_track_references' db_table = "third_party_track_references"
class TrackType(models.Model): class TrackType(models.Model):
code = models.CharField(max_length=16, unique=True) code = models.CharField(max_length=16, unique=True)
@ -22,5 +23,4 @@ class TrackType(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_track_types' db_table = "cc_track_types"

View File

@ -1,12 +1,12 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
GUEST = 'G' GUEST = "G"
DJ = 'H' DJ = "H"
PROGRAM_MANAGER = 'P' PROGRAM_MANAGER = "P"
ADMIN = 'A' ADMIN = "A"
USER_TYPES = { USER_TYPES = {
GUEST: 'Guest', GUEST: "Guest",
DJ: 'DJ', DJ: "DJ",
PROGRAM_MANAGER: 'Program Manager', PROGRAM_MANAGER: "Program Manager",
ADMIN: 'Admin', ADMIN: "Admin",
} }

View File

@ -21,10 +21,10 @@ class Webstream(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_webstream' db_table = "cc_webstream"
permissions = [ permissions = [
('change_own_webstream', 'Change the webstreams where they are the owner'), ("change_own_webstream", "Change the webstreams where they are the owner"),
('delete_own_webstream', 'Delete the webstreams where they are the owner'), ("delete_own_webstream", "Delete the webstreams where they are the owner"),
] ]
@ -38,4 +38,4 @@ class WebstreamMetadata(models.Model):
class Meta: class Meta:
managed = False managed = False
db_table = 'cc_webstream_metadata' db_table = "cc_webstream_metadata"

View File

@ -5,98 +5,101 @@ from .models.user_constants import GUEST, DJ, PROGRAM_MANAGER, USER_TYPES
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
GUEST_PERMISSIONS = ['view_schedule', GUEST_PERMISSIONS = [
'view_show', "view_schedule",
'view_showdays', "view_show",
'view_showhost', "view_showdays",
'view_showinstance', "view_showhost",
'view_showrebroadcast', "view_showinstance",
'view_file', "view_showrebroadcast",
'view_podcast', "view_file",
'view_podcastepisode', "view_podcast",
'view_playlist', "view_podcastepisode",
'view_playlistcontent', "view_playlist",
'view_smartblock', "view_playlistcontent",
'view_smartblockcontent', "view_smartblock",
'view_smartblockcriteria', "view_smartblockcontent",
'view_webstream', "view_smartblockcriteria",
'view_apiroot', "view_webstream",
] "view_apiroot",
DJ_PERMISSIONS = GUEST_PERMISSIONS + ['add_file', ]
'add_podcast', DJ_PERMISSIONS = GUEST_PERMISSIONS + [
'add_podcastepisode', "add_file",
'add_playlist', "add_podcast",
'add_playlistcontent', "add_podcastepisode",
'add_smartblock', "add_playlist",
'add_smartblockcontent', "add_playlistcontent",
'add_smartblockcriteria', "add_smartblock",
'add_webstream', "add_smartblockcontent",
'change_own_schedule', "add_smartblockcriteria",
'change_own_file', "add_webstream",
'change_own_podcast', "change_own_schedule",
'change_own_podcastepisode', "change_own_file",
'change_own_playlist', "change_own_podcast",
'change_own_playlistcontent', "change_own_podcastepisode",
'change_own_smartblock', "change_own_playlist",
'change_own_smartblockcontent', "change_own_playlistcontent",
'change_own_smartblockcriteria', "change_own_smartblock",
'change_own_webstream', "change_own_smartblockcontent",
'delete_own_schedule', "change_own_smartblockcriteria",
'delete_own_file', "change_own_webstream",
'delete_own_podcast', "delete_own_schedule",
'delete_own_podcastepisode', "delete_own_file",
'delete_own_playlist', "delete_own_podcast",
'delete_own_playlistcontent', "delete_own_podcastepisode",
'delete_own_smartblock', "delete_own_playlist",
'delete_own_smartblockcontent', "delete_own_playlistcontent",
'delete_own_smartblockcriteria', "delete_own_smartblock",
'delete_own_webstream', "delete_own_smartblockcontent",
] "delete_own_smartblockcriteria",
PROGRAM_MANAGER_PERMISSIONS = GUEST_PERMISSIONS + ['add_show', "delete_own_webstream",
'add_showdays', ]
'add_showhost', PROGRAM_MANAGER_PERMISSIONS = GUEST_PERMISSIONS + [
'add_showinstance', "add_show",
'add_showrebroadcast', "add_showdays",
'add_file', "add_showhost",
'add_podcast', "add_showinstance",
'add_podcastepisode', "add_showrebroadcast",
'add_playlist', "add_file",
'add_playlistcontent', "add_podcast",
'add_smartblock', "add_podcastepisode",
'add_smartblockcontent', "add_playlist",
'add_smartblockcriteria', "add_playlistcontent",
'add_webstream', "add_smartblock",
'change_schedule', "add_smartblockcontent",
'change_show', "add_smartblockcriteria",
'change_showdays', "add_webstream",
'change_showhost', "change_schedule",
'change_showinstance', "change_show",
'change_showrebroadcast', "change_showdays",
'change_file', "change_showhost",
'change_podcast', "change_showinstance",
'change_podcastepisode', "change_showrebroadcast",
'change_playlist', "change_file",
'change_playlistcontent', "change_podcast",
'change_smartblock', "change_podcastepisode",
'change_smartblockcontent', "change_playlist",
'change_smartblockcriteria', "change_playlistcontent",
'change_webstream', "change_smartblock",
'delete_schedule', "change_smartblockcontent",
'delete_show', "change_smartblockcriteria",
'delete_showdays', "change_webstream",
'delete_showhost', "delete_schedule",
'delete_showinstance', "delete_show",
'delete_showrebroadcast', "delete_showdays",
'delete_file', "delete_showhost",
'delete_podcast', "delete_showinstance",
'delete_podcastepisode', "delete_showrebroadcast",
'delete_playlist', "delete_file",
'delete_playlistcontent', "delete_podcast",
'delete_smartblock', "delete_podcastepisode",
'delete_smartblockcontent', "delete_playlist",
'delete_smartblockcriteria', "delete_playlistcontent",
'delete_webstream', "delete_smartblock",
] "delete_smartblockcontent",
"delete_smartblockcriteria",
"delete_webstream",
]
GROUPS = { GROUPS = {
GUEST: GUEST_PERMISSIONS, GUEST: GUEST_PERMISSIONS,

View File

@ -4,21 +4,22 @@ from django.conf import settings
from .models.user_constants import DJ from .models.user_constants import DJ
REQUEST_PERMISSION_TYPE_MAP = { REQUEST_PERMISSION_TYPE_MAP = {
'GET': 'view', "GET": "view",
'HEAD': 'view', "HEAD": "view",
'OPTIONS': 'view', "OPTIONS": "view",
'POST': 'change', "POST": "change",
'PUT': 'change', "PUT": "change",
'DELETE': 'delete', "DELETE": "delete",
'PATCH': 'change', "PATCH": "change",
} }
def get_own_obj(request, view): def get_own_obj(request, view):
user = request.user user = request.user
if user is None or user.type != DJ: if user is None or user.type != DJ:
return '' return ""
if request.method == 'GET': if request.method == "GET":
return '' return ""
qs = view.queryset.all() qs = view.queryset.all()
try: try:
model_owners = [] model_owners = []
@ -27,32 +28,34 @@ def get_own_obj(request, view):
if owner not in model_owners: if owner not in model_owners:
model_owners.append(owner) model_owners.append(owner)
if len(model_owners) == 1 and user in model_owners: if len(model_owners) == 1 and user in model_owners:
return 'own_' return "own_"
except AttributeError: except AttributeError:
return '' return ""
return '' return ""
def get_permission_for_view(request, view): def get_permission_for_view(request, view):
try: try:
permission_type = REQUEST_PERMISSION_TYPE_MAP[request.method] permission_type = REQUEST_PERMISSION_TYPE_MAP[request.method]
if view.__class__.__name__ == 'APIRootView': if view.__class__.__name__ == "APIRootView":
return '{}_apiroot'.format(permission_type) return "{}_apiroot".format(permission_type)
model = view.model_permission_name model = view.model_permission_name
own_obj = get_own_obj(request, view) own_obj = get_own_obj(request, view)
return '{permission_type}_{own_obj}{model}'.format(permission_type=permission_type, return "{permission_type}_{own_obj}{model}".format(
own_obj=own_obj, permission_type=permission_type, own_obj=own_obj, model=model
model=model) )
except AttributeError: except AttributeError:
return None return None
def check_authorization_header(request):
auth_header = request.META.get('Authorization')
if not auth_header:
auth_header = request.META.get('HTTP_AUTHORIZATION', '')
if auth_header.startswith('Api-Key'): def check_authorization_header(request):
auth_header = request.META.get("Authorization")
if not auth_header:
auth_header = request.META.get("HTTP_AUTHORIZATION", "")
if auth_header.startswith("Api-Key"):
token = auth_header.split()[1] token = auth_header.split()[1]
if token == settings.CONFIG.get('general', 'api_key'): if token == settings.CONFIG.get("general", "api_key"):
return True return True
return False return False
@ -63,6 +66,7 @@ class IsAdminOrOwnUser(BasePermission):
Django's standard permission system. For details see Django's standard permission system. For details see
https://www.django-rest-framework.org/api-guide/permissions/#custom-permissions https://www.django-rest-framework.org/api-guide/permissions/#custom-permissions
""" """
def has_permission(self, request, view): def has_permission(self, request, view):
if request.user.is_superuser(): if request.user.is_superuser():
return True return True
@ -84,6 +88,7 @@ class IsSystemTokenOrUser(BasePermission):
an API-Key header. All standard-users (i.e. not using the API-Key) have their an API-Key header. All standard-users (i.e. not using the API-Key) have their
permissions checked against Django's standard permission system. permissions checked against Django's standard permission system.
""" """
def has_permission(self, request, view): def has_permission(self, request, view):
if request.user and request.user.is_authenticated: if request.user and request.user.is_authenticated:
perm = get_permission_for_view(request, view) perm = get_permission_for_view(request, view)
@ -91,7 +96,7 @@ class IsSystemTokenOrUser(BasePermission):
# model. This use-case allows users to view the base of the API # model. This use-case allows users to view the base of the API
# explorer. Their assigned group permissions determine further access # explorer. Their assigned group permissions determine further access
# into the explorer. # into the explorer.
if perm == 'view_apiroot': if perm == "view_apiroot":
return True return True
return request.user.has_perm(perm) return request.user.has_perm(perm)
return check_authorization_header(request) return check_authorization_header(request)

View File

@ -3,264 +3,305 @@ from django.contrib.auth import get_user_model
from rest_framework import serializers from rest_framework import serializers
from .models import * from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer): class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = get_user_model() model = get_user_model()
fields = [ fields = [
'item_url', "item_url",
'username', "username",
'type', "type",
'first_name', "first_name",
'last_name', "last_name",
'lastfail', "lastfail",
'skype_contact', "skype_contact",
'jabber_contact', "jabber_contact",
'email', "email",
'cell_phone', "cell_phone",
'login_attempts', "login_attempts",
] ]
class SmartBlockSerializer(serializers.HyperlinkedModelSerializer): class SmartBlockSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = SmartBlock model = SmartBlock
fields = '__all__' fields = "__all__"
class SmartBlockContentSerializer(serializers.HyperlinkedModelSerializer): class SmartBlockContentSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = SmartBlockContent model = SmartBlockContent
fields = '__all__' fields = "__all__"
class SmartBlockCriteriaSerializer(serializers.HyperlinkedModelSerializer): class SmartBlockCriteriaSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = SmartBlockCriteria model = SmartBlockCriteria
fields = '__all__' fields = "__all__"
class CountrySerializer(serializers.HyperlinkedModelSerializer): class CountrySerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Country model = Country
fields = '__all__' fields = "__all__"
class FileSerializer(serializers.HyperlinkedModelSerializer): class FileSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = File model = File
fields = '__all__' fields = "__all__"
class ListenerCountSerializer(serializers.HyperlinkedModelSerializer): class ListenerCountSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ListenerCount model = ListenerCount
fields = '__all__' fields = "__all__"
class LiveLogSerializer(serializers.HyperlinkedModelSerializer): class LiveLogSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = LiveLog model = LiveLog
fields = '__all__' fields = "__all__"
class LoginAttemptSerializer(serializers.HyperlinkedModelSerializer): class LoginAttemptSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = LoginAttempt model = LoginAttempt
fields = '__all__' fields = "__all__"
class MountNameSerializer(serializers.HyperlinkedModelSerializer): class MountNameSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = MountName model = MountName
fields = '__all__' fields = "__all__"
class MusicDirSerializer(serializers.HyperlinkedModelSerializer): class MusicDirSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = MusicDir model = MusicDir
fields = '__all__' fields = "__all__"
class PlaylistSerializer(serializers.HyperlinkedModelSerializer): class PlaylistSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Playlist model = Playlist
fields = '__all__' fields = "__all__"
class PlaylistContentSerializer(serializers.HyperlinkedModelSerializer): class PlaylistContentSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = PlaylistContent model = PlaylistContent
fields = '__all__' fields = "__all__"
class PlayoutHistorySerializer(serializers.HyperlinkedModelSerializer): class PlayoutHistorySerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = PlayoutHistory model = PlayoutHistory
fields = '__all__' fields = "__all__"
class PlayoutHistoryMetadataSerializer(serializers.HyperlinkedModelSerializer): class PlayoutHistoryMetadataSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = PlayoutHistoryMetadata model = PlayoutHistoryMetadata
fields = '__all__' fields = "__all__"
class PlayoutHistoryTemplateSerializer(serializers.HyperlinkedModelSerializer): class PlayoutHistoryTemplateSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = PlayoutHistoryTemplate model = PlayoutHistoryTemplate
fields = '__all__' fields = "__all__"
class PlayoutHistoryTemplateFieldSerializer(serializers.HyperlinkedModelSerializer): class PlayoutHistoryTemplateFieldSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = PlayoutHistoryTemplateField model = PlayoutHistoryTemplateField
fields = '__all__' fields = "__all__"
class PreferenceSerializer(serializers.HyperlinkedModelSerializer): class PreferenceSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Preference model = Preference
fields = '__all__' fields = "__all__"
class ScheduleSerializer(serializers.HyperlinkedModelSerializer): class ScheduleSerializer(serializers.HyperlinkedModelSerializer):
file_id = serializers.IntegerField(source='file.id', read_only=True) file_id = serializers.IntegerField(source="file.id", read_only=True)
stream_id = serializers.IntegerField(source='stream.id', read_only=True) stream_id = serializers.IntegerField(source="stream.id", read_only=True)
instance_id = serializers.IntegerField(source='instance.id', read_only=True) instance_id = serializers.IntegerField(source="instance.id", read_only=True)
class Meta: class Meta:
model = Schedule model = Schedule
fields = [ fields = [
'item_url', "item_url",
'id', "id",
'starts', "starts",
'ends', "ends",
'clip_length', "clip_length",
'fade_in', "fade_in",
'fade_out', "fade_out",
'cue_in', "cue_in",
'cue_out', "cue_out",
'media_item_played', "media_item_played",
'file', "file",
'file_id', "file_id",
'stream', "stream",
'stream_id', "stream_id",
'instance', "instance",
'instance_id', "instance_id",
] ]
class ServiceRegisterSerializer(serializers.HyperlinkedModelSerializer): class ServiceRegisterSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ServiceRegister model = ServiceRegister
fields = '__all__' fields = "__all__"
class SessionSerializer(serializers.HyperlinkedModelSerializer): class SessionSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Session model = Session
fields = '__all__' fields = "__all__"
class ShowSerializer(serializers.HyperlinkedModelSerializer): class ShowSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Show model = Show
fields = [ fields = [
'item_url', "item_url",
'id', "id",
'name', "name",
'url', "url",
'genre', "genre",
'description', "description",
'color', "color",
'background_color', "background_color",
'linked', "linked",
'is_linkable', "is_linkable",
'image_path', "image_path",
'has_autoplaylist', "has_autoplaylist",
'autoplaylist_repeat', "autoplaylist_repeat",
'autoplaylist', "autoplaylist",
] ]
class ShowDaysSerializer(serializers.HyperlinkedModelSerializer): class ShowDaysSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ShowDays model = ShowDays
fields = '__all__' fields = "__all__"
class ShowHostSerializer(serializers.HyperlinkedModelSerializer): class ShowHostSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ShowHost model = ShowHost
fields = '__all__' fields = "__all__"
class ShowInstanceSerializer(serializers.HyperlinkedModelSerializer): class ShowInstanceSerializer(serializers.HyperlinkedModelSerializer):
show_id = serializers.IntegerField(source='show.id', read_only=True) show_id = serializers.IntegerField(source="show.id", read_only=True)
file_id = serializers.IntegerField(source='file.id', read_only=True) file_id = serializers.IntegerField(source="file.id", read_only=True)
class Meta: class Meta:
model = ShowInstance model = ShowInstance
fields = [ fields = [
'item_url', "item_url",
'id', "id",
'description', "description",
'starts', "starts",
'ends', "ends",
'record', "record",
'rebroadcast', "rebroadcast",
'time_filled', "time_filled",
'created', "created",
'last_scheduled', "last_scheduled",
'modified_instance', "modified_instance",
'autoplaylist_built', "autoplaylist_built",
'show', "show",
'show_id', "show_id",
'instance', "instance",
'file', "file",
'file_id', "file_id",
] ]
class ShowRebroadcastSerializer(serializers.HyperlinkedModelSerializer): class ShowRebroadcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ShowRebroadcast model = ShowRebroadcast
fields = '__all__' fields = "__all__"
class StreamSettingSerializer(serializers.HyperlinkedModelSerializer): class StreamSettingSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = StreamSetting model = StreamSetting
fields = '__all__' fields = "__all__"
class UserTokenSerializer(serializers.HyperlinkedModelSerializer): class UserTokenSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = UserToken model = UserToken
fields = '__all__' fields = "__all__"
class TimestampSerializer(serializers.HyperlinkedModelSerializer): class TimestampSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Timestamp model = Timestamp
fields = '__all__' fields = "__all__"
class WebstreamSerializer(serializers.HyperlinkedModelSerializer): class WebstreamSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Webstream model = Webstream
fields = '__all__' fields = "__all__"
class WebstreamMetadataSerializer(serializers.HyperlinkedModelSerializer): class WebstreamMetadataSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = WebstreamMetadata model = WebstreamMetadata
fields = '__all__' fields = "__all__"
class CeleryTaskSerializer(serializers.HyperlinkedModelSerializer): class CeleryTaskSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = CeleryTask model = CeleryTask
fields = '__all__' fields = "__all__"
class CloudFileSerializer(serializers.HyperlinkedModelSerializer): class CloudFileSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = CloudFile model = CloudFile
fields = '__all__' fields = "__all__"
class ImportedPodcastSerializer(serializers.HyperlinkedModelSerializer): class ImportedPodcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ImportedPodcast model = ImportedPodcast
fields = '__all__' fields = "__all__"
class PodcastSerializer(serializers.HyperlinkedModelSerializer): class PodcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = Podcast model = Podcast
fields = '__all__' fields = "__all__"
class PodcastEpisodeSerializer(serializers.HyperlinkedModelSerializer): class PodcastEpisodeSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = PodcastEpisode model = PodcastEpisode
fields = '__all__' fields = "__all__"
class StationPodcastSerializer(serializers.HyperlinkedModelSerializer): class StationPodcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = StationPodcast model = StationPodcast
fields = '__all__' fields = "__all__"
class ThirdPartyTrackReferenceSerializer(serializers.HyperlinkedModelSerializer): class ThirdPartyTrackReferenceSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = ThirdPartyTrackReference model = ThirdPartyTrackReference
fields = '__all__' fields = "__all__"
class TrackTypeSerializer(serializers.HyperlinkedModelSerializer): class TrackTypeSerializer(serializers.HyperlinkedModelSerializer):
class Meta: class Meta:
model = TrackType model = TrackType
fields = '__all__' fields = "__all__"

View File

@ -3,10 +3,11 @@ import configparser
import os import os
from .utils import read_config_file, get_random_string from .utils import read_config_file, get_random_string
LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime') LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime")
DEFAULT_CONFIG_PATH = os.getenv('LIBRETIME_CONF_FILE', DEFAULT_CONFIG_PATH = os.getenv(
os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf')) "LIBRETIME_CONF_FILE", os.path.join(LIBRETIME_CONF_DIR, "airtime.conf")
API_VERSION = '2.0.0' )
API_VERSION = "2.0.0"
try: try:
CONFIG = read_config_file(DEFAULT_CONFIG_PATH) CONFIG = read_config_file(DEFAULT_CONFIG_PATH)
@ -18,70 +19,70 @@ except IOError:
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_random_string(CONFIG.get('general', 'api_key', fallback='')) SECRET_KEY = get_random_string(CONFIG.get("general", "api_key", fallback=""))
# SECURITY WARNING: don't run with debug turned on in production! # SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('LIBRETIME_DEBUG', False) DEBUG = os.getenv("LIBRETIME_DEBUG", False)
ALLOWED_HOSTS = ['*'] ALLOWED_HOSTS = ["*"]
# Application definition # Application definition
INSTALLED_APPS = [ INSTALLED_APPS = [
'libretimeapi.apps.LibreTimeAPIConfig', "libretimeapi.apps.LibreTimeAPIConfig",
'django.contrib.admin', "django.contrib.admin",
'django.contrib.auth', "django.contrib.auth",
'django.contrib.contenttypes', "django.contrib.contenttypes",
'django.contrib.sessions', "django.contrib.sessions",
'django.contrib.messages', "django.contrib.messages",
'django.contrib.staticfiles', "django.contrib.staticfiles",
'rest_framework', "rest_framework",
'url_filter', "url_filter",
] ]
MIDDLEWARE = [ MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware', "django.middleware.security.SecurityMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware', "django.contrib.sessions.middleware.SessionMiddleware",
'django.middleware.common.CommonMiddleware', "django.middleware.common.CommonMiddleware",
'django.middleware.csrf.CsrfViewMiddleware', "django.middleware.csrf.CsrfViewMiddleware",
'django.contrib.auth.middleware.AuthenticationMiddleware', "django.contrib.auth.middleware.AuthenticationMiddleware",
'django.contrib.messages.middleware.MessageMiddleware', "django.contrib.messages.middleware.MessageMiddleware",
'django.middleware.clickjacking.XFrameOptionsMiddleware', "django.middleware.clickjacking.XFrameOptionsMiddleware",
] ]
ROOT_URLCONF = 'libretimeapi.urls' ROOT_URLCONF = "libretimeapi.urls"
TEMPLATES = [ TEMPLATES = [
{ {
'BACKEND': 'django.template.backends.django.DjangoTemplates', "BACKEND": "django.template.backends.django.DjangoTemplates",
'DIRS': [], "DIRS": [],
'APP_DIRS': True, "APP_DIRS": True,
'OPTIONS': { "OPTIONS": {
'context_processors': [ "context_processors": [
'django.template.context_processors.debug', "django.template.context_processors.debug",
'django.template.context_processors.request', "django.template.context_processors.request",
'django.contrib.auth.context_processors.auth', "django.contrib.auth.context_processors.auth",
'django.contrib.messages.context_processors.messages', "django.contrib.messages.context_processors.messages",
], ],
}, },
}, },
] ]
WSGI_APPLICATION = 'libretimeapi.wsgi.application' WSGI_APPLICATION = "libretimeapi.wsgi.application"
# Database # Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = { DATABASES = {
'default': { "default": {
'ENGINE': 'django.db.backends.postgresql', "ENGINE": "django.db.backends.postgresql",
'NAME': CONFIG.get('database', 'dbname', fallback=''), "NAME": CONFIG.get("database", "dbname", fallback=""),
'USER': CONFIG.get('database', 'dbuser', fallback=''), "USER": CONFIG.get("database", "dbuser", fallback=""),
'PASSWORD': CONFIG.get('database', 'dbpass', fallback=''), "PASSWORD": CONFIG.get("database", "dbpass", fallback=""),
'HOST': CONFIG.get('database', 'host', fallback=''), "HOST": CONFIG.get("database", "host", fallback=""),
'PORT': '5432', "PORT": "5432",
} }
} }
@ -91,40 +92,40 @@ DATABASES = {
AUTH_PASSWORD_VALIDATORS = [ AUTH_PASSWORD_VALIDATORS = [
{ {
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
}, },
{ {
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
}, },
{ {
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
}, },
{ {
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
}, },
] ]
REST_FRAMEWORK = { REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': ( "DEFAULT_AUTHENTICATION_CLASSES": (
'rest_framework.authentication.SessionAuthentication', "rest_framework.authentication.SessionAuthentication",
'rest_framework.authentication.BasicAuthentication', "rest_framework.authentication.BasicAuthentication",
), ),
'DEFAULT_PERMISSION_CLASSES': [ "DEFAULT_PERMISSION_CLASSES": [
'libretimeapi.permissions.IsSystemTokenOrUser', "libretimeapi.permissions.IsSystemTokenOrUser",
], ],
'DEFAULT_FILTER_BACKENDS': [ "DEFAULT_FILTER_BACKENDS": [
'url_filter.integrations.drf.DjangoFilterBackend', "url_filter.integrations.drf.DjangoFilterBackend",
], ],
'URL_FIELD_NAME': 'item_url', "URL_FIELD_NAME": "item_url",
} }
# Internationalization # Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/ # https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us' LANGUAGE_CODE = "en-us"
TIME_ZONE = 'UTC' TIME_ZONE = "UTC"
USE_I18N = True USE_I18N = True
@ -136,50 +137,53 @@ USE_TZ = True
# Static files (CSS, JavaScript, Images) # Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/ # https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/api/static/' STATIC_URL = "/api/static/"
if not DEBUG: if not DEBUG:
STATIC_ROOT = os.getenv('LIBRETIME_STATIC_ROOT', '/usr/share/airtime/api') STATIC_ROOT = os.getenv("LIBRETIME_STATIC_ROOT", "/usr/share/airtime/api")
AUTH_USER_MODEL = 'libretimeapi.User' AUTH_USER_MODEL = "libretimeapi.User"
TEST_RUNNER = 'libretimeapi.tests.runners.ManagedModelTestRunner' TEST_RUNNER = "libretimeapi.tests.runners.ManagedModelTestRunner"
LOGGING = { LOGGING = {
'version': 1, "version": 1,
'disable_existing_loggers': False, "disable_existing_loggers": False,
'formatters': { "formatters": {
'simple': { "simple": {
'format': '{levelname} {message}', "format": "{levelname} {message}",
'style': '{', "style": "{",
}, },
'verbose': { "verbose": {
'format': '{asctime} {module} {levelname} {message}', "format": "{asctime} {module} {levelname} {message}",
'style': '{', "style": "{",
}
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(CONFIG.get('pypo', 'log_base_dir', fallback='.').replace('\'',''), 'api.log'),
'formatter': 'verbose',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}, },
}, },
'loggers': { "handlers": {
'django': { "file": {
'handlers': ['file', 'console'], "level": "DEBUG",
'level': 'INFO', "class": "logging.FileHandler",
'propogate': True, "filename": os.path.join(
CONFIG.get("pypo", "log_base_dir", fallback=".").replace("'", ""),
"api.log",
),
"formatter": "verbose",
}, },
'libretimeapi': { "console": {
'handlers': ['file', 'console'], "level": "INFO",
'level': 'INFO', "class": "logging.StreamHandler",
'propogate': True, "formatter": "simple",
},
},
"loggers": {
"django": {
"handlers": ["file", "console"],
"level": "INFO",
"propogate": True,
},
"libretimeapi": {
"handlers": ["file", "console"],
"level": "INFO",
"propogate": True,
}, },
}, },
} }

View File

@ -8,18 +8,17 @@ class ManagedModelTestRunner(DiscoverRunner):
project managed for the duration of the test run, so that one doesn't need project managed for the duration of the test run, so that one doesn't need
to execute the SQL manually to create them. to execute the SQL manually to create them.
""" """
def setup_test_environment(self, *args, **kwargs): def setup_test_environment(self, *args, **kwargs):
from django.apps import apps from django.apps import apps
self.unmanaged_models = [m for m in apps.get_models()
if not m._meta.managed] self.unmanaged_models = [m for m in apps.get_models() if not m._meta.managed]
for m in self.unmanaged_models: for m in self.unmanaged_models:
m._meta.managed = True m._meta.managed = True
super(ManagedModelTestRunner, self).setup_test_environment(*args, super(ManagedModelTestRunner, self).setup_test_environment(*args, **kwargs)
**kwargs)
def teardown_test_environment(self, *args, **kwargs): def teardown_test_environment(self, *args, **kwargs):
super(ManagedModelTestRunner, self).teardown_test_environment(*args, super(ManagedModelTestRunner, self).teardown_test_environment(*args, **kwargs)
**kwargs)
# reset unmanaged models # reset unmanaged models
for m in self.unmanaged_models: for m in self.unmanaged_models:
m._meta.managed = False m._meta.managed = False

View File

@ -9,33 +9,40 @@ from libretimeapi.permission_constants import GROUPS
class TestUserManager(APITestCase): class TestUserManager(APITestCase):
def test_create_user(self): def test_create_user(self):
user = User.objects.create_user('test', user = User.objects.create_user(
email='test@example.com', "test",
password='test', email="test@example.com",
type=DJ, password="test",
first_name='test', type=DJ,
last_name='user') first_name="test",
last_name="user",
)
db_user = User.objects.get(pk=user.pk) db_user = User.objects.get(pk=user.pk)
self.assertEqual(db_user.username, user.username) self.assertEqual(db_user.username, user.username)
def test_create_superuser(self): def test_create_superuser(self):
user = User.objects.create_superuser('test', user = User.objects.create_superuser(
email='test@example.com', "test",
password='test', email="test@example.com",
first_name='test', password="test",
last_name='user') first_name="test",
last_name="user",
)
db_user = User.objects.get(pk=user.pk) db_user = User.objects.get(pk=user.pk)
self.assertEqual(db_user.username, user.username) self.assertEqual(db_user.username, user.username)
class TestUser(APITestCase): class TestUser(APITestCase):
def test_guest_get_group_perms(self): def test_guest_get_group_perms(self):
user = User.objects.create_user('test', user = User.objects.create_user(
email='test@example.com', "test",
password='test', email="test@example.com",
type=GUEST, password="test",
first_name='test', type=GUEST,
last_name='user') first_name="test",
last_name="user",
)
permissions = user.get_group_permissions() permissions = user.get_group_permissions()
# APIRoot permission hardcoded in the check as it isn't a Permission object # APIRoot permission hardcoded in the check as it isn't a Permission object
str_perms = [p.codename for p in permissions] + ['view_apiroot'] str_perms = [p.codename for p in permissions] + ["view_apiroot"]
self.assertCountEqual(str_perms, GROUPS[GUEST]) self.assertCountEqual(str_perms, GROUPS[GUEST])

View File

@ -6,7 +6,11 @@ from django.conf import settings
from rest_framework.test import APITestCase, APIRequestFactory from rest_framework.test import APITestCase, APIRequestFactory
from model_bakery import baker from model_bakery import baker
from libretimeapi.permissions import IsSystemTokenOrUser from libretimeapi.permissions import IsSystemTokenOrUser
from libretimeapi.permission_constants import GUEST_PERMISSIONS, DJ_PERMISSIONS, PROGRAM_MANAGER_PERMISSIONS from libretimeapi.permission_constants import (
GUEST_PERMISSIONS,
DJ_PERMISSIONS,
PROGRAM_MANAGER_PERMISSIONS,
)
from libretimeapi.models.user_constants import GUEST, DJ, PROGRAM_MANAGER, ADMIN from libretimeapi.models.user_constants import GUEST, DJ, PROGRAM_MANAGER, ADMIN
@ -16,54 +20,56 @@ class TestIsSystemTokenOrUser(APITestCase):
cls.path = "/api/v2/files/" cls.path = "/api/v2/files/"
def test_unauthorized(self): def test_unauthorized(self):
response = self.client.get(self.path.format('files')) response = self.client.get(self.path.format("files"))
self.assertEqual(response.status_code, 403) self.assertEqual(response.status_code, 403)
def test_token_incorrect(self): def test_token_incorrect(self):
token = 'doesnotexist' token = "doesnotexist"
request = APIRequestFactory().get(self.path) request = APIRequestFactory().get(self.path)
request.user = AnonymousUser() request.user = AnonymousUser()
request.META['Authorization'] = 'Api-Key {token}'.format(token=token) request.META["Authorization"] = "Api-Key {token}".format(token=token)
allowed = IsSystemTokenOrUser().has_permission(request, None) allowed = IsSystemTokenOrUser().has_permission(request, None)
self.assertFalse(allowed) self.assertFalse(allowed)
def test_token_correct(self): def test_token_correct(self):
token = settings.CONFIG.get('general', 'api_key') token = settings.CONFIG.get("general", "api_key")
request = APIRequestFactory().get(self.path) request = APIRequestFactory().get(self.path)
request.user = AnonymousUser() request.user = AnonymousUser()
request.META['Authorization'] = 'Api-Key {token}'.format(token=token) request.META["Authorization"] = "Api-Key {token}".format(token=token)
allowed = IsSystemTokenOrUser().has_permission(request, None) allowed = IsSystemTokenOrUser().has_permission(request, None)
self.assertTrue(allowed) self.assertTrue(allowed)
class TestPermissions(APITestCase): class TestPermissions(APITestCase):
URLS = [ URLS = [
'schedule', "schedule",
'shows', "shows",
'show-days', "show-days",
'show-hosts', "show-hosts",
'show-instances', "show-instances",
'show-rebroadcasts', "show-rebroadcasts",
'files', "files",
'playlists', "playlists",
'playlist-contents', "playlist-contents",
'smart-blocks', "smart-blocks",
'smart-block-contents', "smart-block-contents",
'smart-block-criteria', "smart-block-criteria",
'webstreams', "webstreams",
] ]
def logged_in_test_model(self, model, name, user_type, fn): def logged_in_test_model(self, model, name, user_type, fn):
path = self.path.format(model) path = self.path.format(model)
user_created = get_user_model().objects.filter(username=name) user_created = get_user_model().objects.filter(username=name)
if not user_created: if not user_created:
user = get_user_model().objects.create_user(name, user = get_user_model().objects.create_user(
email='test@example.com', name,
password='test', email="test@example.com",
type=user_type, password="test",
first_name='test', type=user_type,
last_name='user') first_name="test",
self.client.login(username=name, password='test') last_name="user",
)
self.client.login(username=name, password="test")
return fn(path) return fn(path)
@classmethod @classmethod
@ -72,49 +78,57 @@ class TestPermissions(APITestCase):
def test_guest_permissions_success(self): def test_guest_permissions_success(self):
for model in self.URLS: for model in self.URLS:
response = self.logged_in_test_model(model, 'guest', GUEST, self.client.get) response = self.logged_in_test_model(model, "guest", GUEST, self.client.get)
self.assertEqual(response.status_code, 200, self.assertEqual(
msg='Invalid for model {}'.format(model)) response.status_code, 200, msg="Invalid for model {}".format(model)
)
def test_guest_permissions_failure(self): def test_guest_permissions_failure(self):
for model in self.URLS: for model in self.URLS:
response = self.logged_in_test_model(model, 'guest', GUEST, self.client.post) response = self.logged_in_test_model(
self.assertEqual(response.status_code, 403, model, "guest", GUEST, self.client.post
msg='Invalid for model {}'.format(model)) )
response = self.logged_in_test_model('users', 'guest', GUEST, self.client.get) self.assertEqual(
self.assertEqual(response.status_code, 403, msg='Invalid for model users') response.status_code, 403, msg="Invalid for model {}".format(model)
)
response = self.logged_in_test_model("users", "guest", GUEST, self.client.get)
self.assertEqual(response.status_code, 403, msg="Invalid for model users")
def test_dj_get_permissions(self): def test_dj_get_permissions(self):
for model in self.URLS: for model in self.URLS:
response = self.logged_in_test_model(model, 'dj', DJ, self.client.get) response = self.logged_in_test_model(model, "dj", DJ, self.client.get)
self.assertEqual(response.status_code, 200, self.assertEqual(
msg='Invalid for model {}'.format(model)) response.status_code, 200, msg="Invalid for model {}".format(model)
)
def test_dj_post_permissions(self): def test_dj_post_permissions(self):
user = get_user_model().objects.create_user('test-dj', user = get_user_model().objects.create_user(
email='test@example.com', "test-dj",
password='test', email="test@example.com",
type=DJ, password="test",
first_name='test', type=DJ,
last_name='user') first_name="test",
f = baker.make('libretimeapi.File', last_name="user",
owner=user) )
model = 'files/{}'.format(f.id) f = baker.make("libretimeapi.File", owner=user)
model = "files/{}".format(f.id)
path = self.path.format(model) path = self.path.format(model)
self.client.login(username='test-dj', password='test') self.client.login(username="test-dj", password="test")
response = self.client.patch(path, {'name': 'newFilename'}) response = self.client.patch(path, {"name": "newFilename"})
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
def test_dj_post_permissions_failure(self): def test_dj_post_permissions_failure(self):
user = get_user_model().objects.create_user('test-dj', user = get_user_model().objects.create_user(
email='test@example.com', "test-dj",
password='test', email="test@example.com",
type=DJ, password="test",
first_name='test', type=DJ,
last_name='user') first_name="test",
f = baker.make('libretimeapi.File') last_name="user",
model = 'files/{}'.format(f.id) )
f = baker.make("libretimeapi.File")
model = "files/{}".format(f.id)
path = self.path.format(model) path = self.path.format(model)
self.client.login(username='test-dj', password='test') self.client.login(username="test-dj", password="test")
response = self.client.patch(path, {'name': 'newFilename'}) response = self.client.patch(path, {"name": "newFilename"})
self.assertEqual(response.status_code, 403) self.assertEqual(response.status_code, 403)

View File

@ -11,29 +11,32 @@ class TestFileViewSet(APITestCase):
@classmethod @classmethod
def setUpTestData(cls): def setUpTestData(cls):
cls.path = "/api/v2/files/{id}/download/" cls.path = "/api/v2/files/{id}/download/"
cls.token = settings.CONFIG.get('general', 'api_key') cls.token = settings.CONFIG.get("general", "api_key")
def test_invalid(self): def test_invalid(self):
path = self.path.format(id='a') path = self.path.format(id="a")
self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token)) self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token))
response = self.client.get(path) response = self.client.get(path)
self.assertEqual(response.status_code, 400) self.assertEqual(response.status_code, 400)
def test_does_not_exist(self): def test_does_not_exist(self):
path = self.path.format(id='1') path = self.path.format(id="1")
self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token)) self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token))
response = self.client.get(path) response = self.client.get(path)
self.assertEqual(response.status_code, 404) self.assertEqual(response.status_code, 404)
def test_exists(self): def test_exists(self):
music_dir = baker.make('libretimeapi.MusicDir', music_dir = baker.make(
directory=os.path.join(os.path.dirname(__file__), "libretimeapi.MusicDir",
'resources')) directory=os.path.join(os.path.dirname(__file__), "resources"),
f = baker.make('libretimeapi.File', )
directory=music_dir, f = baker.make(
mime='audio/mp3', "libretimeapi.File",
filepath='song.mp3') directory=music_dir,
mime="audio/mp3",
filepath="song.mp3",
)
path = self.path.format(id=str(f.pk)) path = self.path.format(id=str(f.pk))
self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token)) self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token))
response = self.client.get(path) response = self.client.get(path)
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)

View File

@ -5,48 +5,48 @@ from rest_framework import routers
from .views import * from .views import *
router = routers.DefaultRouter() router = routers.DefaultRouter()
router.register('smart-blocks', SmartBlockViewSet) router.register("smart-blocks", SmartBlockViewSet)
router.register('smart-block-contents', SmartBlockContentViewSet) router.register("smart-block-contents", SmartBlockContentViewSet)
router.register('smart-block-criteria', SmartBlockCriteriaViewSet) router.register("smart-block-criteria", SmartBlockCriteriaViewSet)
router.register('countries', CountryViewSet) router.register("countries", CountryViewSet)
router.register('files', FileViewSet) router.register("files", FileViewSet)
router.register('listener-counts', ListenerCountViewSet) router.register("listener-counts", ListenerCountViewSet)
router.register('live-logs', LiveLogViewSet) router.register("live-logs", LiveLogViewSet)
router.register('login-attempts', LoginAttemptViewSet) router.register("login-attempts", LoginAttemptViewSet)
router.register('mount-names', MountNameViewSet) router.register("mount-names", MountNameViewSet)
router.register('music-dirs', MusicDirViewSet) router.register("music-dirs", MusicDirViewSet)
router.register('playlists', PlaylistViewSet) router.register("playlists", PlaylistViewSet)
router.register('playlist-contents', PlaylistContentViewSet) router.register("playlist-contents", PlaylistContentViewSet)
router.register('playout-history', PlayoutHistoryViewSet) router.register("playout-history", PlayoutHistoryViewSet)
router.register('playout-history-metadata', PlayoutHistoryMetadataViewSet) router.register("playout-history-metadata", PlayoutHistoryMetadataViewSet)
router.register('playout-history-templates', PlayoutHistoryTemplateViewSet) router.register("playout-history-templates", PlayoutHistoryTemplateViewSet)
router.register('playout-history-template-fields', PlayoutHistoryTemplateFieldViewSet) router.register("playout-history-template-fields", PlayoutHistoryTemplateFieldViewSet)
router.register('preferences', PreferenceViewSet) router.register("preferences", PreferenceViewSet)
router.register('schedule', ScheduleViewSet) router.register("schedule", ScheduleViewSet)
router.register('service-registers', ServiceRegisterViewSet) router.register("service-registers", ServiceRegisterViewSet)
router.register('sessions', SessionViewSet) router.register("sessions", SessionViewSet)
router.register('shows', ShowViewSet) router.register("shows", ShowViewSet)
router.register('show-days', ShowDaysViewSet) router.register("show-days", ShowDaysViewSet)
router.register('show-hosts', ShowHostViewSet) router.register("show-hosts", ShowHostViewSet)
router.register('show-instances', ShowInstanceViewSet) router.register("show-instances", ShowInstanceViewSet)
router.register('show-rebroadcasts', ShowRebroadcastViewSet) router.register("show-rebroadcasts", ShowRebroadcastViewSet)
router.register('stream-settings', StreamSettingViewSet) router.register("stream-settings", StreamSettingViewSet)
router.register('users', UserViewSet) router.register("users", UserViewSet)
router.register('user-tokens', UserTokenViewSet) router.register("user-tokens", UserTokenViewSet)
router.register('timestamps', TimestampViewSet) router.register("timestamps", TimestampViewSet)
router.register('webstreams', WebstreamViewSet) router.register("webstreams", WebstreamViewSet)
router.register('webstream-metadata', WebstreamMetadataViewSet) router.register("webstream-metadata", WebstreamMetadataViewSet)
router.register('celery-tasks', CeleryTaskViewSet) router.register("celery-tasks", CeleryTaskViewSet)
router.register('cloud-files', CloudFileViewSet) router.register("cloud-files", CloudFileViewSet)
router.register('imported-podcasts', ImportedPodcastViewSet) router.register("imported-podcasts", ImportedPodcastViewSet)
router.register('podcasts', PodcastViewSet) router.register("podcasts", PodcastViewSet)
router.register('podcast-episodes', PodcastEpisodeViewSet) router.register("podcast-episodes", PodcastEpisodeViewSet)
router.register('station-podcasts', StationPodcastViewSet) router.register("station-podcasts", StationPodcastViewSet)
router.register('third-party-track-references', ThirdPartyTrackReferenceViewSet) router.register("third-party-track-references", ThirdPartyTrackReferenceViewSet)
router.register('track-types', TrackTypeViewSet) router.register("track-types", TrackTypeViewSet)
urlpatterns = [ urlpatterns = [
path('api/v2/', include(router.urls)), path("api/v2/", include(router.urls)),
path('api/v2/version/', version), path("api/v2/version/", version),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')), path("api-auth/", include("rest_framework.urls", namespace="rest_framework")),
] ]

View File

@ -4,23 +4,27 @@ import sys
import string import string
import random import random
def read_config_file(config_path): def read_config_file(config_path):
"""Parse the application's config file located at config_path.""" """Parse the application's config file located at config_path."""
config = configparser.ConfigParser() config = configparser.ConfigParser()
try: try:
config.readfp(open(config_path)) config.readfp(open(config_path))
except IOError as e: except IOError as e:
print("Failed to open config file at {}: {}".format(config_path, e.strerror), print(
file=sys.stderr) "Failed to open config file at {}: {}".format(config_path, e.strerror),
file=sys.stderr,
)
raise e raise e
except Exception as e: except Exception as e:
print(e.strerror, file=sys.stderr) print(e.strerror, file=sys.stderr)
raise e raise e
return config return config
def get_random_string(seed): def get_random_string(seed):
"""Generates a random string based on the given seed""" """Generates a random string based on the given seed"""
choices = string.ascii_letters + string.digits + string.punctuation choices = string.ascii_letters + string.digits + string.punctuation
seed = seed.encode('utf-8') seed = seed.encode("utf-8")
rand = random.Random(seed) rand = random.Random(seed)
return [rand.choice(choices) for i in range(16)] return [rand.choice(choices) for i in range(16)]

View File

@ -10,220 +10,261 @@ from rest_framework.response import Response
from .serializers import * from .serializers import *
from .permissions import IsAdminOrOwnUser from .permissions import IsAdminOrOwnUser
class UserViewSet(viewsets.ModelViewSet): class UserViewSet(viewsets.ModelViewSet):
queryset = get_user_model().objects.all() queryset = get_user_model().objects.all()
serializer_class = UserSerializer serializer_class = UserSerializer
permission_classes = [IsAdminOrOwnUser] permission_classes = [IsAdminOrOwnUser]
model_permission_name = 'user' model_permission_name = "user"
class SmartBlockViewSet(viewsets.ModelViewSet): class SmartBlockViewSet(viewsets.ModelViewSet):
queryset = SmartBlock.objects.all() queryset = SmartBlock.objects.all()
serializer_class = SmartBlockSerializer serializer_class = SmartBlockSerializer
model_permission_name = 'smartblock' model_permission_name = "smartblock"
class SmartBlockContentViewSet(viewsets.ModelViewSet): class SmartBlockContentViewSet(viewsets.ModelViewSet):
queryset = SmartBlockContent.objects.all() queryset = SmartBlockContent.objects.all()
serializer_class = SmartBlockContentSerializer serializer_class = SmartBlockContentSerializer
model_permission_name = 'smartblockcontent' model_permission_name = "smartblockcontent"
class SmartBlockCriteriaViewSet(viewsets.ModelViewSet): class SmartBlockCriteriaViewSet(viewsets.ModelViewSet):
queryset = SmartBlockCriteria.objects.all() queryset = SmartBlockCriteria.objects.all()
serializer_class = SmartBlockCriteriaSerializer serializer_class = SmartBlockCriteriaSerializer
model_permission_name = 'smartblockcriteria' model_permission_name = "smartblockcriteria"
class CountryViewSet(viewsets.ModelViewSet): class CountryViewSet(viewsets.ModelViewSet):
queryset = Country.objects.all() queryset = Country.objects.all()
serializer_class = CountrySerializer serializer_class = CountrySerializer
model_permission_name = 'country' model_permission_name = "country"
class FileViewSet(viewsets.ModelViewSet): class FileViewSet(viewsets.ModelViewSet):
queryset = File.objects.all() queryset = File.objects.all()
serializer_class = FileSerializer serializer_class = FileSerializer
model_permission_name = 'file' model_permission_name = "file"
@action(detail=True, methods=['GET']) @action(detail=True, methods=["GET"])
def download(self, request, pk=None): def download(self, request, pk=None):
if pk is None: if pk is None:
return Response('No file requested', status=status.HTTP_400_BAD_REQUEST) return Response("No file requested", status=status.HTTP_400_BAD_REQUEST)
try: try:
pk = int(pk) pk = int(pk)
except ValueError: except ValueError:
return Response('File ID should be an integer', return Response(
status=status.HTTP_400_BAD_REQUEST) "File ID should be an integer", status=status.HTTP_400_BAD_REQUEST
)
filename = get_object_or_404(File, pk=pk) filename = get_object_or_404(File, pk=pk)
directory = filename.directory directory = filename.directory
path = os.path.join(directory.directory, filename.filepath) path = os.path.join(directory.directory, filename.filepath)
response = FileResponse(open(path, 'rb'), content_type=filename.mime) response = FileResponse(open(path, "rb"), content_type=filename.mime)
return response return response
class ListenerCountViewSet(viewsets.ModelViewSet): class ListenerCountViewSet(viewsets.ModelViewSet):
queryset = ListenerCount.objects.all() queryset = ListenerCount.objects.all()
serializer_class = ListenerCountSerializer serializer_class = ListenerCountSerializer
model_permission_name = 'listenercount' model_permission_name = "listenercount"
class LiveLogViewSet(viewsets.ModelViewSet): class LiveLogViewSet(viewsets.ModelViewSet):
queryset = LiveLog.objects.all() queryset = LiveLog.objects.all()
serializer_class = LiveLogSerializer serializer_class = LiveLogSerializer
model_permission_name = 'livelog' model_permission_name = "livelog"
class LoginAttemptViewSet(viewsets.ModelViewSet): class LoginAttemptViewSet(viewsets.ModelViewSet):
queryset = LoginAttempt.objects.all() queryset = LoginAttempt.objects.all()
serializer_class = LoginAttemptSerializer serializer_class = LoginAttemptSerializer
model_permission_name = 'loginattempt' model_permission_name = "loginattempt"
class MountNameViewSet(viewsets.ModelViewSet): class MountNameViewSet(viewsets.ModelViewSet):
queryset = MountName.objects.all() queryset = MountName.objects.all()
serializer_class = MountNameSerializer serializer_class = MountNameSerializer
model_permission_name = 'mountname' model_permission_name = "mountname"
class MusicDirViewSet(viewsets.ModelViewSet): class MusicDirViewSet(viewsets.ModelViewSet):
queryset = MusicDir.objects.all() queryset = MusicDir.objects.all()
serializer_class = MusicDirSerializer serializer_class = MusicDirSerializer
model_permission_name = 'musicdir' model_permission_name = "musicdir"
class PlaylistViewSet(viewsets.ModelViewSet): class PlaylistViewSet(viewsets.ModelViewSet):
queryset = Playlist.objects.all() queryset = Playlist.objects.all()
serializer_class = PlaylistSerializer serializer_class = PlaylistSerializer
model_permission_name = 'playlist' model_permission_name = "playlist"
class PlaylistContentViewSet(viewsets.ModelViewSet): class PlaylistContentViewSet(viewsets.ModelViewSet):
queryset = PlaylistContent.objects.all() queryset = PlaylistContent.objects.all()
serializer_class = PlaylistContentSerializer serializer_class = PlaylistContentSerializer
model_permission_name = 'playlistcontent' model_permission_name = "playlistcontent"
class PlayoutHistoryViewSet(viewsets.ModelViewSet): class PlayoutHistoryViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistory.objects.all() queryset = PlayoutHistory.objects.all()
serializer_class = PlayoutHistorySerializer serializer_class = PlayoutHistorySerializer
model_permission_name = 'playouthistory' model_permission_name = "playouthistory"
class PlayoutHistoryMetadataViewSet(viewsets.ModelViewSet): class PlayoutHistoryMetadataViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistoryMetadata.objects.all() queryset = PlayoutHistoryMetadata.objects.all()
serializer_class = PlayoutHistoryMetadataSerializer serializer_class = PlayoutHistoryMetadataSerializer
model_permission_name = 'playouthistorymetadata' model_permission_name = "playouthistorymetadata"
class PlayoutHistoryTemplateViewSet(viewsets.ModelViewSet): class PlayoutHistoryTemplateViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistoryTemplate.objects.all() queryset = PlayoutHistoryTemplate.objects.all()
serializer_class = PlayoutHistoryTemplateSerializer serializer_class = PlayoutHistoryTemplateSerializer
model_permission_name = 'playouthistorytemplate' model_permission_name = "playouthistorytemplate"
class PlayoutHistoryTemplateFieldViewSet(viewsets.ModelViewSet): class PlayoutHistoryTemplateFieldViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistoryTemplateField.objects.all() queryset = PlayoutHistoryTemplateField.objects.all()
serializer_class = PlayoutHistoryTemplateFieldSerializer serializer_class = PlayoutHistoryTemplateFieldSerializer
model_permission_name = 'playouthistorytemplatefield' model_permission_name = "playouthistorytemplatefield"
class PreferenceViewSet(viewsets.ModelViewSet): class PreferenceViewSet(viewsets.ModelViewSet):
queryset = Preference.objects.all() queryset = Preference.objects.all()
serializer_class = PreferenceSerializer serializer_class = PreferenceSerializer
model_permission_name = 'perference' model_permission_name = "perference"
class ScheduleViewSet(viewsets.ModelViewSet): class ScheduleViewSet(viewsets.ModelViewSet):
queryset = Schedule.objects.all() queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer serializer_class = ScheduleSerializer
filter_fields = ('starts', 'ends', 'playout_status', 'broadcasted') filter_fields = ("starts", "ends", "playout_status", "broadcasted")
model_permission_name = 'schedule' model_permission_name = "schedule"
class ServiceRegisterViewSet(viewsets.ModelViewSet): class ServiceRegisterViewSet(viewsets.ModelViewSet):
queryset = ServiceRegister.objects.all() queryset = ServiceRegister.objects.all()
serializer_class = ServiceRegisterSerializer serializer_class = ServiceRegisterSerializer
model_permission_name = 'serviceregister' model_permission_name = "serviceregister"
class SessionViewSet(viewsets.ModelViewSet): class SessionViewSet(viewsets.ModelViewSet):
queryset = Session.objects.all() queryset = Session.objects.all()
serializer_class = SessionSerializer serializer_class = SessionSerializer
model_permission_name = 'session' model_permission_name = "session"
class ShowViewSet(viewsets.ModelViewSet): class ShowViewSet(viewsets.ModelViewSet):
queryset = Show.objects.all() queryset = Show.objects.all()
serializer_class = ShowSerializer serializer_class = ShowSerializer
model_permission_name = 'show' model_permission_name = "show"
class ShowDaysViewSet(viewsets.ModelViewSet): class ShowDaysViewSet(viewsets.ModelViewSet):
queryset = ShowDays.objects.all() queryset = ShowDays.objects.all()
serializer_class = ShowDaysSerializer serializer_class = ShowDaysSerializer
model_permission_name = 'showdays' model_permission_name = "showdays"
class ShowHostViewSet(viewsets.ModelViewSet): class ShowHostViewSet(viewsets.ModelViewSet):
queryset = ShowHost.objects.all() queryset = ShowHost.objects.all()
serializer_class = ShowHostSerializer serializer_class = ShowHostSerializer
model_permission_name = 'showhost' model_permission_name = "showhost"
class ShowInstanceViewSet(viewsets.ModelViewSet): class ShowInstanceViewSet(viewsets.ModelViewSet):
queryset = ShowInstance.objects.all() queryset = ShowInstance.objects.all()
serializer_class = ShowInstanceSerializer serializer_class = ShowInstanceSerializer
model_permission_name = 'showinstance' model_permission_name = "showinstance"
class ShowRebroadcastViewSet(viewsets.ModelViewSet): class ShowRebroadcastViewSet(viewsets.ModelViewSet):
queryset = ShowRebroadcast.objects.all() queryset = ShowRebroadcast.objects.all()
serializer_class = ShowRebroadcastSerializer serializer_class = ShowRebroadcastSerializer
model_permission_name = 'showrebroadcast' model_permission_name = "showrebroadcast"
class StreamSettingViewSet(viewsets.ModelViewSet): class StreamSettingViewSet(viewsets.ModelViewSet):
queryset = StreamSetting.objects.all() queryset = StreamSetting.objects.all()
serializer_class = StreamSettingSerializer serializer_class = StreamSettingSerializer
model_permission_name = 'streamsetting' model_permission_name = "streamsetting"
class UserTokenViewSet(viewsets.ModelViewSet): class UserTokenViewSet(viewsets.ModelViewSet):
queryset = UserToken.objects.all() queryset = UserToken.objects.all()
serializer_class = UserTokenSerializer serializer_class = UserTokenSerializer
model_permission_name = 'usertoken' model_permission_name = "usertoken"
class TimestampViewSet(viewsets.ModelViewSet): class TimestampViewSet(viewsets.ModelViewSet):
queryset = Timestamp.objects.all() queryset = Timestamp.objects.all()
serializer_class = TimestampSerializer serializer_class = TimestampSerializer
model_permission_name = 'timestamp' model_permission_name = "timestamp"
class WebstreamViewSet(viewsets.ModelViewSet): class WebstreamViewSet(viewsets.ModelViewSet):
queryset = Webstream.objects.all() queryset = Webstream.objects.all()
serializer_class = WebstreamSerializer serializer_class = WebstreamSerializer
model_permission_name = 'webstream' model_permission_name = "webstream"
class WebstreamMetadataViewSet(viewsets.ModelViewSet): class WebstreamMetadataViewSet(viewsets.ModelViewSet):
queryset = WebstreamMetadata.objects.all() queryset = WebstreamMetadata.objects.all()
serializer_class = WebstreamMetadataSerializer serializer_class = WebstreamMetadataSerializer
model_permission_name = 'webstreametadata' model_permission_name = "webstreametadata"
class CeleryTaskViewSet(viewsets.ModelViewSet): class CeleryTaskViewSet(viewsets.ModelViewSet):
queryset = CeleryTask.objects.all() queryset = CeleryTask.objects.all()
serializer_class = CeleryTaskSerializer serializer_class = CeleryTaskSerializer
model_permission_name = 'celerytask' model_permission_name = "celerytask"
class CloudFileViewSet(viewsets.ModelViewSet): class CloudFileViewSet(viewsets.ModelViewSet):
queryset = CloudFile.objects.all() queryset = CloudFile.objects.all()
serializer_class = CloudFileSerializer serializer_class = CloudFileSerializer
model_permission_name = 'cloudfile' model_permission_name = "cloudfile"
class ImportedPodcastViewSet(viewsets.ModelViewSet): class ImportedPodcastViewSet(viewsets.ModelViewSet):
queryset = ImportedPodcast.objects.all() queryset = ImportedPodcast.objects.all()
serializer_class = ImportedPodcastSerializer serializer_class = ImportedPodcastSerializer
model_permission_name = 'importedpodcast' model_permission_name = "importedpodcast"
class PodcastViewSet(viewsets.ModelViewSet): class PodcastViewSet(viewsets.ModelViewSet):
queryset = Podcast.objects.all() queryset = Podcast.objects.all()
serializer_class = PodcastSerializer serializer_class = PodcastSerializer
model_permission_name = 'podcast' model_permission_name = "podcast"
class PodcastEpisodeViewSet(viewsets.ModelViewSet): class PodcastEpisodeViewSet(viewsets.ModelViewSet):
queryset = PodcastEpisode.objects.all() queryset = PodcastEpisode.objects.all()
serializer_class = PodcastEpisodeSerializer serializer_class = PodcastEpisodeSerializer
model_permission_name = 'podcastepisode' model_permission_name = "podcastepisode"
class StationPodcastViewSet(viewsets.ModelViewSet): class StationPodcastViewSet(viewsets.ModelViewSet):
queryset = StationPodcast.objects.all() queryset = StationPodcast.objects.all()
serializer_class = StationPodcastSerializer serializer_class = StationPodcastSerializer
model_permission_name = 'station' model_permission_name = "station"
class ThirdPartyTrackReferenceViewSet(viewsets.ModelViewSet): class ThirdPartyTrackReferenceViewSet(viewsets.ModelViewSet):
queryset = ThirdPartyTrackReference.objects.all() queryset = ThirdPartyTrackReference.objects.all()
serializer_class = ThirdPartyTrackReferenceSerializer serializer_class = ThirdPartyTrackReferenceSerializer
model_permission_name = 'thirdpartytrackreference' model_permission_name = "thirdpartytrackreference"
class TrackTypeViewSet(viewsets.ModelViewSet): class TrackTypeViewSet(viewsets.ModelViewSet):
queryset = TrackType.objects.all() queryset = TrackType.objects.all()
serializer_class = TrackTypeSerializer serializer_class = TrackTypeSerializer
model_permission_name = 'tracktype' model_permission_name = "tracktype"
@api_view(['GET'])
@permission_classes((AllowAny, )) @api_view(["GET"])
@permission_classes((AllowAny,))
def version(request, *args, **kwargs): def version(request, *args, **kwargs):
return Response({'api_version': settings.API_VERSION}) return Response({"api_version": settings.API_VERSION})

View File

@ -12,6 +12,6 @@ import os
from django.core.wsgi import get_wsgi_application from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'libretimeapi.settings') os.environ.setdefault("DJANGO_SETTINGS_MODULE", "libretimeapi.settings")
application = get_wsgi_application() application = get_wsgi_application()

View File

@ -8,26 +8,26 @@ print(script_path)
os.chdir(script_path) os.chdir(script_path)
setup( setup(
name='libretime-api', name="libretime-api",
version='2.0.0a1', version="2.0.0a1",
packages=find_packages(), packages=find_packages(),
include_package_data=True, include_package_data=True,
description='LibreTime API backend server', description="LibreTime API backend server",
url='https://github.com/LibreTime/libretime', url="https://github.com/LibreTime/libretime",
author='LibreTime Contributors', author="LibreTime Contributors",
scripts=['bin/libretime-api'], scripts=["bin/libretime-api"],
install_requires=[ install_requires=[
'coreapi', "coreapi",
'Django~=3.0', "Django~=3.0",
'djangorestframework', "djangorestframework",
'django-url-filter', "django-url-filter",
'markdown', "markdown",
'model_bakery', "model_bakery",
'psycopg2', "psycopg2",
], ],
project_urls={ project_urls={
'Bug Tracker': 'https://github.com/LibreTime/libretime/issues', "Bug Tracker": "https://github.com/LibreTime/libretime/issues",
'Documentation': 'https://libretime.org', "Documentation": "https://libretime.org",
'Source Code': 'https://github.com/LibreTime/libretime', "Source Code": "https://github.com/LibreTime/libretime",
}, },
) )

View File

@ -16,8 +16,8 @@ similar code when it starts up (but then makes changes if something is different
""" """
class AirtimeMediaMonitorBootstrap(): class AirtimeMediaMonitorBootstrap:
"""AirtimeMediaMonitorBootstrap constructor """AirtimeMediaMonitorBootstrap constructor
Keyword Arguments: Keyword Arguments:
@ -25,8 +25,9 @@ class AirtimeMediaMonitorBootstrap():
pe -- reference to an instance of ProcessEvent pe -- reference to an instance of ProcessEvent
api_clients -- reference of api_clients to communicate with airtime-server api_clients -- reference of api_clients to communicate with airtime-server
""" """
def __init__(self): def __init__(self):
config = ConfigObj('/etc/airtime/airtime.conf') config = ConfigObj("/etc/airtime/airtime.conf")
self.api_client = apc.api_client_factory(config) self.api_client = apc.api_client_factory(config)
""" """
@ -36,25 +37,26 @@ class AirtimeMediaMonitorBootstrap():
print 'Error configuring logging: ', e print 'Error configuring logging: ', e
sys.exit(1) sys.exit(1)
""" """
self.logger = logging.getLogger() self.logger = logging.getLogger()
self.logger.info("Adding %s on watch list...", "xxx") self.logger.info("Adding %s on watch list...", "xxx")
self.scan() self.scan()
"""On bootup we want to scan all directories and look for files that """On bootup we want to scan all directories and look for files that
weren't there or files that changed before media-monitor process weren't there or files that changed before media-monitor process
went offline. went offline.
""" """
def scan(self): def scan(self):
directories = self.get_list_of_watched_dirs(); directories = self.get_list_of_watched_dirs()
self.logger.info("watched directories found: %s", directories) self.logger.info("watched directories found: %s", directories)
for id, dir in directories.iteritems(): for id, dir in directories.iteritems():
self.logger.debug("%s, %s", id, dir) self.logger.debug("%s, %s", id, dir)
#CHANGED!!! # CHANGED!!!
#self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8")) # self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8"))
self.sync_database_to_filesystem(id, dir) self.sync_database_to_filesystem(id, dir)
"""Gets a list of files that the Airtime database knows for a specific directory. """Gets a list of files that the Airtime database knows for a specific directory.
@ -62,6 +64,7 @@ class AirtimeMediaMonitorBootstrap():
get_list_of_watched_dirs function. get_list_of_watched_dirs function.
dir_id -- row id of the directory in the cc_watched_dirs database table dir_id -- row id of the directory in the cc_watched_dirs database table
""" """
def list_db_files(self, dir_id): def list_db_files(self, dir_id):
return self.api_client.list_all_db_files(dir_id) return self.api_client.list_all_db_files(dir_id)
@ -69,23 +72,29 @@ class AirtimeMediaMonitorBootstrap():
returns the path and the database row id for this path for all watched directories. Also returns the path and the database row id for this path for all watched directories. Also
returns the Stor directory, which can be identified by its row id (always has value of "1") returns the Stor directory, which can be identified by its row id (always has value of "1")
""" """
def get_list_of_watched_dirs(self): def get_list_of_watched_dirs(self):
json = self.api_client.list_all_watched_dirs() json = self.api_client.list_all_watched_dirs()
return json["dirs"] return json["dirs"]
def scan_dir_for_existing_files(self, dir): def scan_dir_for_existing_files(self, dir):
command = 'find "%s" -type f -iname "*.ogg" -o -iname "*.mp3" -readable' % dir.replace('"', '\\"') command = (
'find "%s" -type f -iname "*.ogg" -o -iname "*.mp3" -readable'
% dir.replace('"', '\\"')
)
self.logger.debug(command) self.logger.debug(command)
#CHANGED!! # CHANGED!!
stdout = self.exec_command(command).decode("UTF-8") stdout = self.exec_command(command).decode("UTF-8")
return stdout.splitlines() return stdout.splitlines()
def exec_command(self, command): def exec_command(self, command):
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE) p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
if p.returncode != 0: if p.returncode != 0:
self.logger.warn("command \n%s\n return with a non-zero return value", command) self.logger.warn(
"command \n%s\n return with a non-zero return value", command
)
self.logger.error(stderr) self.logger.error(stderr)
return stdout return stdout
@ -98,6 +107,7 @@ class AirtimeMediaMonitorBootstrap():
dir_id -- row id of the directory in the cc_watched_dirs database table dir_id -- row id of the directory in the cc_watched_dirs database table
dir -- pathname of the directory dir -- pathname of the directory
""" """
def sync_database_to_filesystem(self, dir_id, dir): def sync_database_to_filesystem(self, dir_id, dir):
""" """
set to hold new and/or modified files. We use a set to make it ok if files are added set to hold new and/or modified files. We use a set to make it ok if files are added
@ -107,7 +117,7 @@ class AirtimeMediaMonitorBootstrap():
db_known_files_set = set() db_known_files_set = set()
files = self.list_db_files(dir_id) files = self.list_db_files(dir_id)
for file in files['files']: for file in files["files"]:
db_known_files_set.add(file) db_known_files_set.add(file)
existing_files = self.scan_dir_for_existing_files(dir) existing_files = self.scan_dir_for_existing_files(dir)
@ -115,18 +125,17 @@ class AirtimeMediaMonitorBootstrap():
existing_files_set = set() existing_files_set = set()
for file_path in existing_files: for file_path in existing_files:
if len(file_path.strip(" \n")) > 0: if len(file_path.strip(" \n")) > 0:
existing_files_set.add(file_path[len(dir):]) existing_files_set.add(file_path[len(dir) :])
deleted_files_set = db_known_files_set - existing_files_set deleted_files_set = db_known_files_set - existing_files_set
new_files_set = existing_files_set - db_known_files_set new_files_set = existing_files_set - db_known_files_set
print("DB Known files: \n%s\n\n" % len(db_known_files_set))
print("FS Known files: \n%s\n\n" % len(existing_files_set))
print("Deleted files: \n%s\n\n" % deleted_files_set)
print("New files: \n%s\n\n" % new_files_set)
print ("DB Known files: \n%s\n\n"%len(db_known_files_set))
print ("FS Known files: \n%s\n\n"%len(existing_files_set))
print ("Deleted files: \n%s\n\n"%deleted_files_set)
print ("New files: \n%s\n\n"%new_files_set)
if __name__ == "__main__": if __name__ == "__main__":
AirtimeMediaMonitorBootstrap() AirtimeMediaMonitorBootstrap()

View File

@ -10,24 +10,25 @@ from . import config_file
from functools import partial from functools import partial
from .metadata_analyzer import MetadataAnalyzer from .metadata_analyzer import MetadataAnalyzer
from .replaygain_analyzer import ReplayGainAnalyzer from .replaygain_analyzer import ReplayGainAnalyzer
from .status_reporter import StatusReporter from .status_reporter import StatusReporter
from .message_listener import MessageListener from .message_listener import MessageListener
class AirtimeAnalyzerServer: class AirtimeAnalyzerServer:
"""A server for importing uploads to Airtime as background jobs. """A server for importing uploads to Airtime as background jobs."""
"""
# Constants # Constants
_LOG_PATH = "/var/log/airtime/airtime_analyzer.log" _LOG_PATH = "/var/log/airtime/airtime_analyzer.log"
# Variables # Variables
_log_level = logging.INFO _log_level = logging.INFO
def __init__(self, rmq_config_path, http_retry_queue_path, debug=False): def __init__(self, rmq_config_path, http_retry_queue_path, debug=False):
# Dump a stacktrace with 'kill -SIGUSR2 <PID>' # Dump a stacktrace with 'kill -SIGUSR2 <PID>'
signal.signal(signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace()) signal.signal(
signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace()
)
# Configure logging # Configure logging
self.setup_logging(debug) self.setup_logging(debug)
@ -43,11 +44,10 @@ class AirtimeAnalyzerServer:
self._msg_listener = MessageListener(rmq_config) self._msg_listener = MessageListener(rmq_config)
StatusReporter.stop_thread() StatusReporter.stop_thread()
def setup_logging(self, debug): def setup_logging(self, debug):
"""Set up nicely formatted logging and log rotation. """Set up nicely formatted logging and log rotation.
Keyword arguments: Keyword arguments:
debug -- a boolean indicating whether to enable super verbose logging debug -- a boolean indicating whether to enable super verbose logging
to the screen and disk. to the screen and disk.
@ -55,27 +55,30 @@ class AirtimeAnalyzerServer:
if debug: if debug:
self._log_level = logging.DEBUG self._log_level = logging.DEBUG
else: else:
#Disable most pika/rabbitmq logging: # Disable most pika/rabbitmq logging:
pika_logger = logging.getLogger('pika') pika_logger = logging.getLogger("pika")
pika_logger.setLevel(logging.CRITICAL) pika_logger.setLevel(logging.CRITICAL)
# Set up logging # Set up logging
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s") logFormatter = logging.Formatter(
"%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger() rootLogger = logging.getLogger()
rootLogger.setLevel(self._log_level) rootLogger.setLevel(self._log_level)
fileHandler = logging.handlers.RotatingFileHandler(filename=self._LOG_PATH, maxBytes=1024*1024*30, fileHandler = logging.handlers.RotatingFileHandler(
backupCount=8) filename=self._LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8
)
fileHandler.setFormatter(logFormatter) fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler) rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler() consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter) consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler) rootLogger.addHandler(consoleHandler)
@classmethod @classmethod
def dump_stacktrace(stack): def dump_stacktrace(stack):
''' Dump a stacktrace for all threads ''' """Dump a stacktrace for all threads"""
code = [] code = []
for threadId, stack in list(sys._current_frames().items()): for threadId, stack in list(sys._current_frames().items()):
code.append("\n# ThreadID: %s" % threadId) code.append("\n# ThreadID: %s" % threadId)
@ -83,4 +86,4 @@ class AirtimeAnalyzerServer:
code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line: if line:
code.append(" %s" % (line.strip())) code.append(" %s" % (line.strip()))
logging.info('\n'.join(code)) logging.info("\n".join(code))

View File

@ -3,8 +3,7 @@
class Analyzer: class Analyzer:
""" Abstract base class for all "analyzers". """Abstract base class for all "analyzers"."""
"""
@staticmethod @staticmethod
def analyze(filename, metadata): def analyze(filename, metadata):

View File

@ -12,20 +12,28 @@ from .cuepoint_analyzer import CuePointAnalyzer
from .replaygain_analyzer import ReplayGainAnalyzer from .replaygain_analyzer import ReplayGainAnalyzer
from .playability_analyzer import * from .playability_analyzer import *
class AnalyzerPipeline:
""" Analyzes and imports an audio file into the Airtime library.
This currently performs metadata extraction (eg. gets the ID3 tags from an MP3), class AnalyzerPipeline:
then moves the file to the Airtime music library (stor/imported), and returns """Analyzes and imports an audio file into the Airtime library.
the results back to the parent process. This class is used in an isolated process
so that if it crashes, it does not kill the entire airtime_analyzer daemon and This currently performs metadata extraction (eg. gets the ID3 tags from an MP3),
the failure to import can be reported back to the web application. then moves the file to the Airtime music library (stor/imported), and returns
the results back to the parent process. This class is used in an isolated process
so that if it crashes, it does not kill the entire airtime_analyzer daemon and
the failure to import can be reported back to the web application.
""" """
IMPORT_STATUS_FAILED = 2 IMPORT_STATUS_FAILED = 2
@staticmethod @staticmethod
def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix): def run_analysis(
queue,
audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
):
"""Analyze and import an audio file, and put all extracted metadata into queue. """Analyze and import an audio file, and put all extracted metadata into queue.
Keyword arguments: Keyword arguments:
@ -50,14 +58,29 @@ class AnalyzerPipeline:
if not isinstance(queue, Queue): if not isinstance(queue, Queue):
raise TypeError("queue must be a Queue.Queue()") raise TypeError("queue must be a Queue.Queue()")
if not isinstance(audio_file_path, str): if not isinstance(audio_file_path, str):
raise TypeError("audio_file_path must be unicode. Was of type " + type(audio_file_path).__name__ + " instead.") raise TypeError(
"audio_file_path must be unicode. Was of type "
+ type(audio_file_path).__name__
+ " instead."
)
if not isinstance(import_directory, str): if not isinstance(import_directory, str):
raise TypeError("import_directory must be unicode. Was of type " + type(import_directory).__name__ + " instead.") raise TypeError(
"import_directory must be unicode. Was of type "
+ type(import_directory).__name__
+ " instead."
)
if not isinstance(original_filename, str): if not isinstance(original_filename, str):
raise TypeError("original_filename must be unicode. Was of type " + type(original_filename).__name__ + " instead.") raise TypeError(
"original_filename must be unicode. Was of type "
+ type(original_filename).__name__
+ " instead."
)
if not isinstance(file_prefix, str): if not isinstance(file_prefix, str):
raise TypeError("file_prefix must be unicode. Was of type " + type(file_prefix).__name__ + " instead.") raise TypeError(
"file_prefix must be unicode. Was of type "
+ type(file_prefix).__name__
+ " instead."
)
# Analyze the audio file we were told to analyze: # Analyze the audio file we were told to analyze:
# First, we extract the ID3 tags and other metadata: # First, we extract the ID3 tags and other metadata:
@ -69,9 +92,11 @@ class AnalyzerPipeline:
metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata) metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata)
metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata) metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata)
metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata) metadata = FileMoverAnalyzer.move(
audio_file_path, import_directory, original_filename, metadata
)
metadata["import_status"] = 0 # Successfully imported metadata["import_status"] = 0 # Successfully imported
# Note that the queue we're putting the results into is our interprocess communication # Note that the queue we're putting the results into is our interprocess communication
# back to the main process. # back to the main process.
@ -93,9 +118,8 @@ class AnalyzerPipeline:
def python_logger_deadlock_workaround(): def python_logger_deadlock_workaround():
# Workaround for: http://bugs.python.org/issue6721#msg140215 # Workaround for: http://bugs.python.org/issue6721#msg140215
logger_names = list(logging.Logger.manager.loggerDict.keys()) logger_names = list(logging.Logger.manager.loggerDict.keys())
logger_names.append(None) # Root logger logger_names.append(None) # Root logger
for name in logger_names: for name in logger_names:
for handler in logging.getLogger(name).handlers: for handler in logging.getLogger(name).handlers:
handler.createLock() handler.createLock()
logging._lock = threading.RLock() logging._lock = threading.RLock()

View File

@ -9,21 +9,32 @@ import os
import airtime_analyzer.airtime_analyzer as aa import airtime_analyzer.airtime_analyzer as aa
VERSION = "1.0" VERSION = "1.0"
LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime') LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime")
DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf') DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, "airtime.conf")
DEFAULT_HTTP_RETRY_PATH = '/tmp/airtime_analyzer_http_retries' DEFAULT_HTTP_RETRY_PATH = "/tmp/airtime_analyzer_http_retries"
def main(): def main():
'''Entry-point for this application''' """Entry-point for this application"""
print("LibreTime Analyzer {}".format(VERSION)) print("LibreTime Analyzer {}".format(VERSION))
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true") parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true")
parser.add_argument("--debug", help="log full debugging output", action="store_true") parser.add_argument(
parser.add_argument("--rmq-config-file", help="specify a configuration file with RabbitMQ settings (default is %s)" % DEFAULT_RMQ_CONFIG_PATH) "--debug", help="log full debugging output", action="store_true"
parser.add_argument("--http-retry-queue-file", help="specify where incompleted HTTP requests will be serialized (default is %s)" % DEFAULT_HTTP_RETRY_PATH) )
parser.add_argument(
"--rmq-config-file",
help="specify a configuration file with RabbitMQ settings (default is %s)"
% DEFAULT_RMQ_CONFIG_PATH,
)
parser.add_argument(
"--http-retry-queue-file",
help="specify where incompleted HTTP requests will be serialized (default is %s)"
% DEFAULT_HTTP_RETRY_PATH,
)
args = parser.parse_args() args = parser.parse_args()
#Default config file path # Default config file path
rmq_config_path = DEFAULT_RMQ_CONFIG_PATH rmq_config_path = DEFAULT_RMQ_CONFIG_PATH
http_retry_queue_path = DEFAULT_HTTP_RETRY_PATH http_retry_queue_path = DEFAULT_HTTP_RETRY_PATH
if args.rmq_config_file: if args.rmq_config_file:
@ -33,14 +44,19 @@ def main():
if args.daemon: if args.daemon:
with daemon.DaemonContext(): with daemon.DaemonContext():
aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path, aa.AirtimeAnalyzerServer(
http_retry_queue_path=http_retry_queue_path, rmq_config_path=rmq_config_path,
debug=args.debug) http_retry_queue_path=http_retry_queue_path,
debug=args.debug,
)
else: else:
# Run without daemonizing # Run without daemonizing
aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path, aa.AirtimeAnalyzerServer(
http_retry_queue_path=http_retry_queue_path, rmq_config_path=rmq_config_path,
debug=args.debug) http_retry_queue_path=http_retry_queue_path,
debug=args.debug,
)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@ -2,6 +2,7 @@
import configparser import configparser
def read_config_file(config_path): def read_config_file(config_path):
"""Parse the application's config file located at config_path.""" """Parse the application's config file located at config_path."""
config = configparser.SafeConfigParser() config = configparser.SafeConfigParser()

View File

@ -8,26 +8,38 @@ from .analyzer import Analyzer
class CuePointAnalyzer(Analyzer): class CuePointAnalyzer(Analyzer):
''' This class extracts the cue-in time, cue-out time, and length of a track using silan. ''' """This class extracts the cue-in time, cue-out time, and length of a track using silan."""
SILAN_EXECUTABLE = 'silan' SILAN_EXECUTABLE = "silan"
@staticmethod @staticmethod
def analyze(filename, metadata): def analyze(filename, metadata):
''' Extracts the cue-in and cue-out times along and sets the file duration based on that. """Extracts the cue-in and cue-out times along and sets the file duration based on that.
The cue points are there to skip the silence at the start and end of a track, and are determined The cue points are there to skip the silence at the start and end of a track, and are determined
using "silan", which analyzes the loudness in a track. using "silan", which analyzes the loudness in a track.
:param filename: The full path to the file to analyzer :param filename: The full path to the file to analyzer
:param metadata: A metadata dictionary where the results will be put :param metadata: A metadata dictionary where the results will be put
:return: The metadata dictionary :return: The metadata dictionary
''' """
''' The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting, """ The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting,
the unit test on the short m4a file fails. With the new setting, it gets the correct cue-in time and the unit test on the short m4a file fails. With the new setting, it gets the correct cue-in time and
all the unit tests pass. all the unit tests pass.
''' """
command = [CuePointAnalyzer.SILAN_EXECUTABLE, '-b', '-F', '0.99', '-f', 'JSON', '-t', '1.0', filename] command = [
CuePointAnalyzer.SILAN_EXECUTABLE,
"-b",
"-F",
"0.99",
"-f",
"JSON",
"-t",
"1.0",
filename,
]
try: try:
results_json = subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True) results_json = subprocess.check_output(
command, stderr=subprocess.STDOUT, close_fds=True
)
try: try:
results_json = results_json.decode() results_json = results_json.decode()
except (UnicodeDecodeError, AttributeError): except (UnicodeDecodeError, AttributeError):
@ -35,40 +47,51 @@ class CuePointAnalyzer(Analyzer):
silan_results = json.loads(results_json) silan_results = json.loads(results_json)
# Defensive coding against Silan wildly miscalculating the cue in and out times: # Defensive coding against Silan wildly miscalculating the cue in and out times:
silan_length_seconds = float(silan_results['file duration']) silan_length_seconds = float(silan_results["file duration"])
silan_cuein = format(silan_results['sound'][0][0], 'f') silan_cuein = format(silan_results["sound"][0][0], "f")
silan_cueout = format(silan_results['sound'][0][1], 'f') silan_cueout = format(silan_results["sound"][0][1], "f")
# Sanity check the results against any existing metadata passed to us (presumably extracted by Mutagen): # Sanity check the results against any existing metadata passed to us (presumably extracted by Mutagen):
if 'length_seconds' in metadata: if "length_seconds" in metadata:
# Silan has a rare bug where it can massively overestimate the length or cue out time sometimes. # Silan has a rare bug where it can massively overestimate the length or cue out time sometimes.
if (silan_length_seconds - metadata['length_seconds'] > 3) or (float(silan_cueout) - metadata['length_seconds'] > 2): if (silan_length_seconds - metadata["length_seconds"] > 3) or (
float(silan_cueout) - metadata["length_seconds"] > 2
):
# Don't trust anything silan says then... # Don't trust anything silan says then...
raise Exception("Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values." raise Exception(
.format(silan_cueout, silan_length_seconds, metadata['length_seconds'])) "Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values.".format(
silan_cueout,
silan_length_seconds,
metadata["length_seconds"],
)
)
# Don't allow silan to trim more than the greater of 3 seconds or 5% off the start of a track # Don't allow silan to trim more than the greater of 3 seconds or 5% off the start of a track
if float(silan_cuein) > max(silan_length_seconds*0.05, 3): if float(silan_cuein) > max(silan_length_seconds * 0.05, 3):
raise Exception("Silan cue in time {0} too big, ignoring.".format(silan_cuein)) raise Exception(
"Silan cue in time {0} too big, ignoring.".format(silan_cuein)
)
else: else:
# Only use the Silan track length in the worst case, where Mutagen didn't give us one for some reason. # Only use the Silan track length in the worst case, where Mutagen didn't give us one for some reason.
# (This is mostly to make the unit tests still pass.) # (This is mostly to make the unit tests still pass.)
# Convert the length into a formatted time string. # Convert the length into a formatted time string.
metadata['length_seconds'] = silan_length_seconds # metadata["length_seconds"] = silan_length_seconds #
track_length = datetime.timedelta(seconds=metadata['length_seconds']) track_length = datetime.timedelta(seconds=metadata["length_seconds"])
metadata["length"] = str(track_length) metadata["length"] = str(track_length)
""" XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan
''' XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan
as of Mutagen version 1.31. We are always going to use Mutagen's length now because Silan's as of Mutagen version 1.31. We are always going to use Mutagen's length now because Silan's
length can be off by a few seconds reasonably often. length can be off by a few seconds reasonably often.
''' """
metadata['cuein'] = silan_cuein metadata["cuein"] = silan_cuein
metadata['cueout'] = silan_cueout metadata["cueout"] = silan_cueout
except OSError as e: # silan was not found except OSError as e: # silan was not found
logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have silan installed?")) logging.warn(
except subprocess.CalledProcessError as e: # silan returned an error code "Failed to run: %s - %s. %s"
% (command[0], e.strerror, "Do you have silan installed?")
)
except subprocess.CalledProcessError as e: # silan returned an error code
logging.warn("%s %s %s", e.cmd, e.output, e.returncode) logging.warn("%s %s %s", e.cmd, e.output, e.returncode)
except Exception as e: except Exception as e:
logging.warn(e) logging.warn(e)

View File

@ -9,10 +9,12 @@ import uuid
from .analyzer import Analyzer from .analyzer import Analyzer
class FileMoverAnalyzer(Analyzer): class FileMoverAnalyzer(Analyzer):
"""This analyzer copies a file over from a temporary directory (stor/organize) """This analyzer copies a file over from a temporary directory (stor/organize)
into the Airtime library (stor/imported). into the Airtime library (stor/imported).
""" """
@staticmethod @staticmethod
def analyze(audio_file_path, metadata): def analyze(audio_file_path, metadata):
"""Dummy method because we need more info than analyze gets passed to it""" """Dummy method because we need more info than analyze gets passed to it"""
@ -21,27 +23,38 @@ class FileMoverAnalyzer(Analyzer):
@staticmethod @staticmethod
def move(audio_file_path, import_directory, original_filename, metadata): def move(audio_file_path, import_directory, original_filename, metadata):
"""Move the file at audio_file_path over into the import_directory/import, """Move the file at audio_file_path over into the import_directory/import,
renaming it to original_filename. renaming it to original_filename.
Keyword arguments: Keyword arguments:
audio_file_path: Path to the file to be imported. audio_file_path: Path to the file to be imported.
import_directory: Path to the "import" directory inside the Airtime stor directory. import_directory: Path to the "import" directory inside the Airtime stor directory.
(eg. /srv/airtime/stor/import) (eg. /srv/airtime/stor/import)
original_filename: The filename of the file when it was uploaded to Airtime. original_filename: The filename of the file when it was uploaded to Airtime.
metadata: A dictionary where the "full_path" of where the file is moved to will be added. metadata: A dictionary where the "full_path" of where the file is moved to will be added.
""" """
if not isinstance(audio_file_path, str): if not isinstance(audio_file_path, str):
raise TypeError("audio_file_path must be string. Was of type " + type(audio_file_path).__name__) raise TypeError(
"audio_file_path must be string. Was of type "
+ type(audio_file_path).__name__
)
if not isinstance(import_directory, str): if not isinstance(import_directory, str):
raise TypeError("import_directory must be string. Was of type " + type(import_directory).__name__) raise TypeError(
"import_directory must be string. Was of type "
+ type(import_directory).__name__
)
if not isinstance(original_filename, str): if not isinstance(original_filename, str):
raise TypeError("original_filename must be string. Was of type " + type(original_filename).__name__) raise TypeError(
"original_filename must be string. Was of type "
+ type(original_filename).__name__
)
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__) raise TypeError(
"metadata must be a dict. Was of type " + type(metadata).__name__
)
if not os.path.exists(audio_file_path): if not os.path.exists(audio_file_path):
raise FileNotFoundError("audio file not found: {}".format(audio_file_path)) raise FileNotFoundError("audio file not found: {}".format(audio_file_path))
#Import the file over to it's final location. # Import the file over to it's final location.
# TODO: Also, handle the case where the move fails and write some code # TODO: Also, handle the case where the move fails and write some code
# to possibly move the file to problem_files. # to possibly move the file to problem_files.
@ -50,52 +63,65 @@ class FileMoverAnalyzer(Analyzer):
final_file_path = import_directory final_file_path = import_directory
orig_file_basename, orig_file_extension = os.path.splitext(original_filename) orig_file_basename, orig_file_extension = os.path.splitext(original_filename)
if "artist_name" in metadata: if "artist_name" in metadata:
final_file_path += "/" + metadata["artist_name"][0:max_dir_len] # truncating with array slicing final_file_path += (
"/" + metadata["artist_name"][0:max_dir_len]
) # truncating with array slicing
if "album_title" in metadata: if "album_title" in metadata:
final_file_path += "/" + metadata["album_title"][0:max_dir_len] final_file_path += "/" + metadata["album_title"][0:max_dir_len]
# Note that orig_file_extension includes the "." already # Note that orig_file_extension includes the "." already
final_file_path += "/" + orig_file_basename[0:max_file_len] + orig_file_extension final_file_path += (
"/" + orig_file_basename[0:max_file_len] + orig_file_extension
)
#Ensure any redundant slashes are stripped # Ensure any redundant slashes are stripped
final_file_path = os.path.normpath(final_file_path) final_file_path = os.path.normpath(final_file_path)
#If a file with the same name already exists in the "import" directory, then # If a file with the same name already exists in the "import" directory, then
#we add a unique string to the end of this one. We never overwrite a file on import # we add a unique string to the end of this one. We never overwrite a file on import
#because if we did that, it would mean Airtime's database would have # because if we did that, it would mean Airtime's database would have
#the wrong information for the file we just overwrote (eg. the song length would be wrong!) # the wrong information for the file we just overwrote (eg. the song length would be wrong!)
#If the final file path is the same as the file we've been told to import (which # If the final file path is the same as the file we've been told to import (which
#you often do when you're debugging), then don't move the file at all. # you often do when you're debugging), then don't move the file at all.
if os.path.exists(final_file_path): if os.path.exists(final_file_path):
if os.path.samefile(audio_file_path, final_file_path): if os.path.samefile(audio_file_path, final_file_path):
metadata["full_path"] = final_file_path metadata["full_path"] = final_file_path
return metadata return metadata
base_file_path, file_extension = os.path.splitext(final_file_path) base_file_path, file_extension = os.path.splitext(final_file_path)
final_file_path = "%s_%s%s" % (base_file_path, time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()), file_extension) final_file_path = "%s_%s%s" % (
base_file_path,
time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()),
file_extension,
)
#If THAT path exists, append a UUID instead: # If THAT path exists, append a UUID instead:
while os.path.exists(final_file_path): while os.path.exists(final_file_path):
base_file_path, file_extension = os.path.splitext(final_file_path) base_file_path, file_extension = os.path.splitext(final_file_path)
final_file_path = "%s_%s%s" % (base_file_path, str(uuid.uuid4()), file_extension) final_file_path = "%s_%s%s" % (
base_file_path,
str(uuid.uuid4()),
file_extension,
)
#Ensure the full path to the file exists # Ensure the full path to the file exists
mkdir_p(os.path.dirname(final_file_path)) mkdir_p(os.path.dirname(final_file_path))
#Move the file into its final destination directory # Move the file into its final destination directory
logging.debug("Moving %s to %s" % (audio_file_path, final_file_path)) logging.debug("Moving %s to %s" % (audio_file_path, final_file_path))
shutil.move(audio_file_path, final_file_path) shutil.move(audio_file_path, final_file_path)
metadata["full_path"] = final_file_path metadata["full_path"] = final_file_path
return metadata return metadata
def mkdir_p(path): def mkdir_p(path):
""" Make all directories in a tree (like mkdir -p)""" """Make all directories in a tree (like mkdir -p)"""
if path == "": if path == "":
return return
try: try:
os.makedirs(path) os.makedirs(path)
except OSError as exc: # Python >2.5 except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path): if exc.errno == errno.EEXIST and os.path.isdir(path):
pass pass
else: raise else:
raise

View File

@ -5,8 +5,8 @@ import json
import time import time
import select import select
import signal import signal
import logging import logging
import multiprocessing import multiprocessing
import queue import queue
from .analyzer_pipeline import AnalyzerPipeline from .analyzer_pipeline import AnalyzerPipeline
from .status_reporter import StatusReporter from .status_reporter import StatusReporter
@ -54,29 +54,30 @@ QUEUE = "airtime-uploads"
So that is a quick overview of the design constraints for this application, and So that is a quick overview of the design constraints for this application, and
why airtime_analyzer is written this way. why airtime_analyzer is written this way.
""" """
class MessageListener:
class MessageListener:
def __init__(self, rmq_config): def __init__(self, rmq_config):
''' Start listening for file upload notification messages """Start listening for file upload notification messages
from RabbitMQ from RabbitMQ
Keyword arguments: Keyword arguments:
rmq_config: A ConfigParser object containing the [rabbitmq] configuration. rmq_config: A ConfigParser object containing the [rabbitmq] configuration.
''' """
self._shutdown = False self._shutdown = False
# Read the RabbitMQ connection settings from the rmq_config file # Read the RabbitMQ connection settings from the rmq_config file
# The exceptions throw here by default give good error messages. # The exceptions throw here by default give good error messages.
RMQ_CONFIG_SECTION = "rabbitmq" RMQ_CONFIG_SECTION = "rabbitmq"
self._host = rmq_config.get(RMQ_CONFIG_SECTION, 'host') self._host = rmq_config.get(RMQ_CONFIG_SECTION, "host")
self._port = rmq_config.getint(RMQ_CONFIG_SECTION, 'port') self._port = rmq_config.getint(RMQ_CONFIG_SECTION, "port")
self._username = rmq_config.get(RMQ_CONFIG_SECTION, 'user') self._username = rmq_config.get(RMQ_CONFIG_SECTION, "user")
self._password = rmq_config.get(RMQ_CONFIG_SECTION, 'password') self._password = rmq_config.get(RMQ_CONFIG_SECTION, "password")
self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, 'vhost') self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, "vhost")
# Set up a signal handler so we can shutdown gracefully # Set up a signal handler so we can shutdown gracefully
# For some reason, this signal handler must be set up here. I'd rather # For some reason, this signal handler must be set up here. I'd rather
# put it in AirtimeAnalyzerServer, but it doesn't work there (something to do # put it in AirtimeAnalyzerServer, but it doesn't work there (something to do
# with pika's SIGTERM handler interfering with it, I think...) # with pika's SIGTERM handler interfering with it, I think...)
signal.signal(signal.SIGTERM, self.graceful_shutdown) signal.signal(signal.SIGTERM, self.graceful_shutdown)
@ -86,9 +87,9 @@ class MessageListener:
self.connect_to_messaging_server() self.connect_to_messaging_server()
self.wait_for_messages() self.wait_for_messages()
except (KeyboardInterrupt, SystemExit): except (KeyboardInterrupt, SystemExit):
break # Break out of the while loop and exit the application break # Break out of the while loop and exit the application
except select.error: except select.error:
pass pass
except pika.exceptions.AMQPError as e: except pika.exceptions.AMQPError as e:
if self._shutdown: if self._shutdown:
break break
@ -100,27 +101,37 @@ class MessageListener:
self.disconnect_from_messaging_server() self.disconnect_from_messaging_server()
logging.info("Exiting cleanly.") logging.info("Exiting cleanly.")
def connect_to_messaging_server(self): def connect_to_messaging_server(self):
'''Connect to the RabbitMQ server and start listening for messages.''' """Connect to the RabbitMQ server and start listening for messages."""
self._connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._host, self._connection = pika.BlockingConnection(
port=self._port, virtual_host=self._vhost, pika.ConnectionParameters(
credentials=pika.credentials.PlainCredentials(self._username, self._password))) host=self._host,
port=self._port,
virtual_host=self._vhost,
credentials=pika.credentials.PlainCredentials(
self._username, self._password
),
)
)
self._channel = self._connection.channel() self._channel = self._connection.channel()
self._channel.exchange_declare(exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True) self._channel.exchange_declare(
exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True
)
result = self._channel.queue_declare(queue=QUEUE, durable=True) result = self._channel.queue_declare(queue=QUEUE, durable=True)
self._channel.queue_bind(exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY) self._channel.queue_bind(
exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY
)
logging.info(" Listening for messages...") logging.info(" Listening for messages...")
self._channel.basic_consume(QUEUE, self.msg_received_callback, auto_ack=False) self._channel.basic_consume(QUEUE, self.msg_received_callback, auto_ack=False)
def wait_for_messages(self): def wait_for_messages(self):
'''Wait until we've received a RabbitMQ message.''' """Wait until we've received a RabbitMQ message."""
self._channel.start_consuming() self._channel.start_consuming()
def disconnect_from_messaging_server(self): def disconnect_from_messaging_server(self):
'''Stop consuming RabbitMQ messages and disconnect''' """Stop consuming RabbitMQ messages and disconnect"""
# If you try to close a connection that's already closed, you're going to have a bad time. # If you try to close a connection that's already closed, you're going to have a bad time.
# We're breaking EAFP because this can be called multiple times depending on exception # We're breaking EAFP because this can be called multiple times depending on exception
# handling flow here. # handling flow here.
@ -128,43 +139,45 @@ class MessageListener:
self._channel.stop_consuming() self._channel.stop_consuming()
if not self._connection.is_closed and not self._connection.is_closing: if not self._connection.is_closed and not self._connection.is_closing:
self._connection.close() self._connection.close()
def graceful_shutdown(self, signum, frame): def graceful_shutdown(self, signum, frame):
'''Disconnect and break out of the message listening loop''' """Disconnect and break out of the message listening loop"""
self._shutdown = True self._shutdown = True
self.disconnect_from_messaging_server() self.disconnect_from_messaging_server()
def msg_received_callback(self, channel, method_frame, header_frame, body): def msg_received_callback(self, channel, method_frame, header_frame, body):
''' A callback method that runs when a RabbitMQ message is received. """A callback method that runs when a RabbitMQ message is received.
Here we parse the message, spin up an analyzer process, and report the Here we parse the message, spin up an analyzer process, and report the
metadata back to the Airtime web application (or report an error). metadata back to the Airtime web application (or report an error).
''' """
logging.info(" - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key)) logging.info(
" - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key)
#Declare all variables here so they exist in the exception handlers below, no matter what. )
# Declare all variables here so they exist in the exception handlers below, no matter what.
audio_file_path = "" audio_file_path = ""
#final_file_path = "" # final_file_path = ""
import_directory = "" import_directory = ""
original_filename = "" original_filename = ""
callback_url = "" callback_url = ""
api_key = "" api_key = ""
file_prefix = "" file_prefix = ""
''' Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue """ Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
to pass objects between the processes so that if the analyzer process crashes, it does not to pass objects between the processes so that if the analyzer process crashes, it does not
take down the rest of the daemon and we NACK that message so that it doesn't get take down the rest of the daemon and we NACK that message so that it doesn't get
propagated to other airtime_analyzer daemons (eg. running on other servers). propagated to other airtime_analyzer daemons (eg. running on other servers).
We avoid cascading failure this way. We avoid cascading failure this way.
''' """
try: try:
try: try:
body = body.decode() body = body.decode()
except (UnicodeDecodeError, AttributeError): except (UnicodeDecodeError, AttributeError):
pass pass
msg_dict = json.loads(body) msg_dict = json.loads(body)
api_key = msg_dict["api_key"] api_key = msg_dict["api_key"]
callback_url = msg_dict["callback_url"] callback_url = msg_dict["callback_url"]
audio_file_path = msg_dict["tmp_file_path"] audio_file_path = msg_dict["tmp_file_path"]
import_directory = msg_dict["import_directory"] import_directory = msg_dict["import_directory"]
@ -172,48 +185,71 @@ class MessageListener:
file_prefix = msg_dict["file_prefix"] file_prefix = msg_dict["file_prefix"]
storage_backend = msg_dict["storage_backend"] storage_backend = msg_dict["storage_backend"]
audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix) audio_metadata = MessageListener.spawn_analyzer_process(
StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata) audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
)
StatusReporter.report_success_to_callback_url(
callback_url, api_key, audio_metadata
)
except KeyError as e: except KeyError as e:
# A field in msg_dict that we needed was missing (eg. audio_file_path) # A field in msg_dict that we needed was missing (eg. audio_file_path)
logging.exception("A mandatory airtime_analyzer message field was missing from the message.") logging.exception(
"A mandatory airtime_analyzer message field was missing from the message."
)
# See the huge comment about NACK below. # See the huge comment about NACK below.
channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False, channel.basic_nack(
requeue=False) #Important that it doesn't requeue the message delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
) # Important that it doesn't requeue the message
except Exception as e: except Exception as e:
logging.exception(e) logging.exception(e)
''' If ANY exception happens while processing a file, we're going to NACK to the """ If ANY exception happens while processing a file, we're going to NACK to the
messaging server and tell it to remove the message from the queue. messaging server and tell it to remove the message from the queue.
(NACK is a negative acknowledgement. We could use ACK instead, but this might come (NACK is a negative acknowledgement. We could use ACK instead, but this might come
in handy in the future.) in handy in the future.)
Exceptions in this context are unexpected, unhandled errors. We try to recover Exceptions in this context are unexpected, unhandled errors. We try to recover
from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves
here from any catastrophic or genuinely unexpected errors: here from any catastrophic or genuinely unexpected errors:
''' """
channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False, channel.basic_nack(
requeue=False) #Important that it doesn't requeue the message delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
) # Important that it doesn't requeue the message
# #
# TODO: If the JSON was invalid or the web server is down, # TODO: If the JSON was invalid or the web server is down,
# then don't report that failure to the REST API # then don't report that failure to the REST API
#TODO: Catch exceptions from this HTTP request too: # TODO: Catch exceptions from this HTTP request too:
if callback_url: # If we got an invalid message, there might be no callback_url in the JSON if (
callback_url
): # If we got an invalid message, there might be no callback_url in the JSON
# Report this as a failed upload to the File Upload REST API. # Report this as a failed upload to the File Upload REST API.
StatusReporter.report_failure_to_callback_url(callback_url, api_key, import_status=2, StatusReporter.report_failure_to_callback_url(
reason='An error occurred while importing this file') callback_url,
api_key,
import_status=2,
reason="An error occurred while importing this file",
)
else: else:
# ACK at the very end, after the message has been successfully processed. # ACK at the very end, after the message has been successfully processed.
# If we don't ack, then RabbitMQ will redeliver the message in the future. # If we don't ack, then RabbitMQ will redeliver the message in the future.
channel.basic_ack(delivery_tag=method_frame.delivery_tag) channel.basic_ack(delivery_tag=method_frame.delivery_tag)
@staticmethod @staticmethod
def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix): def spawn_analyzer_process(
''' Spawn a child process to analyze and import a new audio file. ''' audio_file_path,
''' import_directory,
original_filename,
storage_backend,
file_prefix,
):
"""Spawn a child process to analyze and import a new audio file."""
"""
q = multiprocessing.Queue() q = multiprocessing.Queue()
p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis, p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis,
args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix)) args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix))
@ -225,12 +261,19 @@ class MessageListener:
logging.info(results) logging.info(results)
else: else:
raise Exception("Analyzer process terminated unexpectedly.") raise Exception("Analyzer process terminated unexpectedly.")
''' """
metadata = {} metadata = {}
q = queue.Queue() q = queue.Queue()
try: try:
AnalyzerPipeline.run_analysis(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix) AnalyzerPipeline.run_analysis(
q,
audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
)
metadata = q.get() metadata = q.get()
except Exception as e: except Exception as e:
logging.error("Analyzer pipeline exception: %s" % str(e)) logging.error("Analyzer pipeline exception: %s" % str(e))
@ -241,4 +284,3 @@ class MessageListener:
q.get() q.get()
return metadata return metadata

View File

@ -9,32 +9,36 @@ import os
import hashlib import hashlib
from .analyzer import Analyzer from .analyzer import Analyzer
class MetadataAnalyzer(Analyzer):
class MetadataAnalyzer(Analyzer):
@staticmethod @staticmethod
def analyze(filename, metadata): def analyze(filename, metadata):
''' Extract audio metadata from tags embedded in the file (eg. ID3 tags) """Extract audio metadata from tags embedded in the file (eg. ID3 tags)
Keyword arguments: Keyword arguments:
filename: The path to the audio file to extract metadata from. filename: The path to the audio file to extract metadata from.
metadata: A dictionary that the extracted metadata will be added to. metadata: A dictionary that the extracted metadata will be added to.
''' """
if not isinstance(filename, str): if not isinstance(filename, str):
raise TypeError("filename must be string. Was of type " + type(filename).__name__) raise TypeError(
"filename must be string. Was of type " + type(filename).__name__
)
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__) raise TypeError(
"metadata must be a dict. Was of type " + type(metadata).__name__
)
if not os.path.exists(filename): if not os.path.exists(filename):
raise FileNotFoundError("audio file not found: {}".format(filename)) raise FileNotFoundError("audio file not found: {}".format(filename))
#Airtime <= 2.5.x nonsense: # Airtime <= 2.5.x nonsense:
metadata["ftype"] = "audioclip" metadata["ftype"] = "audioclip"
#Other fields we'll want to set for Airtime: # Other fields we'll want to set for Airtime:
metadata["hidden"] = False metadata["hidden"] = False
# Get file size and md5 hash of the file # Get file size and md5 hash of the file
metadata["filesize"] = os.path.getsize(filename) metadata["filesize"] = os.path.getsize(filename)
with open(filename, 'rb') as fh: with open(filename, "rb") as fh:
m = hashlib.md5() m = hashlib.md5()
while True: while True:
data = fh.read(8192) data = fh.read(8192)
@ -46,37 +50,41 @@ class MetadataAnalyzer(Analyzer):
# Mutagen doesn't handle WAVE files so we use a different package # Mutagen doesn't handle WAVE files so we use a different package
ms = magic.open(magic.MIME_TYPE) ms = magic.open(magic.MIME_TYPE)
ms.load() ms.load()
with open(filename, 'rb') as fh: with open(filename, "rb") as fh:
mime_check = ms.buffer(fh.read(2014)) mime_check = ms.buffer(fh.read(2014))
metadata["mime"] = mime_check metadata["mime"] = mime_check
if mime_check == 'audio/x-wav': if mime_check == "audio/x-wav":
return MetadataAnalyzer._analyze_wave(filename, metadata) return MetadataAnalyzer._analyze_wave(filename, metadata)
#Extract metadata from an audio file using mutagen # Extract metadata from an audio file using mutagen
audio_file = mutagen.File(filename, easy=True) audio_file = mutagen.File(filename, easy=True)
#Bail if the file couldn't be parsed. The title should stay as the filename # Bail if the file couldn't be parsed. The title should stay as the filename
#inside Airtime. # inside Airtime.
if audio_file == None: # Don't use "if not" here. It is wrong due to mutagen's design. if (
audio_file == None
): # Don't use "if not" here. It is wrong due to mutagen's design.
return metadata return metadata
# Note that audio_file can equal {} if the file is valid but there's no metadata tags. # Note that audio_file can equal {} if the file is valid but there's no metadata tags.
# We can still try to grab the info variables below. # We can still try to grab the info variables below.
#Grab other file information that isn't encoded in a tag, but instead usually # Grab other file information that isn't encoded in a tag, but instead usually
#in the file header. Mutagen breaks that out into a separate "info" object: # in the file header. Mutagen breaks that out into a separate "info" object:
info = audio_file.info info = audio_file.info
if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent
metadata["sample_rate"] = info.sample_rate metadata["sample_rate"] = info.sample_rate
if hasattr(info, "length"): if hasattr(info, "length"):
metadata["length_seconds"] = info.length metadata["length_seconds"] = info.length
#Converting the length in seconds (float) to a formatted time string # Converting the length in seconds (float) to a formatted time string
track_length = datetime.timedelta(seconds=info.length) track_length = datetime.timedelta(seconds=info.length)
metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length) metadata["length"] = str(
track_length
) # time.strftime("%H:%M:%S.%f", track_length)
# Other fields for Airtime # Other fields for Airtime
metadata["cueout"] = metadata["length"] metadata["cueout"] = metadata["length"]
# Set a default cue in time in seconds # Set a default cue in time in seconds
metadata["cuein"] = 0.0; metadata["cuein"] = 0.0
if hasattr(info, "bitrate"): if hasattr(info, "bitrate"):
metadata["bit_rate"] = info.bitrate metadata["bit_rate"] = info.bitrate
@ -86,11 +94,11 @@ class MetadataAnalyzer(Analyzer):
if audio_file.mime: if audio_file.mime:
metadata["mime"] = audio_file.mime[0] metadata["mime"] = audio_file.mime[0]
#Try to get the number of channels if mutagen can... # Try to get the number of channels if mutagen can...
try: try:
#Special handling for getting the # of channels from MP3s. It's in the "mode" field # Special handling for getting the # of channels from MP3s. It's in the "mode" field
#which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec... # which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec...
if metadata["mime"] in ["audio/mpeg", 'audio/mp3']: if metadata["mime"] in ["audio/mpeg", "audio/mp3"]:
if info.mode == 3: if info.mode == 3:
metadata["channels"] = 1 metadata["channels"] = 1
else: else:
@ -98,54 +106,54 @@ class MetadataAnalyzer(Analyzer):
else: else:
metadata["channels"] = info.channels metadata["channels"] = info.channels
except (AttributeError, KeyError): except (AttributeError, KeyError):
#If mutagen can't figure out the number of channels, we'll just leave it out... # If mutagen can't figure out the number of channels, we'll just leave it out...
pass pass
#Try to extract the number of tracks on the album if we can (the "track total") # Try to extract the number of tracks on the album if we can (the "track total")
try: try:
track_number = audio_file["tracknumber"] track_number = audio_file["tracknumber"]
if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh
track_number = track_number[0] track_number = track_number[0]
track_number_tokens = track_number track_number_tokens = track_number
if '/' in track_number: if "/" in track_number:
track_number_tokens = track_number.split('/') track_number_tokens = track_number.split("/")
track_number = track_number_tokens[0] track_number = track_number_tokens[0]
elif '-' in track_number: elif "-" in track_number:
track_number_tokens = track_number.split('-') track_number_tokens = track_number.split("-")
track_number = track_number_tokens[0] track_number = track_number_tokens[0]
metadata["track_number"] = track_number metadata["track_number"] = track_number
track_total = track_number_tokens[1] track_total = track_number_tokens[1]
metadata["track_total"] = track_total metadata["track_total"] = track_total
except (AttributeError, KeyError, IndexError): except (AttributeError, KeyError, IndexError):
#If we couldn't figure out the track_number or track_total, just ignore it... # If we couldn't figure out the track_number or track_total, just ignore it...
pass pass
#We normalize the mutagen tags slightly here, so in case mutagen changes, # We normalize the mutagen tags slightly here, so in case mutagen changes,
#we find the # we find the
mutagen_to_airtime_mapping = { mutagen_to_airtime_mapping = {
'title': 'track_title', "title": "track_title",
'artist': 'artist_name', "artist": "artist_name",
'album': 'album_title', "album": "album_title",
'bpm': 'bpm', "bpm": "bpm",
'composer': 'composer', "composer": "composer",
'conductor': 'conductor', "conductor": "conductor",
'copyright': 'copyright', "copyright": "copyright",
'comment': 'comment', "comment": "comment",
'encoded_by': 'encoder', "encoded_by": "encoder",
'genre': 'genre', "genre": "genre",
'isrc': 'isrc', "isrc": "isrc",
'label': 'label', "label": "label",
'organization': 'label', "organization": "label",
#'length': 'length', #'length': 'length',
'language': 'language', "language": "language",
'last_modified':'last_modified', "last_modified": "last_modified",
'mood': 'mood', "mood": "mood",
'bit_rate': 'bit_rate', "bit_rate": "bit_rate",
'replay_gain': 'replaygain', "replay_gain": "replaygain",
#'tracknumber': 'track_number', #'tracknumber': 'track_number',
#'track_total': 'track_total', #'track_total': 'track_total',
'website': 'website', "website": "website",
'date': 'year', "date": "year",
#'mime_type': 'mime', #'mime_type': 'mime',
} }
@ -158,7 +166,7 @@ class MetadataAnalyzer(Analyzer):
if isinstance(metadata[airtime_tag], list): if isinstance(metadata[airtime_tag], list):
if metadata[airtime_tag]: if metadata[airtime_tag]:
metadata[airtime_tag] = metadata[airtime_tag][0] metadata[airtime_tag] = metadata[airtime_tag][0]
else: # Handle empty lists else: # Handle empty lists
metadata[airtime_tag] = "" metadata[airtime_tag] = ""
except KeyError: except KeyError:
@ -169,13 +177,15 @@ class MetadataAnalyzer(Analyzer):
@staticmethod @staticmethod
def _analyze_wave(filename, metadata): def _analyze_wave(filename, metadata):
try: try:
reader = wave.open(filename, 'rb') reader = wave.open(filename, "rb")
metadata["channels"] = reader.getnchannels() metadata["channels"] = reader.getnchannels()
metadata["sample_rate"] = reader.getframerate() metadata["sample_rate"] = reader.getframerate()
length_seconds = float(reader.getnframes()) / float(metadata["sample_rate"]) length_seconds = float(reader.getnframes()) / float(metadata["sample_rate"])
#Converting the length in seconds (float) to a formatted time string # Converting the length in seconds (float) to a formatted time string
track_length = datetime.timedelta(seconds=length_seconds) track_length = datetime.timedelta(seconds=length_seconds)
metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length) metadata["length"] = str(
track_length
) # time.strftime("%H:%M:%S.%f", track_length)
metadata["length_seconds"] = length_seconds metadata["length_seconds"] = length_seconds
metadata["cueout"] = metadata["length"] metadata["cueout"] = metadata["length"]
except wave.Error as ex: except wave.Error as ex:

View File

@ -1,32 +1,47 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
__author__ = 'asantoni' __author__ = "asantoni"
import subprocess import subprocess
import logging import logging
from .analyzer import Analyzer from .analyzer import Analyzer
class UnplayableFileError(Exception): class UnplayableFileError(Exception):
pass pass
class PlayabilityAnalyzer(Analyzer):
''' This class checks if a file can actually be played with Liquidsoap. '''
LIQUIDSOAP_EXECUTABLE = 'liquidsoap' class PlayabilityAnalyzer(Analyzer):
"""This class checks if a file can actually be played with Liquidsoap."""
LIQUIDSOAP_EXECUTABLE = "liquidsoap"
@staticmethod @staticmethod
def analyze(filename, metadata): def analyze(filename, metadata):
''' Checks if a file can be played by Liquidsoap. """Checks if a file can be played by Liquidsoap.
:param filename: The full path to the file to analyzer :param filename: The full path to the file to analyzer
:param metadata: A metadata dictionary where the results will be put :param metadata: A metadata dictionary where the results will be put
:return: The metadata dictionary :return: The metadata dictionary
''' """
command = [PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE, '-v', '-c', "output.dummy(audio_to_stereo(single(argv(1))))", '--', filename] command = [
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE,
"-v",
"-c",
"output.dummy(audio_to_stereo(single(argv(1))))",
"--",
filename,
]
try: try:
subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True) subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
except OSError as e: # liquidsoap was not found except OSError as e: # liquidsoap was not found
logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have liquidsoap installed?")) logging.warn(
except (subprocess.CalledProcessError, Exception) as e: # liquidsoap returned an error code "Failed to run: %s - %s. %s"
% (command[0], e.strerror, "Do you have liquidsoap installed?")
)
except (
subprocess.CalledProcessError,
Exception,
) as e: # liquidsoap returned an error code
logging.warn(e) logging.warn(e)
raise UnplayableFileError() raise UnplayableFileError()

View File

@ -6,30 +6,39 @@ import re
class ReplayGainAnalyzer(Analyzer): class ReplayGainAnalyzer(Analyzer):
''' This class extracts the ReplayGain using a tool from the python-rgain package. ''' """This class extracts the ReplayGain using a tool from the python-rgain package."""
REPLAYGAIN_EXECUTABLE = 'replaygain' # From the rgain3 python package REPLAYGAIN_EXECUTABLE = "replaygain" # From the rgain3 python package
@staticmethod @staticmethod
def analyze(filename, metadata): def analyze(filename, metadata):
''' Extracts the Replaygain loudness normalization factor of a track. """Extracts the Replaygain loudness normalization factor of a track.
:param filename: The full path to the file to analyzer :param filename: The full path to the file to analyzer
:param metadata: A metadata dictionary where the results will be put :param metadata: A metadata dictionary where the results will be put
:return: The metadata dictionary :return: The metadata dictionary
''' """
''' The -d flag means do a dry-run, ie. don't modify the file directly. """ The -d flag means do a dry-run, ie. don't modify the file directly.
''' """
command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, '-d', filename] command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, "-d", filename]
try: try:
results = subprocess.check_output(command, stderr=subprocess.STDOUT, results = subprocess.check_output(
close_fds=True, universal_newlines=True) command,
gain_match = r'Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB' stderr=subprocess.STDOUT,
close_fds=True,
universal_newlines=True,
)
gain_match = (
r"Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB"
)
replaygain = re.search(gain_match, results).group(1) replaygain = re.search(gain_match, results).group(1)
metadata['replay_gain'] = float(replaygain) metadata["replay_gain"] = float(replaygain)
except OSError as e: # replaygain was not found except OSError as e: # replaygain was not found
logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have python-rgain installed?")) logging.warn(
except subprocess.CalledProcessError as e: # replaygain returned an error code "Failed to run: %s - %s. %s"
% (command[0], e.strerror, "Do you have python-rgain installed?")
)
except subprocess.CalledProcessError as e: # replaygain returned an error code
logging.warn("%s %s %s", e.cmd, e.output, e.returncode) logging.warn("%s %s %s", e.cmd, e.output, e.returncode)
except Exception as e: except Exception as e:
logging.warn(e) logging.warn(e)

View File

@ -7,14 +7,15 @@ import queue
import time import time
import traceback import traceback
import pickle import pickle
import threading import threading
from urllib.parse import urlparse from urllib.parse import urlparse
# Disable urllib3 warnings because these can cause a rare deadlock due to Python 2's crappy internal non-reentrant locking # Disable urllib3 warnings because these can cause a rare deadlock due to Python 2's crappy internal non-reentrant locking
# around POSIX stuff. See SAAS-714. The hasattr() is for compatibility with older versions of requests. # around POSIX stuff. See SAAS-714. The hasattr() is for compatibility with older versions of requests.
if hasattr(requests, 'packages'): if hasattr(requests, "packages"):
requests.packages.urllib3.disable_warnings() requests.packages.urllib3.disable_warnings()
class PicklableHttpRequest: class PicklableHttpRequest:
def __init__(self, method, url, data, api_key): def __init__(self, method, url, data, api_key):
self.method = method self.method = method
@ -23,18 +24,23 @@ class PicklableHttpRequest:
self.api_key = api_key self.api_key = api_key
def create_request(self): def create_request(self):
return requests.Request(method=self.method, url=self.url, data=self.data, return requests.Request(
auth=requests.auth.HTTPBasicAuth(self.api_key, '')) method=self.method,
url=self.url,
data=self.data,
auth=requests.auth.HTTPBasicAuth(self.api_key, ""),
)
def process_http_requests(ipc_queue, http_retry_queue_path): def process_http_requests(ipc_queue, http_retry_queue_path):
''' Runs in a separate thread and performs all the HTTP requests where we're """Runs in a separate thread and performs all the HTTP requests where we're
reporting extracted audio file metadata or errors back to the Airtime web application. reporting extracted audio file metadata or errors back to the Airtime web application.
This process also checks every 5 seconds if there's failed HTTP requests that we This process also checks every 5 seconds if there's failed HTTP requests that we
need to retry. We retry failed HTTP requests so that we don't lose uploads if the need to retry. We retry failed HTTP requests so that we don't lose uploads if the
web server is temporarily down. web server is temporarily down.
''' """
# Store any failed requests (eg. due to web server errors or downtime) to be # Store any failed requests (eg. due to web server errors or downtime) to be
# retried later: # retried later:
@ -45,7 +51,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
# if airtime_analyzer is shut down while the web server is down or unreachable, # if airtime_analyzer is shut down while the web server is down or unreachable,
# and there were failed HTTP requests pending, waiting to be retried. # and there were failed HTTP requests pending, waiting to be retried.
try: try:
with open(http_retry_queue_path, 'rb') as pickle_file: with open(http_retry_queue_path, "rb") as pickle_file:
retry_queue = pickle.load(pickle_file) retry_queue = pickle.load(pickle_file)
except IOError as e: except IOError as e:
if e.errno == 2: if e.errno == 2:
@ -64,11 +70,16 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
while not shutdown: while not shutdown:
try: try:
request = ipc_queue.get(block=True, timeout=5) request = ipc_queue.get(block=True, timeout=5)
if isinstance(request, str) and request == "shutdown": # Bit of a cheat if (
isinstance(request, str) and request == "shutdown"
): # Bit of a cheat
shutdown = True shutdown = True
break break
if not isinstance(request, PicklableHttpRequest): if not isinstance(request, PicklableHttpRequest):
raise TypeError("request must be a PicklableHttpRequest. Was of type " + type(request).__name__) raise TypeError(
"request must be a PicklableHttpRequest. Was of type "
+ type(request).__name__
)
except queue.Empty: except queue.Empty:
request = None request = None
@ -85,32 +96,40 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
logging.info("Shutting down status_reporter") logging.info("Shutting down status_reporter")
# Pickle retry_queue to disk so that we don't lose uploads if we're shut down while # Pickle retry_queue to disk so that we don't lose uploads if we're shut down while
# while the web server is down or unreachable. # while the web server is down or unreachable.
with open(http_retry_queue_path, 'wb') as pickle_file: with open(http_retry_queue_path, "wb") as pickle_file:
pickle.dump(retry_queue, pickle_file) pickle.dump(retry_queue, pickle_file)
return return
except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case. except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case.
if shutdown: if shutdown:
return return
logging.exception("Unhandled exception in StatusReporter") logging.exception("Unhandled exception in StatusReporter")
logging.exception(e) logging.exception(e)
logging.info("Restarting StatusReporter thread") logging.info("Restarting StatusReporter thread")
time.sleep(2) # Throttle it time.sleep(2) # Throttle it
def send_http_request(picklable_request, retry_queue): def send_http_request(picklable_request, retry_queue):
if not isinstance(picklable_request, PicklableHttpRequest): if not isinstance(picklable_request, PicklableHttpRequest):
raise TypeError("picklable_request must be a PicklableHttpRequest. Was of type " + type(picklable_request).__name__) raise TypeError(
try: "picklable_request must be a PicklableHttpRequest. Was of type "
+ type(picklable_request).__name__
)
try:
bare_request = picklable_request.create_request() bare_request = picklable_request.create_request()
s = requests.Session() s = requests.Session()
prepared_request = s.prepare_request(bare_request) prepared_request = s.prepare_request(bare_request)
r = s.send(prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False) # SNI is a pain in the ass r = s.send(
r.raise_for_status() # Raise an exception if there was an http error code returned prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False
) # SNI is a pain in the ass
r.raise_for_status() # Raise an exception if there was an http error code returned
logging.info("HTTP request sent successfully.") logging.info("HTTP request sent successfully.")
except requests.exceptions.HTTPError as e: except requests.exceptions.HTTPError as e:
if e.response.status_code == 422: if e.response.status_code == 422:
# Do no retry the request if there was a metadata validation error # Do no retry the request if there was a metadata validation error
logging.error("HTTP request failed due to an HTTP exception. Exception was: %s" % str(e)) logging.error(
"HTTP request failed due to an HTTP exception. Exception was: %s"
% str(e)
)
else: else:
# The request failed with an error 500 probably, so let's check if Airtime and/or # The request failed with an error 500 probably, so let's check if Airtime and/or
# the web server are broken. If not, then our request was probably causing an # the web server are broken. If not, then our request was probably causing an
@ -124,8 +143,10 @@ def send_http_request(picklable_request, retry_queue):
# You will have to find these bad requests in logs or you'll be # You will have to find these bad requests in logs or you'll be
# notified by sentry. # notified by sentry.
except requests.exceptions.ConnectionError as e: except requests.exceptions.ConnectionError as e:
logging.error("HTTP request failed due to a connection error. Retrying later. %s" % str(e)) logging.error(
retry_queue.append(picklable_request) # Retry it later "HTTP request failed due to a connection error. Retrying later. %s" % str(e)
)
retry_queue.append(picklable_request) # Retry it later
except Exception as e: except Exception as e:
logging.error("HTTP request failed with unhandled exception. %s" % str(e)) logging.error("HTTP request failed with unhandled exception. %s" % str(e))
logging.error(traceback.format_exc()) logging.error(traceback.format_exc())
@ -134,12 +155,13 @@ def send_http_request(picklable_request, retry_queue):
# that breaks our code. I don't want us pickling data that potentially # that breaks our code. I don't want us pickling data that potentially
# breaks airtime_analyzer. # breaks airtime_analyzer.
def is_web_server_broken(url): def is_web_server_broken(url):
''' Do a naive test to check if the web server we're trying to access is down. """Do a naive test to check if the web server we're trying to access is down.
We use this to try to differentiate between error 500s that are coming We use this to try to differentiate between error 500s that are coming
from (for example) a bug in the Airtime Media REST API and error 500s from (for example) a bug in the Airtime Media REST API and error 500s
caused by Airtime or the webserver itself being broken temporarily. caused by Airtime or the webserver itself being broken temporarily.
''' """
try: try:
test_req = requests.get(url, verify=False) test_req = requests.get(url, verify=False)
test_req.raise_for_status() test_req.raise_for_status()
@ -147,35 +169,38 @@ def is_web_server_broken(url):
return True return True
else: else:
# The request worked fine, so the web server and Airtime are still up. # The request worked fine, so the web server and Airtime are still up.
return False return False
return False return False
class StatusReporter(): class StatusReporter:
''' Reports the extracted audio file metadata and job status back to the """Reports the extracted audio file metadata and job status back to the
Airtime web application. Airtime web application.
''' """
_HTTP_REQUEST_TIMEOUT = 30 _HTTP_REQUEST_TIMEOUT = 30
''' We use multiprocessing.Process again here because we need a thread for this stuff """ We use multiprocessing.Process again here because we need a thread for this stuff
anyways, and Python gives us process isolation for free (crash safety). anyways, and Python gives us process isolation for free (crash safety).
''' """
_ipc_queue = queue.Queue() _ipc_queue = queue.Queue()
#_http_thread = multiprocessing.Process(target=process_http_requests, # _http_thread = multiprocessing.Process(target=process_http_requests,
# args=(_ipc_queue,)) # args=(_ipc_queue,))
_http_thread = None _http_thread = None
@classmethod @classmethod
def start_thread(self, http_retry_queue_path): def start_thread(self, http_retry_queue_path):
StatusReporter._http_thread = threading.Thread(target=process_http_requests, StatusReporter._http_thread = threading.Thread(
args=(StatusReporter._ipc_queue,http_retry_queue_path)) target=process_http_requests,
args=(StatusReporter._ipc_queue, http_retry_queue_path),
)
StatusReporter._http_thread.start() StatusReporter._http_thread.start()
@classmethod @classmethod
def stop_thread(self): def stop_thread(self):
logging.info("Terminating status_reporter process") logging.info("Terminating status_reporter process")
#StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process # StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process
StatusReporter._ipc_queue.put("shutdown") # Special trigger StatusReporter._ipc_queue.put("shutdown") # Special trigger
StatusReporter._http_thread.join() StatusReporter._http_thread.join()
@classmethod @classmethod
@ -184,30 +209,33 @@ class StatusReporter():
@classmethod @classmethod
def report_success_to_callback_url(self, callback_url, api_key, audio_metadata): def report_success_to_callback_url(self, callback_url, api_key, audio_metadata):
''' Report the extracted metadata and status of the successfully imported file """Report the extracted metadata and status of the successfully imported file
to the callback URL (which should be the Airtime File Upload API) to the callback URL (which should be the Airtime File Upload API)
''' """
put_payload = json.dumps(audio_metadata) put_payload = json.dumps(audio_metadata)
#r = requests.Request(method='PUT', url=callback_url, data=put_payload, # r = requests.Request(method='PUT', url=callback_url, data=put_payload,
# auth=requests.auth.HTTPBasicAuth(api_key, '')) # auth=requests.auth.HTTPBasicAuth(api_key, ''))
''' """
r = requests.Request(method='PUT', url=callback_url, data=put_payload, r = requests.Request(method='PUT', url=callback_url, data=put_payload,
auth=requests.auth.HTTPBasicAuth(api_key, '')) auth=requests.auth.HTTPBasicAuth(api_key, ''))
StatusReporter._send_http_request(r) StatusReporter._send_http_request(r)
''' """
StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url, StatusReporter._send_http_request(
data=put_payload, api_key=api_key)) PicklableHttpRequest(
method="PUT", url=callback_url, data=put_payload, api_key=api_key
)
)
''' """
try: try:
r.raise_for_status() # Raise an exception if there was an http error code returned r.raise_for_status() # Raise an exception if there was an http error code returned
except requests.exceptions.RequestException: except requests.exceptions.RequestException:
StatusReporter._ipc_queue.put(r.prepare()) StatusReporter._ipc_queue.put(r.prepare())
''' """
''' """
# Encode the audio metadata as json and post it back to the callback_url # Encode the audio metadata as json and post it back to the callback_url
put_payload = json.dumps(audio_metadata) put_payload = json.dumps(audio_metadata)
logging.debug("sending http put with payload: " + put_payload) logging.debug("sending http put with payload: " + put_payload)
@ -219,31 +247,38 @@ class StatusReporter():
#TODO: queue up failed requests and try them again later. #TODO: queue up failed requests and try them again later.
r.raise_for_status() # Raise an exception if there was an http error code returned r.raise_for_status() # Raise an exception if there was an http error code returned
''' """
@classmethod @classmethod
def report_failure_to_callback_url(self, callback_url, api_key, import_status, reason): def report_failure_to_callback_url(
if not isinstance(import_status, int ): self, callback_url, api_key, import_status, reason
raise TypeError("import_status must be an integer. Was of type " + type(import_status).__name__) ):
if not isinstance(import_status, int):
raise TypeError(
"import_status must be an integer. Was of type "
+ type(import_status).__name__
)
logging.debug("Reporting import failure to Airtime REST API...") logging.debug("Reporting import failure to Airtime REST API...")
audio_metadata = dict() audio_metadata = dict()
audio_metadata["import_status"] = import_status audio_metadata["import_status"] = import_status
audio_metadata["comment"] = reason # hack attack audio_metadata["comment"] = reason # hack attack
put_payload = json.dumps(audio_metadata) put_payload = json.dumps(audio_metadata)
#logging.debug("sending http put with payload: " + put_payload) # logging.debug("sending http put with payload: " + put_payload)
''' """
r = requests.put(callback_url, data=put_payload, r = requests.put(callback_url, data=put_payload,
auth=requests.auth.HTTPBasicAuth(api_key, ''), auth=requests.auth.HTTPBasicAuth(api_key, ''),
timeout=StatusReporter._HTTP_REQUEST_TIMEOUT) timeout=StatusReporter._HTTP_REQUEST_TIMEOUT)
''' """
StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url, StatusReporter._send_http_request(
data=put_payload, api_key=api_key)) PicklableHttpRequest(
''' method="PUT", url=callback_url, data=put_payload, api_key=api_key
)
)
"""
logging.debug("HTTP request returned status: " + str(r.status_code)) logging.debug("HTTP request returned status: " + str(r.status_code))
logging.debug(r.text) # log the response body logging.debug(r.text) # log the response body
#TODO: queue up failed requests and try them again later. #TODO: queue up failed requests and try them again later.
r.raise_for_status() # raise an exception if there was an http error code returned r.raise_for_status() # raise an exception if there was an http error code returned
''' """

View File

@ -2,12 +2,14 @@
from nose.tools import * from nose.tools import *
import airtime_analyzer import airtime_analyzer
def setup(): def setup():
pass pass
def teardown(): def teardown():
pass pass
def test_basic(): def test_basic():
pass pass

View File

@ -8,48 +8,58 @@ import datetime
from airtime_analyzer.analyzer_pipeline import AnalyzerPipeline from airtime_analyzer.analyzer_pipeline import AnalyzerPipeline
from airtime_analyzer import config_file from airtime_analyzer import config_file
DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3' DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3"
DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3' DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3"
def setup(): def setup():
pass pass
def teardown(): def teardown():
#Move the file back # Move the file back
shutil.move(DEFAULT_IMPORT_DEST, DEFAULT_AUDIO_FILE) shutil.move(DEFAULT_IMPORT_DEST, DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE)
def test_basic(): def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE) filename = os.path.basename(DEFAULT_AUDIO_FILE)
q = Queue() q = Queue()
file_prefix = u'' file_prefix = u""
storage_backend = "file" storage_backend = "file"
#This actually imports the file into the "./Test Artist" directory. # This actually imports the file into the "./Test Artist" directory.
AnalyzerPipeline.run_analysis(q, DEFAULT_AUDIO_FILE, u'.', filename, storage_backend, file_prefix) AnalyzerPipeline.run_analysis(
q, DEFAULT_AUDIO_FILE, u".", filename, storage_backend, file_prefix
)
metadata = q.get() metadata = q.get()
assert metadata['track_title'] == u'Test Title' assert metadata["track_title"] == u"Test Title"
assert metadata['artist_name'] == u'Test Artist' assert metadata["artist_name"] == u"Test Artist"
assert metadata['album_title'] == u'Test Album' assert metadata["album_title"] == u"Test Album"
assert metadata['year'] == u'1999' assert metadata["year"] == u"1999"
assert metadata['genre'] == u'Test Genre' assert metadata["genre"] == u"Test Genre"
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't. assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't.
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"])) assert metadata["length"] == str(
datetime.timedelta(seconds=metadata["length_seconds"])
)
assert os.path.exists(DEFAULT_IMPORT_DEST) assert os.path.exists(DEFAULT_IMPORT_DEST)
@raises(TypeError) @raises(TypeError)
def test_wrong_type_queue_param(): def test_wrong_type_queue_param():
AnalyzerPipeline.run_analysis(Queue(), u'', u'', u'') AnalyzerPipeline.run_analysis(Queue(), u"", u"", u"")
@raises(TypeError) @raises(TypeError)
def test_wrong_type_string_param2(): def test_wrong_type_string_param2():
AnalyzerPipeline.run_analysis(Queue(), '', u'', u'') AnalyzerPipeline.run_analysis(Queue(), "", u"", u"")
@raises(TypeError) @raises(TypeError)
def test_wrong_type_string_param3(): def test_wrong_type_string_param3():
AnalyzerPipeline.run_analysis(Queue(), u'', '', u'') AnalyzerPipeline.run_analysis(Queue(), u"", "", u"")
@raises(TypeError) @raises(TypeError)
def test_wrong_type_string_param4(): def test_wrong_type_string_param4():
AnalyzerPipeline.run_analysis(Queue(), u'', u'', '') AnalyzerPipeline.run_analysis(Queue(), u"", u"", "")

View File

@ -2,13 +2,16 @@
from nose.tools import * from nose.tools import *
from airtime_analyzer.analyzer import Analyzer from airtime_analyzer.analyzer import Analyzer
def setup(): def setup():
pass pass
def teardown(): def teardown():
pass pass
@raises(NotImplementedError) @raises(NotImplementedError)
def test_analyze(): def test_analyze():
abstract_analyzer = Analyzer() abstract_analyzer = Analyzer()
abstract_analyzer.analyze(u'foo', dict()) abstract_analyzer.analyze(u"foo", dict())

View File

@ -2,63 +2,97 @@
from nose.tools import * from nose.tools import *
from airtime_analyzer.cuepoint_analyzer import CuePointAnalyzer from airtime_analyzer.cuepoint_analyzer import CuePointAnalyzer
def check_default_metadata(metadata): def check_default_metadata(metadata):
''' Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect. """Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect.
:param metadata: a metadata dictionary :param metadata: a metadata dictionary
:return: Nothing :return: Nothing
''' """
# We give silan some leeway here by specifying a tolerance # We give silan some leeway here by specifying a tolerance
tolerance_seconds = 0.1 tolerance_seconds = 0.1
length_seconds = 3.9 length_seconds = 3.9
assert abs(metadata['length_seconds'] - length_seconds) < tolerance_seconds assert abs(metadata["length_seconds"] - length_seconds) < tolerance_seconds
assert abs(float(metadata['cuein'])) < tolerance_seconds assert abs(float(metadata["cuein"])) < tolerance_seconds
assert abs(float(metadata['cueout']) - length_seconds) < tolerance_seconds assert abs(float(metadata["cueout"]) - length_seconds) < tolerance_seconds
def test_missing_silan(): def test_missing_silan():
old_silan = CuePointAnalyzer.SILAN_EXECUTABLE old_silan = CuePointAnalyzer.SILAN_EXECUTABLE
CuePointAnalyzer.SILAN_EXECUTABLE = 'foosdaf' CuePointAnalyzer.SILAN_EXECUTABLE = "foosdaf"
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = CuePointAnalyzer.analyze(
CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back
def test_invalid_filepath(): def test_invalid_filepath():
metadata = CuePointAnalyzer.analyze(u'non-existent-file', dict()) metadata = CuePointAnalyzer.analyze(u"non-existent-file", dict())
def test_mp3_utf8(): def test_mp3_utf8():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_dualmono(): def test_mp3_dualmono():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_jointstereo(): def test_mp3_jointstereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_simplestereo(): def test_mp3_simplestereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_stereo(): def test_mp3_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_mono(): def test_mp3_mono():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_ogg_stereo(): def test_ogg_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_invalid_wma(): def test_invalid_wma():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
def test_m4a_stereo(): def test_m4a_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_wav_stereo(): def test_wav_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict()) metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)

View File

@ -8,109 +8,125 @@ import mock
from pprint import pprint from pprint import pprint
from airtime_analyzer.filemover_analyzer import FileMoverAnalyzer from airtime_analyzer.filemover_analyzer import FileMoverAnalyzer
DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3' DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3"
DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3' DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3"
def setup(): def setup():
pass pass
def teardown(): def teardown():
pass pass
@raises(Exception) @raises(Exception)
def test_dont_use_analyze(): def test_dont_use_analyze():
FileMoverAnalyzer.analyze(u'foo', dict()) FileMoverAnalyzer.analyze(u"foo", dict())
@raises(TypeError) @raises(TypeError)
def test_move_wrong_string_param1(): def test_move_wrong_string_param1():
FileMoverAnalyzer.move(42, '', '', dict()) FileMoverAnalyzer.move(42, "", "", dict())
@raises(TypeError) @raises(TypeError)
def test_move_wrong_string_param2(): def test_move_wrong_string_param2():
FileMoverAnalyzer.move(u'', 23, u'', dict()) FileMoverAnalyzer.move(u"", 23, u"", dict())
@raises(TypeError) @raises(TypeError)
def test_move_wrong_string_param3(): def test_move_wrong_string_param3():
FileMoverAnalyzer.move('', '', 5, dict()) FileMoverAnalyzer.move("", "", 5, dict())
@raises(TypeError) @raises(TypeError)
def test_move_wrong_dict_param(): def test_move_wrong_dict_param():
FileMoverAnalyzer.move('', '', '', 12345) FileMoverAnalyzer.move("", "", "", 12345)
@raises(FileNotFoundError) @raises(FileNotFoundError)
def test_move_wrong_string_param3(): def test_move_wrong_string_param3():
FileMoverAnalyzer.move('', '', '', dict()) FileMoverAnalyzer.move("", "", "", dict())
def test_basic(): def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE) filename = os.path.basename(DEFAULT_AUDIO_FILE)
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict()) FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
#Move the file back # Move the file back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE) shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE)
def test_basic_samefile(): def test_basic_samefile():
filename = os.path.basename(DEFAULT_AUDIO_FILE) filename = os.path.basename(DEFAULT_AUDIO_FILE)
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'tests/test_data', filename, dict()) FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u"tests/test_data", filename, dict())
assert os.path.exists(DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE)
def test_duplicate_file(): def test_duplicate_file():
filename = os.path.basename(DEFAULT_AUDIO_FILE) filename = os.path.basename(DEFAULT_AUDIO_FILE)
#Import the file once # Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict()) FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
#Copy it back to the original location # Copy it back to the original location
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE) shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Import it again. It shouldn't overwrite the old file and instead create a new # Import it again. It shouldn't overwrite the old file and instead create a new
metadata = dict() metadata = dict()
metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, metadata) metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, metadata)
#Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back # Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE) shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
#Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3 # Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
os.remove(metadata["full_path"]) os.remove(metadata["full_path"])
assert os.path.exists(DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE)
''' If you import three copies of the same file, the behaviour is:
""" If you import three copies of the same file, the behaviour is:
- The filename is of the first file preserved. - The filename is of the first file preserved.
- The filename of the second file has the timestamp attached to it. - The filename of the second file has the timestamp attached to it.
- The filename of the third file has a UUID placed after the timestamp, but ONLY IF - The filename of the third file has a UUID placed after the timestamp, but ONLY IF
it's imported within 1 second of the second file (ie. if the timestamp is the same). it's imported within 1 second of the second file (ie. if the timestamp is the same).
''' """
def test_double_duplicate_files(): def test_double_duplicate_files():
# Here we use mock to patch out the time.localtime() function so that it # Here we use mock to patch out the time.localtime() function so that it
# always returns the same value. This allows us to consistently simulate this test cases # always returns the same value. This allows us to consistently simulate this test cases
# where the last two of the three files are imported at the same time as the timestamp. # where the last two of the three files are imported at the same time as the timestamp.
with mock.patch('airtime_analyzer.filemover_analyzer.time') as mock_time: with mock.patch("airtime_analyzer.filemover_analyzer.time") as mock_time:
mock_time.localtime.return_value = time.localtime()#date(2010, 10, 8) mock_time.localtime.return_value = time.localtime() # date(2010, 10, 8)
mock_time.side_effect = lambda *args, **kw: time(*args, **kw) mock_time.side_effect = lambda *args, **kw: time(*args, **kw)
filename = os.path.basename(DEFAULT_AUDIO_FILE) filename = os.path.basename(DEFAULT_AUDIO_FILE)
#Import the file once # Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict()) FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
#Copy it back to the original location # Copy it back to the original location
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE) shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Import it again. It shouldn't overwrite the old file and instead create a new # Import it again. It shouldn't overwrite the old file and instead create a new
first_dup_metadata = dict() first_dup_metadata = dict()
first_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, first_dup_metadata = FileMoverAnalyzer.move(
first_dup_metadata) DEFAULT_AUDIO_FILE, u".", filename, first_dup_metadata
#Copy it back again! )
# Copy it back again!
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE) shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Reimport for the third time, which should have the same timestamp as the second one # Reimport for the third time, which should have the same timestamp as the second one
#thanks to us mocking out time.localtime() # thanks to us mocking out time.localtime()
second_dup_metadata = dict() second_dup_metadata = dict()
second_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, second_dup_metadata = FileMoverAnalyzer.move(
second_dup_metadata) DEFAULT_AUDIO_FILE, u".", filename, second_dup_metadata
#Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back )
# Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE) shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
#Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3 # Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
os.remove(first_dup_metadata["full_path"]) os.remove(first_dup_metadata["full_path"])
os.remove(second_dup_metadata["full_path"]) os.remove(second_dup_metadata["full_path"])
assert os.path.exists(DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE)
@raises(OSError) @raises(OSError)
def test_bad_permissions_destination_dir(): def test_bad_permissions_destination_dir():
filename = os.path.basename(DEFAULT_AUDIO_FILE) filename = os.path.basename(DEFAULT_AUDIO_FILE)
dest_dir = u'/sys/foobar' # /sys is using sysfs on Linux, which is unwritable dest_dir = u"/sys/foobar" # /sys is using sysfs on Linux, which is unwritable
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, dest_dir, filename, dict()) FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, dest_dir, filename, dict())
#Move the file back # Move the file back
shutil.move(os.path.join(dest_dir, filename), DEFAULT_AUDIO_FILE) shutil.move(os.path.join(dest_dir, filename), DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE) assert os.path.exists(DEFAULT_AUDIO_FILE)

View File

@ -6,78 +6,101 @@ import mock
from nose.tools import * from nose.tools import *
from airtime_analyzer.metadata_analyzer import MetadataAnalyzer from airtime_analyzer.metadata_analyzer import MetadataAnalyzer
def setup(): def setup():
pass pass
def teardown(): def teardown():
pass pass
def check_default_metadata(metadata): def check_default_metadata(metadata):
assert metadata['track_title'] == 'Test Title' assert metadata["track_title"] == "Test Title"
assert metadata['artist_name'] == 'Test Artist' assert metadata["artist_name"] == "Test Artist"
assert metadata['album_title'] == 'Test Album' assert metadata["album_title"] == "Test Album"
assert metadata['year'] == '1999' assert metadata["year"] == "1999"
assert metadata['genre'] == 'Test Genre' assert metadata["genre"] == "Test Genre"
assert metadata['track_number'] == '1' assert metadata["track_number"] == "1"
assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"])) assert metadata["length"] == str(
datetime.timedelta(seconds=metadata["length_seconds"])
)
def test_mp3_mono(): def test_mp3_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.mp3', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 1 assert metadata["channels"] == 1
assert metadata['bit_rate'] == 63998 assert metadata["bit_rate"] == 63998
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't. assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't.
assert metadata['track_total'] == '10' # MP3s can have a track_total assert metadata["track_total"] == "10" # MP3s can have a track_total
#Mutagen doesn't extract comments from mp3s it seems # Mutagen doesn't extract comments from mp3s it seems
def test_mp3_jointstereo(): def test_mp3_jointstereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 2 assert metadata["channels"] == 2
assert metadata['bit_rate'] == 127998 assert metadata["bit_rate"] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' assert metadata["mime"] == "audio/mp3"
assert metadata['track_total'] == '10' # MP3s can have a track_total assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_mp3_simplestereo(): def test_mp3_simplestereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 2 assert metadata["channels"] == 2
assert metadata['bit_rate'] == 127998 assert metadata["bit_rate"] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' assert metadata["mime"] == "audio/mp3"
assert metadata['track_total'] == '10' # MP3s can have a track_total assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_mp3_dualmono(): def test_mp3_dualmono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 2 assert metadata["channels"] == 2
assert metadata['bit_rate'] == 127998 assert metadata["bit_rate"] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' assert metadata["mime"] == "audio/mp3"
assert metadata['track_total'] == '10' # MP3s can have a track_total assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_ogg_mono(): def test_ogg_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.ogg', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-mono.ogg", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 1 assert metadata["channels"] == 1
assert metadata['bit_rate'] == 80000 assert metadata["bit_rate"] == 80000
assert abs(metadata['length_seconds'] - 3.8) < 0.1 assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata['mime'] == 'audio/vorbis' assert metadata["mime"] == "audio/vorbis"
assert metadata['comment'] == 'Test Comment' assert metadata["comment"] == "Test Comment"
def test_ogg_stereo(): def test_ogg_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.ogg', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 2 assert metadata["channels"] == 2
assert metadata['bit_rate'] == 112000 assert metadata["bit_rate"] == 112000
assert abs(metadata['length_seconds'] - 3.8) < 0.1 assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata['mime'] == 'audio/vorbis' assert metadata["mime"] == "audio/vorbis"
assert metadata['comment'] == 'Test Comment' assert metadata["comment"] == "Test Comment"
''' faac and avconv can't seem to create a proper mono AAC file... ugh
""" faac and avconv can't seem to create a proper mono AAC file... ugh
def test_aac_mono(): def test_aac_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.m4a') metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.m4a')
print("Mono AAC metadata:") print("Mono AAC metadata:")
@ -88,78 +111,93 @@ def test_aac_mono():
assert abs(metadata['length_seconds'] - 3.8) < 0.1 assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/mp4' assert metadata['mime'] == 'audio/mp4'
assert metadata['comment'] == 'Test Comment' assert metadata['comment'] == 'Test Comment'
''' """
def test_aac_stereo(): def test_aac_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.m4a', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 2 assert metadata["channels"] == 2
assert metadata['bit_rate'] == 102619 assert metadata["bit_rate"] == 102619
assert abs(metadata['length_seconds'] - 3.8) < 0.1 assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata['mime'] == 'audio/mp4' assert metadata["mime"] == "audio/mp4"
assert metadata['comment'] == 'Test Comment' assert metadata["comment"] == "Test Comment"
def test_mp3_utf8(): def test_mp3_utf8():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
# Using a bunch of different UTF-8 codepages here. Test data is from: # Using a bunch of different UTF-8 codepages here. Test data is from:
# http://winrus.com/utf8-jap.htm # http://winrus.com/utf8-jap.htm
assert metadata['track_title'] == 'アイウエオカキクケコサシスセソタチツテ' assert metadata["track_title"] == "アイウエオカキクケコサシスセソタチツテ"
assert metadata['artist_name'] == 'てすと' assert metadata["artist_name"] == "てすと"
assert metadata['album_title'] == 'Ä ä Ü ü ß' assert metadata["album_title"] == "Ä ä Ü ü ß"
assert metadata['year'] == '1999' assert metadata["year"] == "1999"
assert metadata['genre'] == 'Я Б Г Д Ж Й' assert metadata["genre"] == "Я Б Г Д Ж Й"
assert metadata['track_number'] == '1' assert metadata["track_number"] == "1"
assert metadata['channels'] == 2 assert metadata["channels"] == 2
assert metadata['bit_rate'] < 130000 assert metadata["bit_rate"] < 130000
assert metadata['bit_rate'] > 127000 assert metadata["bit_rate"] > 127000
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' assert metadata["mime"] == "audio/mp3"
assert metadata['track_total'] == '10' # MP3s can have a track_total assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_invalid_wma(): def test_invalid_wma():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) metadata = MetadataAnalyzer.analyze(
assert metadata['mime'] == 'audio/x-ms-wma' "tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
assert metadata["mime"] == "audio/x-ms-wma"
def test_wav_stereo(): def test_wav_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.wav', dict()) metadata = MetadataAnalyzer.analyze(
assert metadata['mime'] == 'audio/x-wav' "tests/test_data/44100Hz-16bit-stereo.wav", dict()
assert abs(metadata['length_seconds'] - 3.9) < 0.1 )
assert metadata['channels'] == 2 assert metadata["mime"] == "audio/x-wav"
assert metadata['sample_rate'] == 44100 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["channels"] == 2
assert metadata["sample_rate"] == 44100
# Make sure the parameter checking works # Make sure the parameter checking works
@raises(FileNotFoundError) @raises(FileNotFoundError)
def test_move_wrong_string_param1(): def test_move_wrong_string_param1():
not_unicode = 'asdfasdf' not_unicode = "asdfasdf"
MetadataAnalyzer.analyze(not_unicode, dict()) MetadataAnalyzer.analyze(not_unicode, dict())
@raises(TypeError) @raises(TypeError)
def test_move_wrong_metadata_dict(): def test_move_wrong_metadata_dict():
not_a_dict = list() not_a_dict = list()
MetadataAnalyzer.analyze('asdfasdf', not_a_dict) MetadataAnalyzer.analyze("asdfasdf", not_a_dict)
# Test an mp3 file where the number of channels is invalid or missing: # Test an mp3 file where the number of channels is invalid or missing:
def test_mp3_bad_channels(): def test_mp3_bad_channels():
filename = 'tests/test_data/44100Hz-16bit-mono.mp3' filename = "tests/test_data/44100Hz-16bit-mono.mp3"
''' """
It'd be a pain in the ass to construct a real MP3 with an invalid number It'd be a pain in the ass to construct a real MP3 with an invalid number
of channels by hand because that value is stored in every MP3 frame in the file of channels by hand because that value is stored in every MP3 frame in the file
''' """
audio_file = mutagen.File(filename, easy=True) audio_file = mutagen.File(filename, easy=True)
audio_file.info.mode = 1777 audio_file.info.mode = 1777
with mock.patch('airtime_analyzer.metadata_analyzer.mutagen') as mock_mutagen: with mock.patch("airtime_analyzer.metadata_analyzer.mutagen") as mock_mutagen:
mock_mutagen.File.return_value = audio_file mock_mutagen.File.return_value = audio_file
#mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw) # mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw)
metadata = MetadataAnalyzer.analyze(filename, dict()) metadata = MetadataAnalyzer.analyze(filename, dict())
check_default_metadata(metadata) check_default_metadata(metadata)
assert metadata['channels'] == 1 assert metadata["channels"] == 1
assert metadata['bit_rate'] == 63998 assert metadata["bit_rate"] == 63998
assert abs(metadata['length_seconds'] - 3.9) < 0.1 assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't. assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't.
assert metadata['track_total'] == '10' # MP3s can have a track_total assert metadata["track_total"] == "10" # MP3s can have a track_total
#Mutagen doesn't extract comments from mp3s it seems # Mutagen doesn't extract comments from mp3s it seems
def test_unparsable_file(): def test_unparsable_file():
MetadataAnalyzer.analyze('tests/test_data/unparsable.txt', dict()) MetadataAnalyzer.analyze("tests/test_data/unparsable.txt", dict())

View File

@ -2,61 +2,97 @@
from nose.tools import * from nose.tools import *
from airtime_analyzer.playability_analyzer import * from airtime_analyzer.playability_analyzer import *
def check_default_metadata(metadata): def check_default_metadata(metadata):
''' Stub function for now in case we need it later.''' """Stub function for now in case we need it later."""
pass pass
def test_missing_liquidsoap(): def test_missing_liquidsoap():
old_ls = PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE old_ls = PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = 'foosdaf' PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = "foosdaf"
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back
@raises(UnplayableFileError) @raises(UnplayableFileError)
def test_invalid_filepath(): def test_invalid_filepath():
metadata = PlayabilityAnalyzer.analyze(u'non-existent-file', dict()) metadata = PlayabilityAnalyzer.analyze(u"non-existent-file", dict())
def test_mp3_utf8(): def test_mp3_utf8():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_dualmono(): def test_mp3_dualmono():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_jointstereo(): def test_mp3_jointstereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_simplestereo(): def test_mp3_simplestereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_stereo(): def test_mp3_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_mp3_mono(): def test_mp3_mono():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_ogg_stereo(): def test_ogg_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
@raises(UnplayableFileError) @raises(UnplayableFileError)
def test_invalid_wma(): def test_invalid_wma():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
def test_m4a_stereo(): def test_m4a_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
def test_wav_stereo(): def test_wav_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict()) metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
@raises(UnplayableFileError) @raises(UnplayableFileError)
def test_unknown(): def test_unknown():
metadata = PlayabilityAnalyzer.analyze(u'http://www.google.com', dict()) metadata = PlayabilityAnalyzer.analyze(u"http://www.google.com", dict())
check_default_metadata(metadata) check_default_metadata(metadata)

View File

@ -5,80 +5,134 @@ from airtime_analyzer.replaygain_analyzer import ReplayGainAnalyzer
def check_default_metadata(metadata): def check_default_metadata(metadata):
''' Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect. """Check that the values extract by Silan/CuePointAnalyzer on our test audio files match what we expect.
:param metadata: a metadata dictionary :param metadata: a metadata dictionary
:return: Nothing :return: Nothing
''' """
''' """
# We give python-rgain some leeway here by specifying a tolerance. It's not perfectly consistent across codecs... # We give python-rgain some leeway here by specifying a tolerance. It's not perfectly consistent across codecs...
assert abs(metadata['cuein']) < tolerance_seconds assert abs(metadata['cuein']) < tolerance_seconds
assert abs(metadata['cueout'] - length_seconds) < tolerance_seconds assert abs(metadata['cueout'] - length_seconds) < tolerance_seconds
''' """
tolerance = 0.60 tolerance = 0.60
expected_replaygain = 5.2 expected_replaygain = 5.2
print(metadata['replay_gain']) print(metadata["replay_gain"])
assert abs(metadata['replay_gain'] - expected_replaygain) < tolerance assert abs(metadata["replay_gain"] - expected_replaygain) < tolerance
def test_missing_replaygain(): def test_missing_replaygain():
old_rg = ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE old_rg = ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = 'foosdaf' ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = "foosdaf"
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back
def test_invalid_filepath(): def test_invalid_filepath():
metadata = ReplayGainAnalyzer.analyze(u'non-existent-file', dict()) metadata = ReplayGainAnalyzer.analyze(u"non-existent-file", dict())
def test_mp3_utf8(): def test_mp3_utf8():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_mp3_utf8.rgain = True test_mp3_utf8.rgain = True
def test_mp3_dualmono(): def test_mp3_dualmono():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_mp3_dualmono.rgain = True test_mp3_dualmono.rgain = True
def test_mp3_jointstereo(): def test_mp3_jointstereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_mp3_jointstereo.rgain = True test_mp3_jointstereo.rgain = True
def test_mp3_simplestereo(): def test_mp3_simplestereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_mp3_simplestereo.rgain = True test_mp3_simplestereo.rgain = True
def test_mp3_stereo(): def test_mp3_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_mp3_stereo.rgain = True test_mp3_stereo.rgain = True
def test_mp3_mono(): def test_mp3_mono():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_mp3_mono.rgain = True test_mp3_mono.rgain = True
def test_ogg_stereo(): def test_ogg_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_ogg_stereo = True test_ogg_stereo = True
def test_invalid_wma(): def test_invalid_wma():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
test_invalid_wma.rgain = True test_invalid_wma.rgain = True
def test_mp3_missing_id3_header(): def test_mp3_missing_id3_header():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3", dict()
)
test_mp3_missing_id3_header.rgain = True test_mp3_missing_id3_header.rgain = True
def test_m4a_stereo(): def test_m4a_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict()) metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata) check_default_metadata(metadata)
test_m4a_stereo.rgain = True test_m4a_stereo.rgain = True
''' WAVE is not supported by python-rgain yet """ WAVE is not supported by python-rgain yet
def test_wav_stereo(): def test_wav_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict()) metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
check_default_metadata(metadata) check_default_metadata(metadata)
test_wav_stereo.rgain = True test_wav_stereo.rgain = True
''' """

View File

@ -6,23 +6,28 @@ import socket
import requests import requests
from requests.auth import AuthBase from requests.auth import AuthBase
def get_protocol(config): def get_protocol(config):
positive_values = ['Yes', 'yes', 'True', 'true', True] positive_values = ["Yes", "yes", "True", "true", True]
port = config['general'].get('base_port', 80) port = config["general"].get("base_port", 80)
force_ssl = config['general'].get('force_ssl', False) force_ssl = config["general"].get("force_ssl", False)
if force_ssl in positive_values: if force_ssl in positive_values:
protocol = 'https' protocol = "https"
else: else:
protocol = config['general'].get('protocol') protocol = config["general"].get("protocol")
if not protocol: if not protocol:
protocol = str(("http", "https")[int(port) == 443]) protocol = str(("http", "https")[int(port) == 443])
return protocol return protocol
class UrlParamDict(dict): class UrlParamDict(dict):
def __missing__(self, key): def __missing__(self, key):
return '{' + key + '}' return "{" + key + "}"
class UrlException(Exception):
pass
class UrlException(Exception): pass
class IncompleteUrl(UrlException): class IncompleteUrl(UrlException):
def __init__(self, url): def __init__(self, url):
@ -31,6 +36,7 @@ class IncompleteUrl(UrlException):
def __str__(self): def __str__(self):
return "Incomplete url: '{}'".format(self.url) return "Incomplete url: '{}'".format(self.url)
class UrlBadParam(UrlException): class UrlBadParam(UrlException):
def __init__(self, url, param): def __init__(self, url, param):
self.url = url self.url = url
@ -39,17 +45,20 @@ class UrlBadParam(UrlException):
def __str__(self): def __str__(self):
return "Bad param '{}' passed into url: '{}'".format(self.param, self.url) return "Bad param '{}' passed into url: '{}'".format(self.param, self.url)
class KeyAuth(AuthBase): class KeyAuth(AuthBase):
def __init__(self, key): def __init__(self, key):
self.key = key self.key = key
def __call__(self, r): def __call__(self, r):
r.headers['Authorization'] = "Api-Key {}".format(self.key) r.headers["Authorization"] = "Api-Key {}".format(self.key)
return r return r
class ApcUrl: class ApcUrl:
""" A safe abstraction and testable for filling in parameters in """A safe abstraction and testable for filling in parameters in
api_client.cfg""" api_client.cfg"""
def __init__(self, base_url): def __init__(self, base_url):
self.base_url = base_url self.base_url = base_url
@ -63,17 +72,18 @@ class ApcUrl:
return ApcUrl(temp_url) return ApcUrl(temp_url)
def url(self): def url(self):
if '{' in self.base_url: if "{" in self.base_url:
raise IncompleteUrl(self.base_url) raise IncompleteUrl(self.base_url)
else: else:
return self.base_url return self.base_url
class ApiRequest: class ApiRequest:
API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
def __init__(self, name, url, logger=None, api_key=None): def __init__(self, name, url, logger=None, api_key=None):
self.name = name self.name = name
self.url = url self.url = url
self.__req = None self.__req = None
if logger is None: if logger is None:
self.logger = logging self.logger = logging
@ -86,36 +96,45 @@ class ApiRequest:
self.logger.debug(final_url) self.logger.debug(final_url)
try: try:
if _post_data: if _post_data:
response = requests.post(final_url, response = requests.post(
data=_post_data, auth=self.auth, final_url,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT) data=_post_data,
auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT,
)
else: else:
response = requests.get(final_url, params=params, auth=self.auth, response = requests.get(
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT) final_url,
if 'application/json' in response.headers['content-type']: params=params,
auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT,
)
if "application/json" in response.headers["content-type"]:
return response.json() return response.json()
return response return response
except requests.exceptions.Timeout: except requests.exceptions.Timeout:
self.logger.error('HTTP request to %s timed out', final_url) self.logger.error("HTTP request to %s timed out", final_url)
raise raise
def req(self, *args, **kwargs): def req(self, *args, **kwargs):
self.__req = lambda : self(*args, **kwargs) self.__req = lambda: self(*args, **kwargs)
return self return self
def retry(self, n, delay=5): def retry(self, n, delay=5):
"""Try to send request n times. If after n times it fails then """Try to send request n times. If after n times it fails then
we finally raise exception""" we finally raise exception"""
for i in range(0,n-1): for i in range(0, n - 1):
try: try:
return self.__req() return self.__req()
except Exception: except Exception:
time.sleep(delay) time.sleep(delay)
return self.__req() return self.__req()
class RequestProvider: class RequestProvider:
""" Creates the available ApiRequest instance that can be read from """Creates the available ApiRequest instance that can be read from
a config file """ a config file"""
def __init__(self, cfg, endpoints): def __init__(self, cfg, endpoints):
self.config = cfg self.config = cfg
self.requests = {} self.requests = {}
@ -123,27 +142,29 @@ class RequestProvider:
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:] self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
protocol = get_protocol(self.config) protocol = get_protocol(self.config)
base_port = self.config['general']['base_port'] base_port = self.config["general"]["base_port"]
base_url = self.config['general']['base_url'] base_url = self.config["general"]["base_url"]
base_dir = self.config['general']['base_dir'] base_dir = self.config["general"]["base_dir"]
api_base = self.config['api_base'] api_base = self.config["api_base"]
api_url = "{protocol}://{base_url}:{base_port}/{base_dir}{api_base}/{action}".format_map( api_url = "{protocol}://{base_url}:{base_port}/{base_dir}{api_base}/{action}".format_map(
UrlParamDict(protocol=protocol, UrlParamDict(
base_url=base_url, protocol=protocol,
base_port=base_port, base_url=base_url,
base_dir=base_dir, base_port=base_port,
api_base=api_base base_dir=base_dir,
)) api_base=api_base,
)
)
self.url = ApcUrl(api_url) self.url = ApcUrl(api_url)
# Now we must discover the possible actions # Now we must discover the possible actions
for action_name, action_value in endpoints.items(): for action_name, action_value in endpoints.items():
new_url = self.url.params(action=action_value) new_url = self.url.params(action=action_value)
if '{api_key}' in action_value: if "{api_key}" in action_value:
new_url = new_url.params(api_key=self.config["general"]['api_key']) new_url = new_url.params(api_key=self.config["general"]["api_key"])
self.requests[action_name] = ApiRequest(action_name, self.requests[action_name] = ApiRequest(
new_url, action_name, new_url, api_key=self.config["general"]["api_key"]
api_key=self.config['general']['api_key']) )
def available_requests(self): def available_requests(self):
return list(self.requests.keys()) return list(self.requests.keys())
@ -157,15 +178,20 @@ class RequestProvider:
else: else:
return super(RequestProvider, self).__getattribute__(attr) return super(RequestProvider, self).__getattribute__(attr)
def time_in_seconds(time): def time_in_seconds(time):
return time.hour * 60 * 60 + \ return (
time.minute * 60 + \ time.hour * 60 * 60
time.second + \ + time.minute * 60
time.microsecond / 1000000.0 + time.second
+ time.microsecond / 1000000.0
)
def time_in_milliseconds(time): def time_in_milliseconds(time):
return time_in_seconds(time) * 1000 return time_in_seconds(time) * 1000
def fromisoformat(time_string): def fromisoformat(time_string):
""" """
This is required for Python 3.6 support. datetime.time.fromisoformat was This is required for Python 3.6 support. datetime.time.fromisoformat was

View File

@ -26,58 +26,112 @@ api_config = {}
api_endpoints = {} api_endpoints = {}
# URL to get the version number of the server API # URL to get the version number of the server API
api_endpoints['version_url'] = 'version/api_key/{api_key}' api_endpoints["version_url"] = "version/api_key/{api_key}"
#URL to register a components IP Address with the central web server # URL to register a components IP Address with the central web server
api_endpoints['register_component'] = 'register-component/format/json/api_key/{api_key}/component/{component}' api_endpoints[
"register_component"
] = "register-component/format/json/api_key/{api_key}/component/{component}"
#media-monitor # media-monitor
api_endpoints['media_setup_url'] = 'media-monitor-setup/format/json/api_key/{api_key}' api_endpoints["media_setup_url"] = "media-monitor-setup/format/json/api_key/{api_key}"
api_endpoints['upload_recorded'] = 'upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}' api_endpoints[
api_endpoints['update_media_url'] = 'reload-metadata/format/json/api_key/{api_key}/mode/{mode}' "upload_recorded"
api_endpoints['list_all_db_files'] = 'list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}' ] = "upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}"
api_endpoints['list_all_watched_dirs'] = 'list-all-watched-dirs/format/json/api_key/{api_key}' api_endpoints[
api_endpoints['add_watched_dir'] = 'add-watched-dir/format/json/api_key/{api_key}/path/{path}' "update_media_url"
api_endpoints['remove_watched_dir'] = 'remove-watched-dir/format/json/api_key/{api_key}/path/{path}' ] = "reload-metadata/format/json/api_key/{api_key}/mode/{mode}"
api_endpoints['set_storage_dir'] = 'set-storage-dir/format/json/api_key/{api_key}/path/{path}' api_endpoints[
api_endpoints['update_fs_mount'] = 'update-file-system-mount/format/json/api_key/{api_key}' "list_all_db_files"
api_endpoints['reload_metadata_group'] = 'reload-metadata-group/format/json/api_key/{api_key}' ] = "list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}"
api_endpoints['handle_watched_dir_missing'] = 'handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}' api_endpoints[
#show-recorder "list_all_watched_dirs"
api_endpoints['show_schedule_url'] = 'recorded-shows/format/json/api_key/{api_key}' ] = "list-all-watched-dirs/format/json/api_key/{api_key}"
api_endpoints['upload_file_url'] = 'rest/media' api_endpoints[
api_endpoints['upload_retries'] = '3' "add_watched_dir"
api_endpoints['upload_wait'] = '60' ] = "add-watched-dir/format/json/api_key/{api_key}/path/{path}"
#pypo api_endpoints[
api_endpoints['export_url'] = 'schedule/api_key/{api_key}' "remove_watched_dir"
api_endpoints['get_media_url'] = 'get-media/file/{file}/api_key/{api_key}' ] = "remove-watched-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints['update_item_url'] = 'notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}' api_endpoints[
api_endpoints['update_start_playing_url'] = 'notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/' "set_storage_dir"
api_endpoints['get_stream_setting'] = 'get-stream-setting/format/json/api_key/{api_key}/' ] = "set-storage-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints['update_liquidsoap_status'] = 'update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}' api_endpoints[
api_endpoints['update_source_status'] = 'update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}' "update_fs_mount"
api_endpoints['check_live_stream_auth'] = 'check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}' ] = "update-file-system-mount/format/json/api_key/{api_key}"
api_endpoints['get_bootstrap_info'] = 'get-bootstrap-info/format/json/api_key/{api_key}' api_endpoints[
api_endpoints['get_files_without_replay_gain'] = 'get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}' "reload_metadata_group"
api_endpoints['update_replay_gain_value'] = 'update-replay-gain-value/format/json/api_key/{api_key}' ] = "reload-metadata-group/format/json/api_key/{api_key}"
api_endpoints['notify_webstream_data'] = 'notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json' api_endpoints[
api_endpoints['notify_liquidsoap_started'] = 'rabbitmq-do-push/api_key/{api_key}/format/json' "handle_watched_dir_missing"
api_endpoints['get_stream_parameters'] = 'get-stream-parameters/api_key/{api_key}/format/json' ] = "handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}"
api_endpoints['push_stream_stats'] = 'push-stream-stats/api_key/{api_key}/format/json' # show-recorder
api_endpoints['update_stream_setting_table'] = 'update-stream-setting-table/api_key/{api_key}/format/json' api_endpoints["show_schedule_url"] = "recorded-shows/format/json/api_key/{api_key}"
api_endpoints['get_files_without_silan_value'] = 'get-files-without-silan-value/api_key/{api_key}' api_endpoints["upload_file_url"] = "rest/media"
api_endpoints['update_cue_values_by_silan'] = 'update-cue-values-by-silan/api_key/{api_key}' api_endpoints["upload_retries"] = "3"
api_endpoints['update_metadata_on_tunein'] = 'update-metadata-on-tunein/api_key/{api_key}' api_endpoints["upload_wait"] = "60"
api_config['api_base'] = 'api' # pypo
api_config['bin_dir'] = '/usr/lib/airtime/api_clients/' api_endpoints["export_url"] = "schedule/api_key/{api_key}"
api_endpoints["get_media_url"] = "get-media/file/{file}/api_key/{api_key}"
api_endpoints[
"update_item_url"
] = "notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}"
api_endpoints[
"update_start_playing_url"
] = "notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/"
api_endpoints[
"get_stream_setting"
] = "get-stream-setting/format/json/api_key/{api_key}/"
api_endpoints[
"update_liquidsoap_status"
] = "update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}"
api_endpoints[
"update_source_status"
] = "update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}"
api_endpoints[
"check_live_stream_auth"
] = "check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}"
api_endpoints["get_bootstrap_info"] = "get-bootstrap-info/format/json/api_key/{api_key}"
api_endpoints[
"get_files_without_replay_gain"
] = "get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}"
api_endpoints[
"update_replay_gain_value"
] = "update-replay-gain-value/format/json/api_key/{api_key}"
api_endpoints[
"notify_webstream_data"
] = "notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json"
api_endpoints[
"notify_liquidsoap_started"
] = "rabbitmq-do-push/api_key/{api_key}/format/json"
api_endpoints[
"get_stream_parameters"
] = "get-stream-parameters/api_key/{api_key}/format/json"
api_endpoints["push_stream_stats"] = "push-stream-stats/api_key/{api_key}/format/json"
api_endpoints[
"update_stream_setting_table"
] = "update-stream-setting-table/api_key/{api_key}/format/json"
api_endpoints[
"get_files_without_silan_value"
] = "get-files-without-silan-value/api_key/{api_key}"
api_endpoints[
"update_cue_values_by_silan"
] = "update-cue-values-by-silan/api_key/{api_key}"
api_endpoints[
"update_metadata_on_tunein"
] = "update-metadata-on-tunein/api_key/{api_key}"
api_config["api_base"] = "api"
api_config["bin_dir"] = "/usr/lib/airtime/api_clients/"
################################################################################ ################################################################################
# Airtime API Version 1 Client # Airtime API Version 1 Client
################################################################################ ################################################################################
class AirtimeApiClient(object): class AirtimeApiClient(object):
def __init__(self, logger=None,config_path='/etc/airtime/airtime.conf'): def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"):
if logger is None: self.logger = logging if logger is None:
else: self.logger = logger self.logger = logging
else:
self.logger = logger
# loading config file # loading config file
try: try:
@ -85,16 +139,18 @@ class AirtimeApiClient(object):
self.config.update(api_config) self.config.update(api_config)
self.services = RequestProvider(self.config, api_endpoints) self.services = RequestProvider(self.config, api_endpoints)
except Exception as e: except Exception as e:
self.logger.exception('Error loading config file: %s', config_path) self.logger.exception("Error loading config file: %s", config_path)
sys.exit(1) sys.exit(1)
def __get_airtime_version(self): def __get_airtime_version(self):
try: return self.services.version_url()['airtime_version'] try:
except Exception: return -1 return self.services.version_url()["airtime_version"]
except Exception:
return -1
def __get_api_version(self): def __get_api_version(self):
try: try:
return self.services.version_url()['api_version'] return self.services.version_url()["api_version"]
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
return -1 return -1
@ -105,25 +161,30 @@ class AirtimeApiClient(object):
# logger.info('Airtime version found: ' + str(version)) # logger.info('Airtime version found: ' + str(version))
if api_version == -1: if api_version == -1:
if verbose: if verbose:
logger.info('Unable to get Airtime API version number.\n') logger.info("Unable to get Airtime API version number.\n")
return False return False
elif api_version[0:3] != AIRTIME_API_VERSION[0:3]: elif api_version[0:3] != AIRTIME_API_VERSION[0:3]:
if verbose: if verbose:
logger.info('Airtime API version found: ' + str(api_version)) logger.info("Airtime API version found: " + str(api_version))
logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION) logger.info(
"pypo is only compatible with API version: " + AIRTIME_API_VERSION
)
return False return False
else: else:
if verbose: if verbose:
logger.info('Airtime API version found: ' + str(api_version)) logger.info("Airtime API version found: " + str(api_version))
logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION) logger.info(
"pypo is only compatible with API version: " + AIRTIME_API_VERSION
)
return True return True
def get_schedule(self): def get_schedule(self):
# TODO : properly refactor this routine # TODO : properly refactor this routine
# For now the return type is a little messed up for compatibility reasons # For now the return type is a little messed up for compatibility reasons
try: return (True, self.services.export_url()) try:
except: return (False, None) return (True, self.services.export_url())
except:
return (False, None)
def notify_liquidsoap_started(self): def notify_liquidsoap_started(self):
try: try:
@ -132,9 +193,9 @@ class AirtimeApiClient(object):
self.logger.exception(e) self.logger.exception(e)
def notify_media_item_start_playing(self, media_id): def notify_media_item_start_playing(self, media_id):
""" This is a callback from liquidsoap, we use this to notify """This is a callback from liquidsoap, we use this to notify
about the currently playing *song*. We get passed a JSON string about the currently playing *song*. We get passed a JSON string
which we handed to liquidsoap in get_liquidsoap_data(). """ which we handed to liquidsoap in get_liquidsoap_data()."""
try: try:
return self.services.update_start_playing_url(media_id=media_id) return self.services.update_start_playing_url(media_id=media_id)
except Exception as e: except Exception as e:
@ -150,7 +211,7 @@ class AirtimeApiClient(object):
def upload_recorded_show(self, files, show_id): def upload_recorded_show(self, files, show_id):
logger = self.logger logger = self.logger
response = '' response = ""
retries = int(self.config["upload_retries"]) retries = int(self.config["upload_retries"])
retries_wait = int(self.config["upload_wait"]) retries_wait = int(self.config["upload_wait"])
@ -165,7 +226,9 @@ class AirtimeApiClient(object):
logger.debug(ApiRequest.API_HTTP_REQUEST_TIMEOUT) logger.debug(ApiRequest.API_HTTP_REQUEST_TIMEOUT)
try: try:
request = requests.post(url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT)) request = requests.post(
url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT)
)
response = request.json() response = request.json()
logger.debug(response) logger.debug(response)
@ -199,7 +262,7 @@ class AirtimeApiClient(object):
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
#wait some time before next retry # wait some time before next retry
time.sleep(retries_wait) time.sleep(retries_wait)
return response return response
@ -207,42 +270,49 @@ class AirtimeApiClient(object):
def check_live_stream_auth(self, username, password, dj_type): def check_live_stream_auth(self, username, password, dj_type):
try: try:
return self.services.check_live_stream_auth( return self.services.check_live_stream_auth(
username=username, password=password, djtype=dj_type) username=username, password=password, djtype=dj_type
)
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
return {} return {}
def construct_url(self,config_action_key): def construct_url(self, config_action_key):
"""Constructs the base url for every request""" """Constructs the base url for every request"""
# TODO : Make other methods in this class use this this method. # TODO : Make other methods in this class use this this method.
if self.config["general"]["base_dir"].startswith("/"): if self.config["general"]["base_dir"].startswith("/"):
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:] self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
protocol = get_protocol(self.config) protocol = get_protocol(self.config)
url = "%s://%s:%s/%s%s/%s" % \ url = "%s://%s:%s/%s%s/%s" % (
(protocol, protocol,
self.config["general"]["base_url"], str(self.config["general"]["base_port"]), self.config["general"]["base_url"],
self.config["general"]["base_dir"], self.config["api_base"], str(self.config["general"]["base_port"]),
self.config[config_action_key]) self.config["general"]["base_dir"],
self.config["api_base"],
self.config[config_action_key],
)
url = url.replace("%%api_key%%", self.config["general"]["api_key"]) url = url.replace("%%api_key%%", self.config["general"]["api_key"])
return url return url
def construct_rest_url(self,config_action_key): def construct_rest_url(self, config_action_key):
"""Constructs the base url for RESTful requests""" """Constructs the base url for RESTful requests"""
if self.config["general"]["base_dir"].startswith("/"): if self.config["general"]["base_dir"].startswith("/"):
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:] self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
protocol = get_protocol(self.config) protocol = get_protocol(self.config)
url = "%s://%s:@%s:%s/%s/%s" % \ url = "%s://%s:@%s:%s/%s/%s" % (
(protocol, self.config["general"]["api_key"], protocol,
self.config["general"]["base_url"], str(self.config["general"]["base_port"]), self.config["general"]["api_key"],
self.config["general"]["base_dir"], self.config["general"]["base_url"],
self.config[config_action_key]) str(self.config["general"]["base_port"]),
self.config["general"]["base_dir"],
self.config[config_action_key],
)
return url return url
""" """
Caller of this method needs to catch any exceptions such as Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen ValueError thrown by json.loads or URLError by urllib2.urlopen
""" """
def setup_media_monitor(self): def setup_media_monitor(self):
return self.services.media_setup_url() return self.services.media_setup_url()
@ -264,49 +334,55 @@ class AirtimeApiClient(object):
# filter but here we prefer a little more verbosity to help # filter but here we prefer a little more verbosity to help
# debugging # debugging
for action in action_list: for action in action_list:
if not 'mode' in action: if not "mode" in action:
self.logger.debug("Warning: Trying to send a request element without a 'mode'") self.logger.debug(
self.logger.debug("Here is the the request: '%s'" % str(action) ) "Warning: Trying to send a request element without a 'mode'"
)
self.logger.debug("Here is the the request: '%s'" % str(action))
else: else:
# We alias the value of is_record to true or false no # We alias the value of is_record to true or false no
# matter what it is based on if it's absent in the action # matter what it is based on if it's absent in the action
if 'is_record' not in action: if "is_record" not in action:
action['is_record'] = 0 action["is_record"] = 0
valid_actions.append(action) valid_actions.append(action)
# Note that we must prefix every key with: mdX where x is a number # Note that we must prefix every key with: mdX where x is a number
# Is there a way to format the next line a little better? The # Is there a way to format the next line a little better? The
# parenthesis make the code almost unreadable # parenthesis make the code almost unreadable
md_list = dict((("md%d" % i), json.dumps(md)) \ md_list = dict(
for i,md in enumerate(valid_actions)) (("md%d" % i), json.dumps(md)) for i, md in enumerate(valid_actions)
)
# For testing we add the following "dry" parameter to tell the # For testing we add the following "dry" parameter to tell the
# controller not to actually do any changes # controller not to actually do any changes
if dry: md_list['dry'] = 1 if dry:
md_list["dry"] = 1
self.logger.info("Pumping out %d requests..." % len(valid_actions)) self.logger.info("Pumping out %d requests..." % len(valid_actions))
return self.services.reload_metadata_group(_post_data=md_list) return self.services.reload_metadata_group(_post_data=md_list)
#returns a list of all db files for a given directory in JSON format: # returns a list of all db files for a given directory in JSON format:
#{"files":["path/to/file1", "path/to/file2"]} # {"files":["path/to/file1", "path/to/file2"]}
#Note that these are relative paths to the given directory. The full # Note that these are relative paths to the given directory. The full
#path is not returned. # path is not returned.
def list_all_db_files(self, dir_id, all_files=True): def list_all_db_files(self, dir_id, all_files=True):
logger = self.logger logger = self.logger
try: try:
all_files = "1" if all_files else "0" all_files = "1" if all_files else "0"
response = self.services.list_all_db_files(dir_id=dir_id, response = self.services.list_all_db_files(dir_id=dir_id, all=all_files)
all=all_files)
except Exception as e: except Exception as e:
response = {} response = {}
logger.error("Exception: %s", e) logger.error("Exception: %s", e)
try: try:
return response["files"] return response["files"]
except KeyError: except KeyError:
self.logger.error("Could not find index 'files' in dictionary: %s", self.logger.error(
str(response)) "Could not find index 'files' in dictionary: %s", str(response)
)
return [] return []
""" """
Caller of this method needs to catch any exceptions such as Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen ValueError thrown by json.loads or URLError by urllib2.urlopen
""" """
def list_all_watched_dirs(self): def list_all_watched_dirs(self):
return self.services.list_all_watched_dirs() return self.services.list_all_watched_dirs()
@ -314,6 +390,7 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen ValueError thrown by json.loads or URLError by urllib2.urlopen
""" """
def add_watched_dir(self, path): def add_watched_dir(self, path):
return self.services.add_watched_dir(path=base64.b64encode(path)) return self.services.add_watched_dir(path=base64.b64encode(path))
@ -321,6 +398,7 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen ValueError thrown by json.loads or URLError by urllib2.urlopen
""" """
def remove_watched_dir(self, path): def remove_watched_dir(self, path):
return self.services.remove_watched_dir(path=base64.b64encode(path)) return self.services.remove_watched_dir(path=base64.b64encode(path))
@ -328,6 +406,7 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen ValueError thrown by json.loads or URLError by urllib2.urlopen
""" """
def set_storage_dir(self, path): def set_storage_dir(self, path):
return self.services.set_storage_dir(path=base64.b64encode(path)) return self.services.set_storage_dir(path=base64.b64encode(path))
@ -335,15 +414,16 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen ValueError thrown by json.loads or URLError by urllib2.urlopen
""" """
def get_stream_setting(self): def get_stream_setting(self):
return self.services.get_stream_setting() return self.services.get_stream_setting()
def register_component(self, component): def register_component(self, component):
""" Purpose of this method is to contact the server with a "Hey its """Purpose of this method is to contact the server with a "Hey its
me!" message. This will allow the server to register the component's me!" message. This will allow the server to register the component's
(component = media-monitor, pypo etc.) ip address, and later use it (component = media-monitor, pypo etc.) ip address, and later use it
to query monit via monit's http service, or download log files via a to query monit via monit's http service, or download log files via a
http server. """ http server."""
return self.services.register_component(component=component) return self.services.register_component(component=component)
def notify_liquidsoap_status(self, msg, stream_id, time): def notify_liquidsoap_status(self, msg, stream_id, time):
@ -351,24 +431,24 @@ class AirtimeApiClient(object):
try: try:
post_data = {"msg_post": msg} post_data = {"msg_post": msg}
#encoded_msg is no longer used server_side!! # encoded_msg is no longer used server_side!!
encoded_msg = urllib.parse.quote('dummy') encoded_msg = urllib.parse.quote("dummy")
self.services.update_liquidsoap_status.req(post_data, self.services.update_liquidsoap_status.req(
msg=encoded_msg, post_data, msg=encoded_msg, stream_id=stream_id, boot_time=time
stream_id=stream_id, ).retry(5)
boot_time=time).retry(5)
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
def notify_source_status(self, sourcename, status): def notify_source_status(self, sourcename, status):
try: try:
return self.services.update_source_status.req(sourcename=sourcename, return self.services.update_source_status.req(
status=status).retry(5) sourcename=sourcename, status=status
).retry(5)
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
def get_bootstrap_info(self): def get_bootstrap_info(self):
""" Retrieve infomations needed on bootstrap time """ """Retrieve infomations needed on bootstrap time"""
return self.services.get_bootstrap_info() return self.services.get_bootstrap_info()
def get_files_without_replay_gain_value(self, dir_id): def get_files_without_replay_gain_value(self, dir_id):
@ -377,7 +457,7 @@ class AirtimeApiClient(object):
calculated. This list of files is downloaded into a file and the path calculated. This list of files is downloaded into a file and the path
to this file is the return value. to this file is the return value.
""" """
#http://localhost/api/get-files-without-replay-gain/dir_id/1 # http://localhost/api/get-files-without-replay-gain/dir_id/1
try: try:
return self.services.get_files_without_replay_gain(dir_id=dir_id) return self.services.get_files_without_replay_gain(dir_id=dir_id)
except Exception as e: except Exception as e:
@ -401,25 +481,31 @@ class AirtimeApiClient(object):
'pairs' is a list of pairs in (x, y), where x is the file's database 'pairs' is a list of pairs in (x, y), where x is the file's database
row id and y is the file's replay_gain value in dB row id and y is the file's replay_gain value in dB
""" """
self.logger.debug(self.services.update_replay_gain_value( self.logger.debug(
_post_data={'data': json.dumps(pairs)})) self.services.update_replay_gain_value(
_post_data={"data": json.dumps(pairs)}
)
)
def update_cue_values_by_silan(self, pairs): def update_cue_values_by_silan(self, pairs):
""" """
'pairs' is a list of pairs in (x, y), where x is the file's database 'pairs' is a list of pairs in (x, y), where x is the file's database
row id and y is the file's cue values in dB row id and y is the file's cue values in dB
""" """
return self.services.update_cue_values_by_silan(_post_data={'data': json.dumps(pairs)}) return self.services.update_cue_values_by_silan(
_post_data={"data": json.dumps(pairs)}
)
def notify_webstream_data(self, data, media_id): def notify_webstream_data(self, data, media_id):
""" """
Update the server with the latest metadata we've received from the Update the server with the latest metadata we've received from the
external webstream external webstream
""" """
self.logger.info( self.services.notify_webstream_data.req( self.logger.info(
_post_data={'data':data}, media_id=str(media_id)).retry(5)) self.services.notify_webstream_data.req(
_post_data={"data": data}, media_id=str(media_id)
).retry(5)
)
def get_stream_parameters(self): def get_stream_parameters(self):
response = self.services.get_stream_parameters() response = self.services.get_stream_parameters()
@ -428,12 +514,16 @@ class AirtimeApiClient(object):
def push_stream_stats(self, data): def push_stream_stats(self, data):
# TODO : users of this method should do their own error handling # TODO : users of this method should do their own error handling
response = self.services.push_stream_stats(_post_data={'data': json.dumps(data)}) response = self.services.push_stream_stats(
_post_data={"data": json.dumps(data)}
)
return response return response
def update_stream_setting_table(self, data): def update_stream_setting_table(self, data):
try: try:
response = self.services.update_stream_setting_table(_post_data={'data': json.dumps(data)}) response = self.services.update_stream_setting_table(
_post_data={"data": json.dumps(data)}
)
return response return response
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)

View File

@ -18,17 +18,18 @@ LIBRETIME_API_VERSION = "2.0"
api_config = {} api_config = {}
api_endpoints = {} api_endpoints = {}
api_endpoints['version_url'] = 'version/' api_endpoints["version_url"] = "version/"
api_endpoints['schedule_url'] = 'schedule/' api_endpoints["schedule_url"] = "schedule/"
api_endpoints['webstream_url'] = 'webstreams/{id}/' api_endpoints["webstream_url"] = "webstreams/{id}/"
api_endpoints['show_instance_url'] = 'show-instances/{id}/' api_endpoints["show_instance_url"] = "show-instances/{id}/"
api_endpoints['show_url'] = 'shows/{id}/' api_endpoints["show_url"] = "shows/{id}/"
api_endpoints['file_url'] = 'files/{id}/' api_endpoints["file_url"] = "files/{id}/"
api_endpoints['file_download_url'] = 'files/{id}/download/' api_endpoints["file_download_url"] = "files/{id}/download/"
api_config['api_base'] = 'api/v2' api_config["api_base"] = "api/v2"
class AirtimeApiClient: class AirtimeApiClient:
def __init__(self, logger=None, config_path='/etc/airtime/airtime.conf'): def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"):
if logger is None: if logger is None:
self.logger = logging self.logger = logging
else: else:
@ -39,87 +40,89 @@ class AirtimeApiClient:
self.config.update(api_config) self.config.update(api_config)
self.services = RequestProvider(self.config, api_endpoints) self.services = RequestProvider(self.config, api_endpoints)
except Exception as e: except Exception as e:
self.logger.exception('Error loading config file: %s', config_path) self.logger.exception("Error loading config file: %s", config_path)
sys.exit(1) sys.exit(1)
def get_schedule(self): def get_schedule(self):
current_time = datetime.datetime.utcnow() current_time = datetime.datetime.utcnow()
end_time = current_time + datetime.timedelta(hours=1) end_time = current_time + datetime.timedelta(hours=1)
str_current = current_time.isoformat(timespec='seconds') str_current = current_time.isoformat(timespec="seconds")
str_end = end_time.isoformat(timespec='seconds') str_end = end_time.isoformat(timespec="seconds")
data = self.services.schedule_url(params={ data = self.services.schedule_url(
'ends__range': ('{}Z,{}Z'.format(str_current, str_end)), params={
}) "ends__range": ("{}Z,{}Z".format(str_current, str_end)),
result = {'media': {} }
for item in data:
start = isoparse(item['starts'])
key = start.strftime('%YYYY-%mm-%dd-%HH-%MM-%SS')
end = isoparse(item['ends'])
show_instance = self.services.show_instance_url(id=item['instance_id'])
show = self.services.show_url(id=show_instance['show_id'])
result['media'][key] = {
'start': start.strftime('%Y-%m-%d-%H-%M-%S'),
'end': end.strftime('%Y-%m-%d-%H-%M-%S'),
'row_id': item['id']
} }
current = result['media'][key] )
if item['file']: result = {"media": {}}
current['independent_event'] = False for item in data:
current['type'] = 'file' start = isoparse(item["starts"])
current['id'] = item['file_id'] key = start.strftime("%YYYY-%mm-%dd-%HH-%MM-%SS")
end = isoparse(item["ends"])
fade_in = time_in_milliseconds(fromisoformat(item['fade_in'])) show_instance = self.services.show_instance_url(id=item["instance_id"])
fade_out = time_in_milliseconds(fromisoformat(item['fade_out'])) show = self.services.show_url(id=show_instance["show_id"])
cue_in = time_in_seconds(fromisoformat(item['cue_in'])) result["media"][key] = {
cue_out = time_in_seconds(fromisoformat(item['cue_out'])) "start": start.strftime("%Y-%m-%d-%H-%M-%S"),
"end": end.strftime("%Y-%m-%d-%H-%M-%S"),
"row_id": item["id"],
}
current = result["media"][key]
if item["file"]:
current["independent_event"] = False
current["type"] = "file"
current["id"] = item["file_id"]
current['fade_in'] = fade_in fade_in = time_in_milliseconds(fromisoformat(item["fade_in"]))
current['fade_out'] = fade_out fade_out = time_in_milliseconds(fromisoformat(item["fade_out"]))
current['cue_in'] = cue_in
current['cue_out'] = cue_out
info = self.services.file_url(id=item['file_id']) cue_in = time_in_seconds(fromisoformat(item["cue_in"]))
current['metadata'] = info cue_out = time_in_seconds(fromisoformat(item["cue_out"]))
current['uri'] = item['file']
current['filesize'] = info['filesize'] current["fade_in"] = fade_in
elif item['stream']: current["fade_out"] = fade_out
current['independent_event'] = True current["cue_in"] = cue_in
current['id'] = item['stream_id'] current["cue_out"] = cue_out
info = self.services.webstream_url(id=item['stream_id'])
current['uri'] = info['url'] info = self.services.file_url(id=item["file_id"])
current['type'] = 'stream_buffer_start' current["metadata"] = info
current["uri"] = item["file"]
current["filesize"] = info["filesize"]
elif item["stream"]:
current["independent_event"] = True
current["id"] = item["stream_id"]
info = self.services.webstream_url(id=item["stream_id"])
current["uri"] = info["url"]
current["type"] = "stream_buffer_start"
# Stream events are instantaneous # Stream events are instantaneous
current['end'] = current['start'] current["end"] = current["start"]
result['{}_0'.format(key)] = { result["{}_0".format(key)] = {
'id': current['id'], "id": current["id"],
'type': 'stream_output_start', "type": "stream_output_start",
'start': current['start'], "start": current["start"],
'end': current['start'], "end": current["start"],
'uri': current['uri'], "uri": current["uri"],
'row_id': current['row_id'], "row_id": current["row_id"],
'independent_event': current['independent_event'], "independent_event": current["independent_event"],
} }
result[end.isoformat()] = { result[end.isoformat()] = {
'type': 'stream_buffer_end', "type": "stream_buffer_end",
'start': current['end'], "start": current["end"],
'end': current['end'], "end": current["end"],
'uri': current['uri'], "uri": current["uri"],
'row_id': current['row_id'], "row_id": current["row_id"],
'independent_event': current['independent_event'], "independent_event": current["independent_event"],
} }
result['{}_0'.format(end.isoformat())] = { result["{}_0".format(end.isoformat())] = {
'type': 'stream_output_end', "type": "stream_output_end",
'start': current['end'], "start": current["end"],
'end': current['end'], "end": current["end"],
'uri': current['uri'], "uri": current["uri"],
'row_id': current['row_id'], "row_id": current["row_id"],
'independent_event': current['independent_event'], "independent_event": current["independent_event"],
} }
return result return result

View File

@ -9,17 +9,19 @@ script_path = os.path.dirname(os.path.realpath(__file__))
print(script_path) print(script_path)
os.chdir(script_path) os.chdir(script_path)
setup(name='api_clients', setup(
version='2.0.0', name="api_clients",
description='LibreTime API Client', version="2.0.0",
url='http://github.com/LibreTime/Libretime', description="LibreTime API Client",
author='LibreTime Contributors', url="http://github.com/LibreTime/Libretime",
license='AGPLv3', author="LibreTime Contributors",
packages=['api_clients'], license="AGPLv3",
scripts=[], packages=["api_clients"],
install_requires=[ scripts=[],
'configobj', install_requires=[
'python-dateutil', "configobj",
], "python-dateutil",
zip_safe=False, ],
data_files=[]) zip_safe=False,
data_files=[],
)

View File

@ -2,6 +2,7 @@
import unittest import unittest
from api_clients.utils import ApcUrl, UrlBadParam, IncompleteUrl from api_clients.utils import ApcUrl, UrlBadParam, IncompleteUrl
class TestApcUrl(unittest.TestCase): class TestApcUrl(unittest.TestCase):
def test_init(self): def test_init(self):
url = "/testing" url = "/testing"
@ -10,22 +11,23 @@ class TestApcUrl(unittest.TestCase):
def test_params_1(self): def test_params_1(self):
u = ApcUrl("/testing/{key}") u = ApcUrl("/testing/{key}")
self.assertEqual(u.params(key='val').url(), '/testing/val') self.assertEqual(u.params(key="val").url(), "/testing/val")
def test_params_2(self): def test_params_2(self):
u = ApcUrl('/testing/{key}/{api}/more_testing') u = ApcUrl("/testing/{key}/{api}/more_testing")
full_url = u.params(key="AAA",api="BBB").url() full_url = u.params(key="AAA", api="BBB").url()
self.assertEqual(full_url, '/testing/AAA/BBB/more_testing') self.assertEqual(full_url, "/testing/AAA/BBB/more_testing")
def test_params_ex(self): def test_params_ex(self):
u = ApcUrl("/testing/{key}") u = ApcUrl("/testing/{key}")
with self.assertRaises(UrlBadParam): with self.assertRaises(UrlBadParam):
u.params(bad_key='testing') u.params(bad_key="testing")
def test_url(self): def test_url(self):
u = "one/two/three" u = "one/two/three"
self.assertEqual( ApcUrl(u).url(), u ) self.assertEqual(ApcUrl(u).url(), u)
def test_url_ex(self): def test_url_ex(self):
u = ApcUrl('/{one}/{two}/three').params(two='testing') u = ApcUrl("/{one}/{two}/three").params(two="testing")
with self.assertRaises(IncompleteUrl): u.url() with self.assertRaises(IncompleteUrl):
u.url()

View File

@ -4,39 +4,43 @@ import json
from mock import MagicMock, patch from mock import MagicMock, patch
from api_clients.utils import ApcUrl, ApiRequest from api_clients.utils import ApcUrl, ApiRequest
class ResponseInfo: class ResponseInfo:
@property @property
def headers(self): def headers(self):
return {'content-type': 'application/json'} return {"content-type": "application/json"}
def json(self): def json(self):
return {'ok', 'ok'} return {"ok", "ok"}
class TestApiRequest(unittest.TestCase): class TestApiRequest(unittest.TestCase):
def test_init(self): def test_init(self):
u = ApiRequest('request_name', ApcUrl('/test/ing')) u = ApiRequest("request_name", ApcUrl("/test/ing"))
self.assertEqual(u.name, "request_name") self.assertEqual(u.name, "request_name")
def test_call_json(self): def test_call_json(self):
ret = {'ok':'ok'} ret = {"ok": "ok"}
read = MagicMock() read = MagicMock()
read.headers = {'content-type': 'application/json'} read.headers = {"content-type": "application/json"}
read.json = MagicMock(return_value=ret) read.json = MagicMock(return_value=ret)
u = 'http://localhost/testing' u = "http://localhost/testing"
with patch('requests.get') as mock_method: with patch("requests.get") as mock_method:
mock_method.return_value = read mock_method.return_value = read
request = ApiRequest('mm', ApcUrl(u))() request = ApiRequest("mm", ApcUrl(u))()
self.assertEqual(request, ret) self.assertEqual(request, ret)
def test_call_html(self): def test_call_html(self):
ret = '<html><head></head><body></body></html>' ret = "<html><head></head><body></body></html>"
read = MagicMock() read = MagicMock()
read.headers = {'content-type': 'application/html'} read.headers = {"content-type": "application/html"}
read.text = MagicMock(return_value=ret) read.text = MagicMock(return_value=ret)
u = 'http://localhost/testing' u = "http://localhost/testing"
with patch('requests.get') as mock_method: with patch("requests.get") as mock_method:
mock_method.return_value = read mock_method.return_value = read
request = ApiRequest('mm', ApcUrl(u))() request = ApiRequest("mm", ApcUrl(u))()
self.assertEqual(request.text(), ret) self.assertEqual(request.text(), ret)
if __name__ == '__main__': unittest.main()
if __name__ == "__main__":
unittest.main()

View File

@ -6,18 +6,19 @@ from configobj import ConfigObj
from api_clients.version1 import api_config from api_clients.version1 import api_config
from api_clients.utils import RequestProvider from api_clients.utils import RequestProvider
class TestRequestProvider(unittest.TestCase): class TestRequestProvider(unittest.TestCase):
def setUp(self): def setUp(self):
self.cfg = api_config self.cfg = api_config
self.cfg['general'] = {} self.cfg["general"] = {}
self.cfg['general']['base_dir'] = '/test' self.cfg["general"]["base_dir"] = "/test"
self.cfg['general']['base_port'] = 80 self.cfg["general"]["base_port"] = 80
self.cfg['general']['base_url'] = 'localhost' self.cfg["general"]["base_url"] = "localhost"
self.cfg['general']['api_key'] = 'TEST_KEY' self.cfg["general"]["api_key"] = "TEST_KEY"
self.cfg['api_base'] = 'api' self.cfg["api_base"] = "api"
def test_test(self): def test_test(self):
self.assertTrue('general' in self.cfg) self.assertTrue("general" in self.cfg)
def test_init(self): def test_init(self):
rp = RequestProvider(self.cfg, {}) rp = RequestProvider(self.cfg, {})
@ -25,12 +26,14 @@ class TestRequestProvider(unittest.TestCase):
def test_contains(self): def test_contains(self):
methods = { methods = {
'upload_recorded': '/1/', "upload_recorded": "/1/",
'update_media_url': '/2/', "update_media_url": "/2/",
'list_all_db_files': '/3/', "list_all_db_files": "/3/",
} }
rp = RequestProvider(self.cfg, methods) rp = RequestProvider(self.cfg, methods)
for meth in methods: for meth in methods:
self.assertTrue(meth in rp.requests) self.assertTrue(meth in rp.requests)
if __name__ == '__main__': unittest.main()
if __name__ == "__main__":
unittest.main()

View File

@ -4,13 +4,14 @@ import configparser
import unittest import unittest
from api_clients import utils from api_clients import utils
def get_force_ssl(value, useConfigParser): def get_force_ssl(value, useConfigParser):
config = {} config = {}
if useConfigParser: if useConfigParser:
config = configparser.ConfigParser() config = configparser.ConfigParser()
config['general'] = { config["general"] = {
'base_port': 80, "base_port": 80,
'force_ssl': value, "force_ssl": value,
} }
return utils.get_protocol(config) return utils.get_protocol(config)
@ -27,65 +28,65 @@ class TestTime(unittest.TestCase):
class TestGetProtocol(unittest.TestCase): class TestGetProtocol(unittest.TestCase):
def test_dict_config_empty_http(self): def test_dict_config_empty_http(self):
config = {'general': {}} config = {"general": {}}
protocol = utils.get_protocol(config) protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http') self.assertEqual(protocol, "http")
def test_dict_config_http(self): def test_dict_config_http(self):
config = { config = {
'general': { "general": {
'base_port': 80, "base_port": 80,
}, },
} }
protocol = utils.get_protocol(config) protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http') self.assertEqual(protocol, "http")
def test_dict_config_https(self): def test_dict_config_https(self):
config = { config = {
'general': { "general": {
'base_port': 443, "base_port": 443,
}, },
} }
protocol = utils.get_protocol(config) protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'https') self.assertEqual(protocol, "https")
def test_dict_config_force_https(self): def test_dict_config_force_https(self):
postive_values = ['yes', 'Yes', 'True', 'true', True] postive_values = ["yes", "Yes", "True", "true", True]
negative_values = ['no', 'No', 'False', 'false', False] negative_values = ["no", "No", "False", "false", False]
for value in postive_values: for value in postive_values:
self.assertEqual(get_force_ssl(value, False), 'https') self.assertEqual(get_force_ssl(value, False), "https")
for value in negative_values: for value in negative_values:
self.assertEqual(get_force_ssl(value, False), 'http') self.assertEqual(get_force_ssl(value, False), "http")
def test_configparser_config_empty_http(self): def test_configparser_config_empty_http(self):
config = configparser.ConfigParser() config = configparser.ConfigParser()
config['general'] = {} config["general"] = {}
protocol = utils.get_protocol(config) protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http') self.assertEqual(protocol, "http")
def test_configparser_config_http(self): def test_configparser_config_http(self):
config = configparser.ConfigParser() config = configparser.ConfigParser()
config['general'] = { config["general"] = {
'base_port': 80, "base_port": 80,
} }
protocol = utils.get_protocol(config) protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http') self.assertEqual(protocol, "http")
def test_configparser_config_https(self): def test_configparser_config_https(self):
config = configparser.ConfigParser() config = configparser.ConfigParser()
config['general'] = { config["general"] = {
'base_port': 443, "base_port": 443,
} }
protocol = utils.get_protocol(config) protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'https') self.assertEqual(protocol, "https")
def test_configparser_config_force_https(self): def test_configparser_config_force_https(self):
postive_values = ['yes', 'Yes', 'True', 'true', True] postive_values = ["yes", "Yes", "True", "true", True]
negative_values = ['no', 'No', 'False', 'false', False] negative_values = ["no", "No", "False", "false", False]
for value in postive_values: for value in postive_values:
self.assertEqual(get_force_ssl(value, True), 'https') self.assertEqual(get_force_ssl(value, True), "https")
for value in negative_values: for value in negative_values:
self.assertEqual(get_force_ssl(value, True), 'http') self.assertEqual(get_force_ssl(value, True), "http")
def test_fromisoformat(self): def test_fromisoformat(self):
time = { time = {
@ -96,4 +97,6 @@ class TestGetProtocol(unittest.TestCase):
result = utils.fromisoformat(time_string) result = utils.fromisoformat(time_string)
self.assertEqual(result, expected) self.assertEqual(result, expected)
if __name__ == '__main__': unittest.main()
if __name__ == "__main__":
unittest.main()

View File

@ -9,14 +9,18 @@ if os.geteuid() != 0:
print("Please run this as root.") print("Please run this as root.")
sys.exit(1) sys.exit(1)
def get_current_script_dir(): def get_current_script_dir():
current_script_dir = os.path.realpath(__file__) current_script_dir = os.path.realpath(__file__)
index = current_script_dir.rindex('/') index = current_script_dir.rindex("/")
return current_script_dir[0:index] return current_script_dir[0:index]
try: try:
current_script_dir = get_current_script_dir() current_script_dir = get_current_script_dir()
shutil.copy(current_script_dir+"/../airtime-icecast-status.xsl", "/usr/share/icecast2/web") shutil.copy(
current_script_dir + "/../airtime-icecast-status.xsl", "/usr/share/icecast2/web"
)
except Exception as e: except Exception as e:
print("exception: {}".format(e)) print("exception: {}".format(e))

View File

@ -2,5 +2,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import runpy import runpy
# Run the liquidsoap python module # Run the liquidsoap python module
runpy.run_module('liquidsoap') runpy.run_module("liquidsoap")

View File

@ -3,4 +3,3 @@
import runpy import runpy
runpy.run_module("pypo", run_name="__main__") runpy.run_module("pypo", run_name="__main__")

View File

@ -27,27 +27,75 @@ import json
from configobj import ConfigObj from configobj import ConfigObj
# custom imports # custom imports
#from util import * # from util import *
from api_clients import version1 as api_client from api_clients import version1 as api_client
LOG_LEVEL = logging.INFO LOG_LEVEL = logging.INFO
LOG_PATH = '/var/log/airtime/pypo/notify.log' LOG_PATH = "/var/log/airtime/pypo/notify.log"
# help screeen / info # help screeen / info
usage = "%prog [options]" + " - notification gateway" usage = "%prog [options]" + " - notification gateway"
parser = OptionParser(usage=usage) parser = OptionParser(usage=usage)
# Options # Options
parser.add_option("-d", "--data", help="Pass JSON data from Liquidsoap into this script.", metavar="data") parser.add_option(
parser.add_option("-m", "--media-id", help="ID of the file that is currently playing.", metavar="media_id") "-d",
parser.add_option("-e", "--error", action="store", dest="error", type="string", help="Liquidsoap error msg.", metavar="error_msg") "--data",
help="Pass JSON data from Liquidsoap into this script.",
metavar="data",
)
parser.add_option(
"-m",
"--media-id",
help="ID of the file that is currently playing.",
metavar="media_id",
)
parser.add_option(
"-e",
"--error",
action="store",
dest="error",
type="string",
help="Liquidsoap error msg.",
metavar="error_msg",
)
parser.add_option("-s", "--stream-id", help="ID stream", metavar="stream_id") parser.add_option("-s", "--stream-id", help="ID stream", metavar="stream_id")
parser.add_option("-c", "--connect", help="Liquidsoap connected", action="store_true", metavar="connect") parser.add_option(
parser.add_option("-t", "--time", help="Liquidsoap boot up time", action="store", dest="time", metavar="time", type="string") "-c",
parser.add_option("-x", "--source-name", help="source connection name", metavar="source_name") "--connect",
parser.add_option("-y", "--source-status", help="source connection status", metavar="source_status") help="Liquidsoap connected",
parser.add_option("-w", "--webstream", help="JSON metadata associated with webstream", metavar="json_data") action="store_true",
parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started", metavar="json_data", action="store_true", default=False) metavar="connect",
)
parser.add_option(
"-t",
"--time",
help="Liquidsoap boot up time",
action="store",
dest="time",
metavar="time",
type="string",
)
parser.add_option(
"-x", "--source-name", help="source connection name", metavar="source_name"
)
parser.add_option(
"-y", "--source-status", help="source connection status", metavar="source_status"
)
parser.add_option(
"-w",
"--webstream",
help="JSON metadata associated with webstream",
metavar="json_data",
)
parser.add_option(
"-n",
"--liquidsoap-started",
help="notify liquidsoap started",
metavar="json_data",
action="store_true",
default=False,
)
# parse options # parse options
@ -55,12 +103,15 @@ parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started"
# Set up logging # Set up logging
logging.captureWarnings(True) logging.captureWarnings(True)
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s") logFormatter = logging.Formatter(
"%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger() rootLogger = logging.getLogger()
rootLogger.setLevel(LOG_LEVEL) rootLogger.setLevel(LOG_LEVEL)
fileHandler = logging.handlers.RotatingFileHandler(filename=LOG_PATH, maxBytes=1024*1024*30, fileHandler = logging.handlers.RotatingFileHandler(
backupCount=8) filename=LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8
)
fileHandler.setFormatter(logFormatter) fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler) rootLogger.addHandler(fileHandler)
@ -69,15 +120,15 @@ consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler) rootLogger.addHandler(consoleHandler)
logger = rootLogger logger = rootLogger
#need to wait for Python 2.7 for this.. # need to wait for Python 2.7 for this..
#logging.captureWarnings(True) # logging.captureWarnings(True)
# loading config file # loading config file
try: try:
config = ConfigObj('/etc/airtime/airtime.conf') config = ConfigObj("/etc/airtime/airtime.conf")
except Exception as e: except Exception as e:
logger.error('Error loading config file: %s', e) logger.error("Error loading config file: %s", e)
sys.exit() sys.exit()
@ -90,39 +141,41 @@ class Notify:
self.api_client.notify_liquidsoap_started() self.api_client.notify_liquidsoap_started()
def notify_media_start_playing(self, media_id): def notify_media_start_playing(self, media_id):
logger.debug('#################################################') logger.debug("#################################################")
logger.debug('# Calling server to update about what\'s playing #') logger.debug("# Calling server to update about what's playing #")
logger.debug('#################################################') logger.debug("#################################################")
response = self.api_client.notify_media_item_start_playing(media_id) response = self.api_client.notify_media_item_start_playing(media_id)
logger.debug("Response: " + json.dumps(response)) logger.debug("Response: " + json.dumps(response))
# @pram time: time that LS started # @pram time: time that LS started
def notify_liquidsoap_status(self, msg, stream_id, time): def notify_liquidsoap_status(self, msg, stream_id, time):
logger.info('#################################################') logger.info("#################################################")
logger.info('# Calling server to update liquidsoap status #') logger.info("# Calling server to update liquidsoap status #")
logger.info('#################################################') logger.info("#################################################")
logger.info('msg = ' + str(msg)) logger.info("msg = " + str(msg))
response = self.api_client.notify_liquidsoap_status(msg, stream_id, time) response = self.api_client.notify_liquidsoap_status(msg, stream_id, time)
logger.info("Response: " + json.dumps(response)) logger.info("Response: " + json.dumps(response))
def notify_source_status(self, source_name, status): def notify_source_status(self, source_name, status):
logger.debug('#################################################') logger.debug("#################################################")
logger.debug('# Calling server to update source status #') logger.debug("# Calling server to update source status #")
logger.debug('#################################################') logger.debug("#################################################")
logger.debug('msg = ' + str(source_name) + ' : ' + str(status)) logger.debug("msg = " + str(source_name) + " : " + str(status))
response = self.api_client.notify_source_status(source_name, status) response = self.api_client.notify_source_status(source_name, status)
logger.debug("Response: " + json.dumps(response)) logger.debug("Response: " + json.dumps(response))
def notify_webstream_data(self, data, media_id): def notify_webstream_data(self, data, media_id):
logger.debug('#################################################') logger.debug("#################################################")
logger.debug('# Calling server to update webstream data #') logger.debug("# Calling server to update webstream data #")
logger.debug('#################################################') logger.debug("#################################################")
response = self.api_client.notify_webstream_data(data, media_id) response = self.api_client.notify_webstream_data(data, media_id)
logger.debug("Response: " + json.dumps(response)) logger.debug("Response: " + json.dumps(response))
def run_with_options(self, options): def run_with_options(self, options):
if options.error and options.stream_id: if options.error and options.stream_id:
self.notify_liquidsoap_status(options.error, options.stream_id, options.time) self.notify_liquidsoap_status(
options.error, options.stream_id, options.time
)
elif options.connect and options.stream_id: elif options.connect and options.stream_id:
self.notify_liquidsoap_status("OK", options.stream_id, options.time) self.notify_liquidsoap_status("OK", options.stream_id, options.time)
elif options.source_name and options.source_status: elif options.source_name and options.source_status:
@ -134,15 +187,17 @@ class Notify:
elif options.liquidsoap_started: elif options.liquidsoap_started:
self.notify_liquidsoap_started() self.notify_liquidsoap_started()
else: else:
logger.debug("Unrecognized option in options({}). Doing nothing".format(options)) logger.debug(
"Unrecognized option in options({}). Doing nothing".format(options)
)
if __name__ == '__main__': if __name__ == "__main__":
print() print()
print('#########################################') print("#########################################")
print('# *** pypo *** #') print("# *** pypo *** #")
print('# pypo notification gateway #') print("# pypo notification gateway #")
print('#########################################') print("#########################################")
# initialize # initialize
try: try:
@ -150,4 +205,3 @@ if __name__ == '__main__':
n.run_with_options(options) n.run_with_options(options)
except Exception as e: except Exception as e:
print(traceback.format_exc()) print(traceback.format_exc())

View File

@ -7,9 +7,10 @@ import time
import traceback import traceback
from api_clients.version1 import AirtimeApiClient from api_clients.version1 import AirtimeApiClient
def generate_liquidsoap_config(ss): def generate_liquidsoap_config(ss):
data = ss['msg'] data = ss["msg"]
fh = open('/etc/airtime/liquidsoap.cfg', 'w') fh = open("/etc/airtime/liquidsoap.cfg", "w")
fh.write("################################################\n") fh.write("################################################\n")
fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n") fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n")
fh.write("################################################\n") fh.write("################################################\n")
@ -17,17 +18,17 @@ def generate_liquidsoap_config(ss):
for key, value in data.items(): for key, value in data.items():
try: try:
if not "port" in key and not "bitrate" in key: # Stupid hack if not "port" in key and not "bitrate" in key: # Stupid hack
raise ValueError() raise ValueError()
str_buffer = "%s = %s\n" % (key, int(value)) str_buffer = "%s = %s\n" % (key, int(value))
except ValueError: except ValueError:
try: # Is it a boolean? try: # Is it a boolean?
if value=="true" or value=="false": if value == "true" or value == "false":
str_buffer = "%s = %s\n" % (key, value.lower()) str_buffer = "%s = %s\n" % (key, value.lower())
else: else:
raise ValueError() # Just drop into the except below raise ValueError() # Just drop into the except below
except: #Everything else is a string except: # Everything else is a string
str_buffer = "%s = \"%s\"\n" % (key, value) str_buffer = '%s = "%s"\n' % (key, value)
fh.write(str_buffer) fh.write(str_buffer)
# ignore squashes unused variable errors from Liquidsoap # ignore squashes unused variable errors from Liquidsoap
@ -38,8 +39,9 @@ def generate_liquidsoap_config(ss):
fh.write('auth_path = "%s/liquidsoap_auth.py"\n' % auth_path) fh.write('auth_path = "%s/liquidsoap_auth.py"\n' % auth_path)
fh.close() fh.close()
def run(): def run():
logging.basicConfig(format='%(message)s') logging.basicConfig(format="%(message)s")
attempts = 0 attempts = 0
max_attempts = 10 max_attempts = 10
successful = False successful = False

View File

@ -9,16 +9,16 @@ dj_type = sys.argv[1]
username = sys.argv[2] username = sys.argv[2]
password = sys.argv[3] password = sys.argv[3]
source_type = '' source_type = ""
if dj_type == '--master': if dj_type == "--master":
source_type = 'master' source_type = "master"
elif dj_type == '--dj': elif dj_type == "--dj":
source_type = 'dj' source_type = "dj"
response = api_clients.check_live_stream_auth(username, password, source_type) response = api_clients.check_live_stream_auth(username, password, source_type)
if 'msg' in response and response['msg'] == True: if "msg" in response and response["msg"] == True:
print(response['msg']) print(response["msg"])
sys.exit(0) sys.exit(0)
else: else:
print(False) print(False)

View File

@ -4,17 +4,16 @@ import telnetlib
import sys import sys
try: try:
config = ConfigObj('/etc/airtime/airtime.conf') config = ConfigObj("/etc/airtime/airtime.conf")
LS_HOST = config['pypo']['ls_host'] LS_HOST = config["pypo"]["ls_host"]
LS_PORT = config['pypo']['ls_port'] LS_PORT = config["pypo"]["ls_port"]
tn = telnetlib.Telnet(LS_HOST, LS_PORT) tn = telnetlib.Telnet(LS_HOST, LS_PORT)
tn.write("master_harbor.stop\n") tn.write("master_harbor.stop\n")
tn.write("live_dj_harbor.stop\n") tn.write("live_dj_harbor.stop\n")
tn.write('exit\n') tn.write("exit\n")
tn.read_all() tn.read_all()
except Exception as e: except Exception as e:
print("Error loading config file: {}".format(e)) print("Error loading config file: {}".format(e))
sys.exit() sys.exit()

View File

@ -18,6 +18,7 @@ from configobj import ConfigObj
from datetime import datetime from datetime import datetime
from optparse import OptionParser from optparse import OptionParser
import importlib import importlib
try: try:
from queue import Queue from queue import Queue
except ImportError: # Python 2.7.5 (CentOS 7) except ImportError: # Python 2.7.5 (CentOS 7)

View File

@ -10,9 +10,10 @@ import time
from api_clients import version1 as api_client from api_clients import version1 as api_client
class ListenerStat(Thread): class ListenerStat(Thread):
HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
def __init__(self, config, logger=None): def __init__(self, config, logger=None):
Thread.__init__(self) Thread.__init__(self)
@ -28,50 +29,49 @@ class ListenerStat(Thread):
for node in nodelist: for node in nodelist:
if node.nodeType == node.TEXT_NODE: if node.nodeType == node.TEXT_NODE:
rc.append(node.data) rc.append(node.data)
return ''.join(rc) return "".join(rc)
def get_stream_parameters(self): def get_stream_parameters(self):
#[{"user":"", "password":"", "url":"", "port":""},{},{}] # [{"user":"", "password":"", "url":"", "port":""},{},{}]
return self.api_client.get_stream_parameters() return self.api_client.get_stream_parameters()
def get_stream_server_xml(self, ip, url, is_shoutcast=False): def get_stream_server_xml(self, ip, url, is_shoutcast=False):
auth_string = "%(admin_user)s:%(admin_pass)s" % ip auth_string = "%(admin_user)s:%(admin_pass)s" % ip
encoded = base64.b64encode(auth_string.encode('utf-8')) encoded = base64.b64encode(auth_string.encode("utf-8"))
header = {"Authorization":"Basic %s" % encoded.decode('ascii')} header = {"Authorization": "Basic %s" % encoded.decode("ascii")}
if is_shoutcast: if is_shoutcast:
#user agent is required for shoutcast auth, otherwise it returns 404. # user agent is required for shoutcast auth, otherwise it returns 404.
user_agent = "Mozilla/5.0 (Linux; rv:22.0) Gecko/20130405 Firefox/22.0" user_agent = "Mozilla/5.0 (Linux; rv:22.0) Gecko/20130405 Firefox/22.0"
header["User-Agent"] = user_agent header["User-Agent"] = user_agent
req = urllib.request.Request( req = urllib.request.Request(
#assuming that the icecast stats path is /admin/stats.xml # assuming that the icecast stats path is /admin/stats.xml
#need to fix this # need to fix this
url=url, url=url,
headers=header) headers=header,
)
f = urllib.request.urlopen(req, timeout=ListenerStat.HTTP_REQUEST_TIMEOUT) f = urllib.request.urlopen(req, timeout=ListenerStat.HTTP_REQUEST_TIMEOUT)
document = f.read() document = f.read()
return document return document
def get_icecast_stats(self, ip): def get_icecast_stats(self, ip):
document = None document = None
if "airtime.pro" in ip["host"].lower(): if "airtime.pro" in ip["host"].lower():
url = 'http://%(host)s:%(port)s/stats.xsl' % ip url = "http://%(host)s:%(port)s/stats.xsl" % ip
document = self.get_stream_server_xml(ip, url) document = self.get_stream_server_xml(ip, url)
else: else:
url = 'http://%(host)s:%(port)s/admin/stats.xml' % ip url = "http://%(host)s:%(port)s/admin/stats.xml" % ip
document = self.get_stream_server_xml(ip, url) document = self.get_stream_server_xml(ip, url)
dom = defusedxml.minidom.parseString(document) dom = defusedxml.minidom.parseString(document)
sources = dom.getElementsByTagName("source") sources = dom.getElementsByTagName("source")
mount_stats = None mount_stats = None
for s in sources: for s in sources:
#drop the leading '/' character # drop the leading '/' character
mount_name = s.getAttribute("mount")[1:] mount_name = s.getAttribute("mount")[1:]
if mount_name == ip["mount"]: if mount_name == ip["mount"]:
timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
@ -80,14 +80,16 @@ class ListenerStat(Thread):
if len(listeners): if len(listeners):
num_listeners = self.get_node_text(listeners[0].childNodes) num_listeners = self.get_node_text(listeners[0].childNodes)
mount_stats = {"timestamp":timestamp, \ mount_stats = {
"num_listeners": num_listeners, \ "timestamp": timestamp,
"mount_name": mount_name} "num_listeners": num_listeners,
"mount_name": mount_name,
}
return mount_stats return mount_stats
def get_shoutcast_stats(self, ip): def get_shoutcast_stats(self, ip):
url = 'http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml' % ip url = "http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml" % ip
document = self.get_stream_server_xml(ip, url, is_shoutcast=True) document = self.get_stream_server_xml(ip, url, is_shoutcast=True)
dom = defusedxml.minidom.parseString(document) dom = defusedxml.minidom.parseString(document)
current_listeners = dom.getElementsByTagName("CURRENTLISTENERS") current_listeners = dom.getElementsByTagName("CURRENTLISTENERS")
@ -97,34 +99,37 @@ class ListenerStat(Thread):
if len(current_listeners): if len(current_listeners):
num_listeners = self.get_node_text(current_listeners[0].childNodes) num_listeners = self.get_node_text(current_listeners[0].childNodes)
mount_stats = {"timestamp":timestamp, \ mount_stats = {
"num_listeners": num_listeners, \ "timestamp": timestamp,
"mount_name": "shoutcast"} "num_listeners": num_listeners,
"mount_name": "shoutcast",
}
return mount_stats return mount_stats
def get_stream_stats(self, stream_parameters): def get_stream_stats(self, stream_parameters):
stats = [] stats = []
#iterate over stream_parameters which is a list of dicts. Each dict # iterate over stream_parameters which is a list of dicts. Each dict
#represents one Airtime stream (currently this limit is 3). # represents one Airtime stream (currently this limit is 3).
#Note that there can be optimizations done, since if all three # Note that there can be optimizations done, since if all three
#streams are the same server, we will still initiate 3 separate # streams are the same server, we will still initiate 3 separate
#connections # connections
for k, v in stream_parameters.items(): for k, v in stream_parameters.items():
if v["enable"] == 'true': if v["enable"] == "true":
try: try:
if v["output"] == "icecast": if v["output"] == "icecast":
mount_stats = self.get_icecast_stats(v) mount_stats = self.get_icecast_stats(v)
if mount_stats: stats.append(mount_stats) if mount_stats:
stats.append(mount_stats)
else: else:
stats.append(self.get_shoutcast_stats(v)) stats.append(self.get_shoutcast_stats(v))
self.update_listener_stat_error(v["mount"], 'OK') self.update_listener_stat_error(v["mount"], "OK")
except Exception as e: except Exception as e:
try: try:
self.update_listener_stat_error(v["mount"], str(e)) self.update_listener_stat_error(v["mount"], str(e))
except Exception as e: except Exception as e:
self.logger.error('Exception: %s', e) self.logger.error("Exception: %s", e)
return stats return stats
@ -132,15 +137,15 @@ class ListenerStat(Thread):
self.api_client.push_stream_stats(stats) self.api_client.push_stream_stats(stats)
def update_listener_stat_error(self, stream_id, error): def update_listener_stat_error(self, stream_id, error):
keyname = '%s_listener_stat_error' % stream_id keyname = "%s_listener_stat_error" % stream_id
data = {keyname: error} data = {keyname: error}
self.api_client.update_stream_setting_table(data) self.api_client.update_stream_setting_table(data)
def run(self): def run(self):
#Wake up every 120 seconds and gather icecast statistics. Note that we # Wake up every 120 seconds and gather icecast statistics. Note that we
#are currently querying the server every 2 minutes for list of # are currently querying the server every 2 minutes for list of
#mountpoints as well. We could remove this query if we hooked into # mountpoints as well. We could remove this query if we hooked into
#rabbitmq events, and listened for these changes instead. # rabbitmq events, and listened for these changes instead.
while True: while True:
try: try:
stream_parameters = self.get_stream_parameters() stream_parameters = self.get_stream_parameters()
@ -149,25 +154,27 @@ class ListenerStat(Thread):
if stats: if stats:
self.push_stream_stats(stats) self.push_stream_stats(stats)
except Exception as e: except Exception as e:
self.logger.error('Exception: %s', e) self.logger.error("Exception: %s", e)
time.sleep(120) time.sleep(120)
self.logger.info('ListenerStat thread exiting') self.logger.info("ListenerStat thread exiting")
if __name__ == "__main__": if __name__ == "__main__":
# create logger # create logger
logger = logging.getLogger('std_out') logger = logging.getLogger("std_out")
logger.setLevel(logging.DEBUG) logger.setLevel(logging.DEBUG)
# create console handler and set level to debug # create console handler and set level to debug
#ch = logging.StreamHandler() # ch = logging.StreamHandler()
#ch.setLevel(logging.DEBUG) # ch.setLevel(logging.DEBUG)
# create formatter # create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s') formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s"
)
# add formatter to ch # add formatter to ch
#ch.setFormatter(formatter) # ch.setFormatter(formatter)
# add ch to logger # add ch to logger
#logger.addHandler(ch) # logger.addHandler(ch)
#ls = ListenerStat(logger=logger) # ls = ListenerStat(logger=logger)
#ls.run() # ls.run()

View File

@ -2,6 +2,7 @@
import re import re
from packaging.version import Version, parse from packaging.version import Version, parse
def version_cmp(version1, version2): def version_cmp(version1, version2):
version1 = parse(version1) version1 = parse(version1)
version2 = parse(version2) version2 = parse(version2)
@ -11,12 +12,14 @@ def version_cmp(version1, version2):
return 0 return 0
return -1 return -1
def date_interval_to_seconds(interval): def date_interval_to_seconds(interval):
""" """
Convert timedelta object into int representing the number of seconds. If Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0. number of seconds is less than 0, then return 0.
""" """
seconds = (interval.microseconds + \ seconds = (
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) interval.microseconds + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
return seconds return seconds

View File

@ -23,20 +23,24 @@ from .timeout import ls_timeout
def keyboardInterruptHandler(signum, frame): def keyboardInterruptHandler(signum, frame):
logger = logging.getLogger() logger = logging.getLogger()
logger.info('\nKeyboard Interrupt\n') logger.info("\nKeyboard Interrupt\n")
sys.exit(0) sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler) signal.signal(signal.SIGINT, keyboardInterruptHandler)
logging.captureWarnings(True) logging.captureWarnings(True)
POLL_INTERVAL = 400 POLL_INTERVAL = 400
class PypoFetch(Thread):
def __init__(self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config): class PypoFetch(Thread):
def __init__(
self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config
):
Thread.__init__(self) Thread.__init__(self)
#Hacky... # Hacky...
PypoFetch.ref = self PypoFetch.ref = self
self.v1_api_client = v1_api_client.AirtimeApiClient() self.v1_api_client = v1_api_client.AirtimeApiClient()
@ -76,6 +80,7 @@ class PypoFetch(Thread):
Handle a message from RabbitMQ, put it into our yucky global var. Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this. Hopefully there is a better way to do this.
""" """
def handle_message(self, message): def handle_message(self, message):
try: try:
self.logger.info("Received event from Pypo Message Handler: %s" % message) self.logger.info("Received event from Pypo Message Handler: %s" % message)
@ -85,50 +90,52 @@ class PypoFetch(Thread):
except (UnicodeDecodeError, AttributeError): except (UnicodeDecodeError, AttributeError):
pass pass
m = json.loads(message) m = json.loads(message)
command = m['event_type'] command = m["event_type"]
self.logger.info("Handling command: " + command) self.logger.info("Handling command: " + command)
if command == 'update_schedule': if command == "update_schedule":
self.schedule_data = m['schedule'] self.schedule_data = m["schedule"]
self.process_schedule(self.schedule_data) self.process_schedule(self.schedule_data)
elif command == 'reset_liquidsoap_bootstrap': elif command == "reset_liquidsoap_bootstrap":
self.set_bootstrap_variables() self.set_bootstrap_variables()
elif command == 'update_stream_setting': elif command == "update_stream_setting":
self.logger.info("Updating stream setting...") self.logger.info("Updating stream setting...")
self.regenerate_liquidsoap_conf(m['setting']) self.regenerate_liquidsoap_conf(m["setting"])
elif command == 'update_stream_format': elif command == "update_stream_format":
self.logger.info("Updating stream format...") self.logger.info("Updating stream format...")
self.update_liquidsoap_stream_format(m['stream_format']) self.update_liquidsoap_stream_format(m["stream_format"])
elif command == 'update_station_name': elif command == "update_station_name":
self.logger.info("Updating station name...") self.logger.info("Updating station name...")
self.update_liquidsoap_station_name(m['station_name']) self.update_liquidsoap_station_name(m["station_name"])
elif command == 'update_transition_fade': elif command == "update_transition_fade":
self.logger.info("Updating transition_fade...") self.logger.info("Updating transition_fade...")
self.update_liquidsoap_transition_fade(m['transition_fade']) self.update_liquidsoap_transition_fade(m["transition_fade"])
elif command == 'switch_source': elif command == "switch_source":
self.logger.info("switch_on_source show command received...") self.logger.info("switch_on_source show command received...")
self.pypo_liquidsoap.\ self.pypo_liquidsoap.get_telnet_dispatcher().switch_source(
get_telnet_dispatcher().\ m["sourcename"], m["status"]
switch_source(m['sourcename'], m['status']) )
elif command == 'disconnect_source': elif command == "disconnect_source":
self.logger.info("disconnect_on_source show command received...") self.logger.info("disconnect_on_source show command received...")
self.pypo_liquidsoap.get_telnet_dispatcher().\ self.pypo_liquidsoap.get_telnet_dispatcher().disconnect_source(
disconnect_source(m['sourcename']) m["sourcename"]
)
else: else:
self.logger.info("Unknown command: %s" % command) self.logger.info("Unknown command: %s" % command)
# update timeout value # update timeout value
if command == 'update_schedule': if command == "update_schedule":
self.listener_timeout = POLL_INTERVAL self.listener_timeout = POLL_INTERVAL
else: else:
self.listener_timeout = self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL self.listener_timeout = (
self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL
)
if self.listener_timeout < 0: if self.listener_timeout < 0:
self.listener_timeout = 0 self.listener_timeout = 0
self.logger.info("New timeout: %s" % self.listener_timeout) self.logger.info("New timeout: %s" % self.listener_timeout)
except Exception as e: except Exception as e:
self.logger.exception("Exception in handling Message Handler message") self.logger.exception("Exception in handling Message Handler message")
def switch_source_temp(self, sourcename, status): def switch_source_temp(self, sourcename, status):
self.logger.debug('Switching source: %s to "%s" status', sourcename, status) self.logger.debug('Switching source: %s to "%s" status', sourcename, status)
command = "streams." command = "streams."
@ -149,25 +156,28 @@ class PypoFetch(Thread):
""" """
Initialize Liquidsoap environment Initialize Liquidsoap environment
""" """
def set_bootstrap_variables(self): def set_bootstrap_variables(self):
self.logger.debug('Getting information needed on bootstrap from Airtime') self.logger.debug("Getting information needed on bootstrap from Airtime")
try: try:
info = self.v1_api_client.get_bootstrap_info() info = self.v1_api_client.get_bootstrap_info()
except Exception as e: except Exception as e:
self.logger.exception('Unable to get bootstrap info.. Exiting pypo...') self.logger.exception("Unable to get bootstrap info.. Exiting pypo...")
self.logger.debug('info:%s', info) self.logger.debug("info:%s", info)
commands = [] commands = []
for k, v in info['switch_status'].items(): for k, v in info["switch_status"].items():
commands.append(self.switch_source_temp(k, v)) commands.append(self.switch_source_temp(k, v))
stream_format = info['stream_label'] stream_format = info["stream_label"]
station_name = info['station_name'] station_name = info["station_name"]
fade = info['transition_fade'] fade = info["transition_fade"]
commands.append(('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8')) commands.append(
commands.append(('vars.station_name %s\n' % station_name).encode('utf-8')) ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8")
commands.append(('vars.default_dj_fade %s\n' % fade).encode('utf-8')) )
commands.append(("vars.station_name %s\n" % station_name).encode("utf-8"))
commands.append(("vars.default_dj_fade %s\n" % fade).encode("utf-8"))
self.pypo_liquidsoap.get_telnet_dispatcher().telnet_send(commands) self.pypo_liquidsoap.get_telnet_dispatcher().telnet_send(commands)
self.pypo_liquidsoap.clear_all_queues() self.pypo_liquidsoap.clear_all_queues()
@ -182,21 +192,24 @@ class PypoFetch(Thread):
will be thrown.""" will be thrown."""
self.telnet_lock.acquire(False) self.telnet_lock.acquire(False)
self.logger.info("Restarting Liquidsoap") self.logger.info("Restarting Liquidsoap")
subprocess.call('kill -9 `pidof airtime-liquidsoap`', shell=True, close_fds=True) subprocess.call(
"kill -9 `pidof airtime-liquidsoap`", shell=True, close_fds=True
)
#Wait here and poll Liquidsoap until it has started up # Wait here and poll Liquidsoap until it has started up
self.logger.info("Waiting for Liquidsoap to start") self.logger.info("Waiting for Liquidsoap to start")
while True: while True:
try: try:
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) tn = telnetlib.Telnet(
tn.write('exit\n'.encode('utf-8')) self.config["ls_host"], self.config["ls_port"]
)
tn.write("exit\n".encode("utf-8"))
tn.read_all() tn.read_all()
self.logger.info("Liquidsoap is up and running") self.logger.info("Liquidsoap is up and running")
break break
except Exception as e: except Exception as e:
#sleep 0.5 seconds and try again # sleep 0.5 seconds and try again
time.sleep(0.5) time.sleep(0.5)
except Exception as e: except Exception as e:
@ -208,11 +221,11 @@ class PypoFetch(Thread):
""" """
NOTE: This function is quite short after it was refactored. NOTE: This function is quite short after it was refactored.
""" """
def regenerate_liquidsoap_conf(self, setting): def regenerate_liquidsoap_conf(self, setting):
self.restart_liquidsoap() self.restart_liquidsoap()
self.update_liquidsoap_connection_status() self.update_liquidsoap_connection_status()
@ls_timeout @ls_timeout
def update_liquidsoap_connection_status(self): def update_liquidsoap_connection_status(self):
""" """
@ -222,20 +235,22 @@ class PypoFetch(Thread):
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
# update the boot up time of Liquidsoap. Since Liquidsoap is not restarting, # update the boot up time of Liquidsoap. Since Liquidsoap is not restarting,
# we are manually adjusting the bootup time variable so the status msg will get # we are manually adjusting the bootup time variable so the status msg will get
# updated. # updated.
current_time = time.time() current_time = time.time()
boot_up_time_command = ("vars.bootup_time " + str(current_time) + "\n").encode('utf-8') boot_up_time_command = (
"vars.bootup_time " + str(current_time) + "\n"
).encode("utf-8")
self.logger.info(boot_up_time_command) self.logger.info(boot_up_time_command)
tn.write(boot_up_time_command) tn.write(boot_up_time_command)
connection_status = ("streams.connection_status\n").encode('utf-8') connection_status = ("streams.connection_status\n").encode("utf-8")
self.logger.info(connection_status) self.logger.info(connection_status)
tn.write(connection_status) tn.write(connection_status)
tn.write('exit\n'.encode('utf-8')) tn.write("exit\n".encode("utf-8"))
output = tn.read_all() output = tn.read_all()
except Exception as e: except Exception as e:
@ -253,12 +268,13 @@ class PypoFetch(Thread):
fake_time = current_time + 1 fake_time = current_time + 1
for s in streams: for s in streams:
info = s.split(':') info = s.split(":")
stream_id = info[0] stream_id = info[0]
status = info[1] status = info[1]
if(status == "true"): if status == "true":
self.v1_api_client.notify_liquidsoap_status("OK", stream_id, str(fake_time)) self.v1_api_client.notify_liquidsoap_status(
"OK", stream_id, str(fake_time)
)
@ls_timeout @ls_timeout
def update_liquidsoap_stream_format(self, stream_format): def update_liquidsoap_stream_format(self, stream_format):
@ -266,11 +282,11 @@ class PypoFetch(Thread):
# TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!! # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
command = ('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8') command = ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8")
self.logger.info(command) self.logger.info(command)
tn.write(command) tn.write(command)
tn.write('exit\n'.encode('utf-8')) tn.write("exit\n".encode("utf-8"))
tn.read_all() tn.read_all()
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
@ -283,11 +299,11 @@ class PypoFetch(Thread):
# TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!! # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
command = ('vars.default_dj_fade %s\n' % fade).encode('utf-8') command = ("vars.default_dj_fade %s\n" % fade).encode("utf-8")
self.logger.info(command) self.logger.info(command)
tn.write(command) tn.write(command)
tn.write('exit\n'.encode('utf-8')) tn.write("exit\n".encode("utf-8"))
tn.read_all() tn.read_all()
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
@ -301,11 +317,11 @@ class PypoFetch(Thread):
try: try:
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port']) tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
command = ('vars.station_name %s\n' % station_name).encode('utf-8') command = ("vars.station_name %s\n" % station_name).encode("utf-8")
self.logger.info(command) self.logger.info(command)
tn.write(command) tn.write(command)
tn.write('exit\n'.encode('utf-8')) tn.write("exit\n".encode("utf-8"))
tn.read_all() tn.read_all()
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
@ -322,6 +338,7 @@ class PypoFetch(Thread):
to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss) to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
- runs the cleanup routine, to get rid of unused cached files - runs the cleanup routine, to get rid of unused cached files
""" """
def process_schedule(self, schedule_data): def process_schedule(self, schedule_data):
self.last_update_schedule_timestamp = time.time() self.last_update_schedule_timestamp = time.time()
self.logger.debug(schedule_data) self.logger.debug(schedule_data)
@ -343,20 +360,21 @@ class PypoFetch(Thread):
media_copy = {} media_copy = {}
for key in media: for key in media:
media_item = media[key] media_item = media[key]
if (media_item['type'] == 'file'): if media_item["type"] == "file":
fileExt = self.sanity_check_media_item(media_item) fileExt = self.sanity_check_media_item(media_item)
dst = os.path.join(download_dir, f'{media_item["id"]}{fileExt}') dst = os.path.join(download_dir, f'{media_item["id"]}{fileExt}')
media_item['dst'] = dst media_item["dst"] = dst
media_item['file_ready'] = False media_item["file_ready"] = False
media_filtered[key] = media_item media_filtered[key] = media_item
media_item['start'] = datetime.strptime(media_item['start'], media_item["start"] = datetime.strptime(
"%Y-%m-%d-%H-%M-%S") media_item["start"], "%Y-%m-%d-%H-%M-%S"
media_item['end'] = datetime.strptime(media_item['end'], )
"%Y-%m-%d-%H-%M-%S") media_item["end"] = datetime.strptime(
media_item["end"], "%Y-%m-%d-%H-%M-%S"
)
media_copy[key] = media_item media_copy[key] = media_item
self.media_prepare_queue.put(copy.copy(media_filtered)) self.media_prepare_queue.put(copy.copy(media_filtered))
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
@ -365,37 +383,36 @@ class PypoFetch(Thread):
self.logger.debug("Pushing to pypo-push") self.logger.debug("Pushing to pypo-push")
self.push_queue.put(media_copy) self.push_queue.put(media_copy)
# cleanup # cleanup
try: try:
self.cache_cleanup(media) self.cache_cleanup(media)
except Exception as e: except Exception as e:
self.logger.exception(e) self.logger.exception(e)
#do basic validation of file parameters. Useful for debugging # do basic validation of file parameters. Useful for debugging
#purposes # purposes
def sanity_check_media_item(self, media_item): def sanity_check_media_item(self, media_item):
start = datetime.strptime(media_item['start'], "%Y-%m-%d-%H-%M-%S") start = datetime.strptime(media_item["start"], "%Y-%m-%d-%H-%M-%S")
end = datetime.strptime(media_item['end'], "%Y-%m-%d-%H-%M-%S") end = datetime.strptime(media_item["end"], "%Y-%m-%d-%H-%M-%S")
mime = media_item['metadata']['mime'] mime = media_item["metadata"]["mime"]
mimetypes.init(["%s/mime.types" % os.path.dirname(os.path.realpath(__file__))]) mimetypes.init(["%s/mime.types" % os.path.dirname(os.path.realpath(__file__))])
mime_ext = mimetypes.guess_extension(mime, strict=False) mime_ext = mimetypes.guess_extension(mime, strict=False)
length1 = pure.date_interval_to_seconds(end - start) length1 = pure.date_interval_to_seconds(end - start)
length2 = media_item['cue_out'] - media_item['cue_in'] length2 = media_item["cue_out"] - media_item["cue_in"]
if abs(length2 - length1) > 1: if abs(length2 - length1) > 1:
self.logger.error("end - start length: %s", length1) self.logger.error("end - start length: %s", length1)
self.logger.error("cue_out - cue_in length: %s", length2) self.logger.error("cue_out - cue_in length: %s", length2)
self.logger.error("Two lengths are not equal!!!") self.logger.error("Two lengths are not equal!!!")
media_item['file_ext'] = mime_ext media_item["file_ext"] = mime_ext
return mime_ext return mime_ext
def is_file_opened(self, path): def is_file_opened(self, path):
#Capture stderr to avoid polluting py-interpreter.log # Capture stderr to avoid polluting py-interpreter.log
proc = Popen(["lsof", path], stdout=PIPE, stderr=PIPE) proc = Popen(["lsof", path], stdout=PIPE, stderr=PIPE)
out = proc.communicate()[0].strip() out = proc.communicate()[0].strip()
return bool(out) return bool(out)
@ -411,10 +428,14 @@ class PypoFetch(Thread):
for mkey in media: for mkey in media:
media_item = media[mkey] media_item = media[mkey]
if media_item['type'] == 'file': if media_item["type"] == "file":
if "file_ext" not in media_item.keys(): if "file_ext" not in media_item.keys():
media_item["file_ext"] = mimetypes.guess_extension(media_item['metadata']['mime'], strict=False) media_item["file_ext"] = mimetypes.guess_extension(
scheduled_file_set.add("{}{}".format(media_item["id"], media_item["file_ext"])) media_item["metadata"]["mime"], strict=False
)
scheduled_file_set.add(
"{}{}".format(media_item["id"], media_item["file_ext"])
)
expired_files = cached_file_set - scheduled_file_set expired_files = cached_file_set - scheduled_file_set
@ -424,9 +445,9 @@ class PypoFetch(Thread):
path = os.path.join(self.cache_dir, f) path = os.path.join(self.cache_dir, f)
self.logger.debug("Removing %s" % path) self.logger.debug("Removing %s" % path)
#check if this file is opened (sometimes Liquidsoap is still # check if this file is opened (sometimes Liquidsoap is still
#playing the file due to our knowledge of the track length # playing the file due to our knowledge of the track length
#being incorrect!) # being incorrect!)
if not self.is_file_opened(path): if not self.is_file_opened(path):
os.remove(path) os.remove(path)
self.logger.info("File '%s' removed" % path) self.logger.info("File '%s' removed" % path)
@ -441,7 +462,7 @@ class PypoFetch(Thread):
self.process_schedule(self.schedule_data) self.process_schedule(self.schedule_data)
return True return True
except Exception as e: except Exception as e:
self.logger.error('Unable to fetch schedule') self.logger.error("Unable to fetch schedule")
self.logger.exception(e) self.logger.exception(e)
return False return False
@ -462,11 +483,11 @@ class PypoFetch(Thread):
Timer(120, self.update_metadata_on_tunein).start() Timer(120, self.update_metadata_on_tunein).start()
def main(self): def main(self):
#Make sure all Liquidsoap queues are empty. This is important in the # Make sure all Liquidsoap queues are empty. This is important in the
#case where we've just restarted the pypo scheduler, but Liquidsoap still # case where we've just restarted the pypo scheduler, but Liquidsoap still
#is playing tracks. In this case let's just restart everything from scratch # is playing tracks. In this case let's just restart everything from scratch
#so that we can repopulate our dictionary that keeps track of what # so that we can repopulate our dictionary that keeps track of what
#Liquidsoap is playing much more easily. # Liquidsoap is playing much more easily.
self.pypo_liquidsoap.clear_all_queues() self.pypo_liquidsoap.clear_all_queues()
self.set_bootstrap_variables() self.set_bootstrap_variables()
@ -500,7 +521,9 @@ class PypoFetch(Thread):
Currently we are checking every POLL_INTERVAL seconds Currently we are checking every POLL_INTERVAL seconds
""" """
message = self.fetch_queue.get(block=True, timeout=self.listener_timeout) message = self.fetch_queue.get(
block=True, timeout=self.listener_timeout
)
manual_fetch_needed = False manual_fetch_needed = False
self.handle_message(message) self.handle_message(message)
except Empty as e: except Empty as e:
@ -513,7 +536,7 @@ class PypoFetch(Thread):
if manual_fetch_needed: if manual_fetch_needed:
self.persistent_manual_schedule_fetch(max_attempts=5) self.persistent_manual_schedule_fetch(max_attempts=5)
except Exception as e: except Exception as e:
self.logger.exception('Failed to manually fetch the schedule.') self.logger.exception("Failed to manually fetch the schedule.")
loops += 1 loops += 1
@ -522,4 +545,4 @@ class PypoFetch(Thread):
Entry point of the thread Entry point of the thread
""" """
self.main() self.main()
self.logger.info('PypoFetch thread exiting') self.logger.info("PypoFetch thread exiting")

View File

@ -18,13 +18,12 @@ import hashlib
from requests.exceptions import ConnectionError, HTTPError, Timeout from requests.exceptions import ConnectionError, HTTPError, Timeout
from api_clients import version2 as api_client from api_clients import version2 as api_client
CONFIG_PATH = '/etc/airtime/airtime.conf' CONFIG_PATH = "/etc/airtime/airtime.conf"
logging.captureWarnings(True) logging.captureWarnings(True)
class PypoFile(Thread): class PypoFile(Thread):
def __init__(self, schedule_queue, config): def __init__(self, schedule_queue, config):
Thread.__init__(self) Thread.__init__(self)
self.logger = logging.getLogger() self.logger = logging.getLogger()
@ -38,10 +37,10 @@ class PypoFile(Thread):
""" """
Copy media_item from local library directory to local cache directory. Copy media_item from local library directory to local cache directory.
""" """
src = media_item['uri'] src = media_item["uri"]
dst = media_item['dst'] dst = media_item["dst"]
src_size = media_item['filesize'] src_size = media_item["filesize"]
dst_exists = True dst_exists = True
try: try:
@ -59,34 +58,44 @@ class PypoFile(Thread):
# become an issue here... This needs proper cache management. # become an issue here... This needs proper cache management.
# https://github.com/LibreTime/libretime/issues/756#issuecomment-477853018 # https://github.com/LibreTime/libretime/issues/756#issuecomment-477853018
# https://github.com/LibreTime/libretime/pull/845 # https://github.com/LibreTime/libretime/pull/845
self.logger.debug("file %s already exists in local cache as %s, skipping copying..." % (src, dst)) self.logger.debug(
"file %s already exists in local cache as %s, skipping copying..."
% (src, dst)
)
else: else:
do_copy = True do_copy = True
media_item['file_ready'] = not do_copy media_item["file_ready"] = not do_copy
if do_copy: if do_copy:
self.logger.info("copying from %s to local cache %s" % (src, dst)) self.logger.info("copying from %s to local cache %s" % (src, dst))
try: try:
with open(dst, "wb") as handle: with open(dst, "wb") as handle:
self.logger.info(media_item) self.logger.info(media_item)
response = self.api_client.services.file_download_url(id=media_item['id']) response = self.api_client.services.file_download_url(
id=media_item["id"]
)
if not response.ok: if not response.ok:
self.logger.error(response) self.logger.error(response)
raise Exception("%s - Error occurred downloading file" % response.status_code) raise Exception(
"%s - Error occurred downloading file"
% response.status_code
)
for chunk in response.iter_content(chunk_size=1024): for chunk in response.iter_content(chunk_size=1024):
handle.write(chunk) handle.write(chunk)
#make file world readable and owner writable # make file world readable and owner writable
os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
if media_item['filesize'] == 0: if media_item["filesize"] == 0:
file_size = self.report_file_size_and_md5_to_airtime(dst, media_item["id"], host, username) file_size = self.report_file_size_and_md5_to_airtime(
dst, media_item["id"], host, username
)
media_item["filesize"] = file_size media_item["filesize"] = file_size
media_item['file_ready'] = True media_item["file_ready"] = True
except Exception as e: except Exception as e:
self.logger.error("Could not copy from %s to %s" % (src, dst)) self.logger.error("Could not copy from %s to %s" % (src, dst))
self.logger.error(e) self.logger.error(e)
@ -95,7 +104,7 @@ class PypoFile(Thread):
try: try:
file_size = os.path.getsize(file_path) file_size = os.path.getsize(file_path)
with open(file_path, 'rb') as fh: with open(file_path, "rb") as fh:
m = hashlib.md5() m = hashlib.md5()
while True: while True:
data = fh.read(8192) data = fh.read(8192)
@ -105,15 +114,21 @@ class PypoFile(Thread):
md5_hash = m.hexdigest() md5_hash = m.hexdigest()
except (OSError, IOError) as e: except (OSError, IOError) as e:
file_size = 0 file_size = 0
self.logger.error("Error getting file size and md5 hash for file id %s" % file_id) self.logger.error(
"Error getting file size and md5 hash for file id %s" % file_id
)
self.logger.error(e) self.logger.error(e)
# Make PUT request to Airtime to update the file size and hash # Make PUT request to Airtime to update the file size and hash
error_msg = "Could not update media file %s with file size and md5 hash" % file_id error_msg = (
"Could not update media file %s with file size and md5 hash" % file_id
)
try: try:
put_url = "%s://%s:%s/rest/media/%s" % (host[0], host[1], host[2], file_id) put_url = "%s://%s:%s/rest/media/%s" % (host[0], host[1], host[2], file_id)
payload = json.dumps({'filesize': file_size, 'md5': md5_hash}) payload = json.dumps({"filesize": file_size, "md5": md5_hash})
response = requests.put(put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, '')) response = requests.put(
put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, "")
)
if not response.ok: if not response.ok:
self.logger.error(error_msg) self.logger.error(error_msg)
except (ConnectionError, Timeout): except (ConnectionError, Timeout):
@ -160,7 +175,9 @@ class PypoFile(Thread):
try: try:
config.readfp(open(config_path)) config.readfp(open(config_path))
except IOError as e: except IOError as e:
logging.debug("Failed to open config file at %s: %s" % (config_path, e.strerror)) logging.debug(
"Failed to open config file at %s: %s" % (config_path, e.strerror)
)
sys.exit() sys.exit()
except Exception as e: except Exception as e:
logging.debug(e.strerror) logging.debug(e.strerror)
@ -189,12 +206,12 @@ class PypoFile(Thread):
except Empty as e: except Empty as e:
pass pass
media_item = self.get_highest_priority_media_item(self.media) media_item = self.get_highest_priority_media_item(self.media)
if media_item is not None: if media_item is not None:
self.copy_file(media_item) self.copy_file(media_item)
except Exception as e: except Exception as e:
import traceback import traceback
top = traceback.format_exc() top = traceback.format_exc()
self.logger.error(str(e)) self.logger.error(str(e))
self.logger.error(top) self.logger.error(top)
@ -204,9 +221,10 @@ class PypoFile(Thread):
""" """
Entry point of the thread Entry point of the thread
""" """
try: self.main() try:
self.main()
except Exception as e: except Exception as e:
top = traceback.format_exc() top = traceback.format_exc()
self.logger.error('PypoFile Exception: %s', top) self.logger.error("PypoFile Exception: %s", top)
time.sleep(5) time.sleep(5)
self.logger.info('PypoFile thread exiting') self.logger.info("PypoFile thread exiting")

View File

@ -11,12 +11,17 @@ import time
from queue import Empty from queue import Empty
import signal import signal
def keyboardInterruptHandler(signum, frame): def keyboardInterruptHandler(signum, frame):
logger = logging.getLogger() logger = logging.getLogger()
logger.info('\nKeyboard Interrupt\n') logger.info("\nKeyboard Interrupt\n")
sys.exit(0) sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler) signal.signal(signal.SIGINT, keyboardInterruptHandler)
class PypoLiqQueue(Thread): class PypoLiqQueue(Thread):
def __init__(self, q, pypo_liquidsoap, logger): def __init__(self, q, pypo_liquidsoap, logger):
Thread.__init__(self) Thread.__init__(self)
@ -35,18 +40,20 @@ class PypoLiqQueue(Thread):
self.logger.info("waiting indefinitely for schedule") self.logger.info("waiting indefinitely for schedule")
media_schedule = self.queue.get(block=True) media_schedule = self.queue.get(block=True)
else: else:
self.logger.info("waiting %ss until next scheduled item" % \ self.logger.info(
time_until_next_play) "waiting %ss until next scheduled item" % time_until_next_play
media_schedule = self.queue.get(block=True, \ )
timeout=time_until_next_play) media_schedule = self.queue.get(
block=True, timeout=time_until_next_play
)
except Empty as e: except Empty as e:
#Time to push a scheduled item. # Time to push a scheduled item.
media_item = schedule_deque.popleft() media_item = schedule_deque.popleft()
self.pypo_liquidsoap.play(media_item) self.pypo_liquidsoap.play(media_item)
if len(schedule_deque): if len(schedule_deque):
time_until_next_play = \ time_until_next_play = self.date_interval_to_seconds(
self.date_interval_to_seconds( schedule_deque[0]["start"] - datetime.utcnow()
schedule_deque[0]['start'] - datetime.utcnow()) )
if time_until_next_play < 0: if time_until_next_play < 0:
time_until_next_play = 0 time_until_next_play = 0
else: else:
@ -54,7 +61,7 @@ class PypoLiqQueue(Thread):
else: else:
self.logger.info("New schedule received: %s", media_schedule) self.logger.info("New schedule received: %s", media_schedule)
#new schedule received. Replace old one with this. # new schedule received. Replace old one with this.
schedule_deque.clear() schedule_deque.clear()
keys = sorted(media_schedule.keys()) keys = sorted(media_schedule.keys())
@ -63,28 +70,28 @@ class PypoLiqQueue(Thread):
if len(keys): if len(keys):
time_until_next_play = self.date_interval_to_seconds( time_until_next_play = self.date_interval_to_seconds(
media_schedule[keys[0]]['start'] - media_schedule[keys[0]]["start"] - datetime.utcnow()
datetime.utcnow()) )
else: else:
time_until_next_play = None time_until_next_play = None
def date_interval_to_seconds(self, interval): def date_interval_to_seconds(self, interval):
""" """
Convert timedelta object into int representing the number of seconds. If Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0. number of seconds is less than 0, then return 0.
""" """
seconds = (interval.microseconds + \ seconds = (
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) interval.microseconds
if seconds < 0: seconds = 0 + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
if seconds < 0:
seconds = 0
return seconds return seconds
def run(self): def run(self):
try: self.main() try:
self.main()
except Exception as e: except Exception as e:
self.logger.error('PypoLiqQueue Exception: %s', traceback.format_exc()) self.logger.error("PypoLiqQueue Exception: %s", traceback.format_exc())

View File

@ -8,27 +8,25 @@ from datetime import timedelta
from . import eventtypes from . import eventtypes
import time import time
class PypoLiquidsoap():
class PypoLiquidsoap:
def __init__(self, logger, telnet_lock, host, port): def __init__(self, logger, telnet_lock, host, port):
self.logger = logger self.logger = logger
self.liq_queue_tracker = { self.liq_queue_tracker = {
"s0": None, "s0": None,
"s1": None, "s1": None,
"s2": None, "s2": None,
"s3": None, "s3": None,
"s4": None, "s4": None,
} }
self.telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, \ self.telnet_liquidsoap = TelnetLiquidsoap(
logger,\ telnet_lock, logger, host, port, list(self.liq_queue_tracker.keys())
host,\ )
port,\
list(self.liq_queue_tracker.keys()))
def get_telnet_dispatcher(self): def get_telnet_dispatcher(self):
return self.telnet_liquidsoap return self.telnet_liquidsoap
def play(self, media_item): def play(self, media_item):
if media_item["type"] == eventtypes.FILE: if media_item["type"] == eventtypes.FILE:
self.handle_file_type(media_item) self.handle_file_type(media_item)
@ -37,28 +35,32 @@ class PypoLiquidsoap():
elif media_item["type"] == eventtypes.STREAM_BUFFER_START: elif media_item["type"] == eventtypes.STREAM_BUFFER_START:
self.telnet_liquidsoap.start_web_stream_buffer(media_item) self.telnet_liquidsoap.start_web_stream_buffer(media_item)
elif media_item["type"] == eventtypes.STREAM_OUTPUT_START: elif media_item["type"] == eventtypes.STREAM_OUTPUT_START:
if media_item['row_id'] != self.telnet_liquidsoap.current_prebuffering_stream_id: if (
#this is called if the stream wasn't scheduled sufficiently ahead of time media_item["row_id"]
#so that the prebuffering stage could take effect. Let's do the prebuffering now. != self.telnet_liquidsoap.current_prebuffering_stream_id
):
# this is called if the stream wasn't scheduled sufficiently ahead of time
# so that the prebuffering stage could take effect. Let's do the prebuffering now.
self.telnet_liquidsoap.start_web_stream_buffer(media_item) self.telnet_liquidsoap.start_web_stream_buffer(media_item)
self.telnet_liquidsoap.start_web_stream(media_item) self.telnet_liquidsoap.start_web_stream(media_item)
elif media_item['type'] == eventtypes.STREAM_BUFFER_END: elif media_item["type"] == eventtypes.STREAM_BUFFER_END:
self.telnet_liquidsoap.stop_web_stream_buffer() self.telnet_liquidsoap.stop_web_stream_buffer()
elif media_item['type'] == eventtypes.STREAM_OUTPUT_END: elif media_item["type"] == eventtypes.STREAM_OUTPUT_END:
self.telnet_liquidsoap.stop_web_stream_output() self.telnet_liquidsoap.stop_web_stream_output()
else: raise UnknownMediaItemType(str(media_item)) else:
raise UnknownMediaItemType(str(media_item))
def handle_file_type(self, media_item): def handle_file_type(self, media_item):
""" """
Wait 200 seconds (2000 iterations) for file to become ready, Wait 200 seconds (2000 iterations) for file to become ready,
otherwise give up on it. otherwise give up on it.
""" """
iter_num = 0 iter_num = 0
while not media_item['file_ready'] and iter_num < 2000: while not media_item["file_ready"] and iter_num < 2000:
time.sleep(0.1) time.sleep(0.1)
iter_num += 1 iter_num += 1
if media_item['file_ready']: if media_item["file_ready"]:
available_queue = self.find_available_queue() available_queue = self.find_available_queue()
try: try:
@ -68,27 +70,29 @@ class PypoLiquidsoap():
self.logger.error(e) self.logger.error(e)
raise raise
else: else:
self.logger.warn("File %s did not become ready in less than 5 seconds. Skipping...", media_item['dst']) self.logger.warn(
"File %s did not become ready in less than 5 seconds. Skipping...",
media_item["dst"],
)
def handle_event_type(self, media_item): def handle_event_type(self, media_item):
if media_item['event_type'] == "kick_out": if media_item["event_type"] == "kick_out":
self.telnet_liquidsoap.disconnect_source("live_dj") self.telnet_liquidsoap.disconnect_source("live_dj")
elif media_item['event_type'] == "switch_off": elif media_item["event_type"] == "switch_off":
self.telnet_liquidsoap.switch_source("live_dj", "off") self.telnet_liquidsoap.switch_source("live_dj", "off")
def is_media_item_finished(self, media_item): def is_media_item_finished(self, media_item):
if media_item is None: if media_item is None:
return True return True
else: else:
return datetime.utcnow() > media_item['end'] return datetime.utcnow() > media_item["end"]
def find_available_queue(self): def find_available_queue(self):
available_queue = None available_queue = None
for i in self.liq_queue_tracker: for i in self.liq_queue_tracker:
mi = self.liq_queue_tracker[i] mi = self.liq_queue_tracker[i]
if mi == None or self.is_media_item_finished(mi): if mi == None or self.is_media_item_finished(mi):
#queue "i" is available. Push to this queue # queue "i" is available. Push to this queue
available_queue = i available_queue = i
if available_queue == None: if available_queue == None:
@ -96,7 +100,6 @@ class PypoLiquidsoap():
return available_queue return available_queue
def verify_correct_present_media(self, scheduled_now): def verify_correct_present_media(self, scheduled_now):
""" """
verify whether Liquidsoap is currently playing the correct files. verify whether Liquidsoap is currently playing the correct files.
@ -122,11 +125,13 @@ class PypoLiquidsoap():
""" """
try: try:
scheduled_now_files = \ scheduled_now_files = [
[x for x in scheduled_now if x["type"] == eventtypes.FILE] x for x in scheduled_now if x["type"] == eventtypes.FILE
]
scheduled_now_webstream = \ scheduled_now_webstream = [
[x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START] x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START
]
schedule_ids = set([x["row_id"] for x in scheduled_now_files]) schedule_ids = set([x["row_id"] for x in scheduled_now_files])
@ -141,19 +146,21 @@ class PypoLiquidsoap():
to_be_removed = set() to_be_removed = set()
to_be_added = set() to_be_added = set()
#Iterate over the new files, and compare them to currently scheduled # Iterate over the new files, and compare them to currently scheduled
#tracks. If already in liquidsoap queue still need to make sure they don't # tracks. If already in liquidsoap queue still need to make sure they don't
#have different attributes # have different attributes
#if replay gain changes, it shouldn't change the amplification of the currently playing song # if replay gain changes, it shouldn't change the amplification of the currently playing song
for i in scheduled_now_files: for i in scheduled_now_files:
if i["row_id"] in row_id_map: if i["row_id"] in row_id_map:
mi = row_id_map[i["row_id"]] mi = row_id_map[i["row_id"]]
correct = mi['start'] == i['start'] and \ correct = (
mi['end'] == i['end'] and \ mi["start"] == i["start"]
mi['row_id'] == i['row_id'] and mi["end"] == i["end"]
and mi["row_id"] == i["row_id"]
)
if not correct: if not correct:
#need to re-add # need to re-add
self.logger.info("Track %s found to have new attr." % i) self.logger.info("Track %s found to have new attr." % i)
to_be_removed.add(i["row_id"]) to_be_removed.add(i["row_id"])
to_be_added.add(i["row_id"]) to_be_added.add(i["row_id"])
@ -162,37 +169,38 @@ class PypoLiquidsoap():
to_be_added.update(schedule_ids - liq_queue_ids) to_be_added.update(schedule_ids - liq_queue_ids)
if to_be_removed: if to_be_removed:
self.logger.info("Need to remove items from Liquidsoap: %s" % \ self.logger.info(
to_be_removed) "Need to remove items from Liquidsoap: %s" % to_be_removed
)
#remove files from Liquidsoap's queue # remove files from Liquidsoap's queue
for i in self.liq_queue_tracker: for i in self.liq_queue_tracker:
mi = self.liq_queue_tracker[i] mi = self.liq_queue_tracker[i]
if mi is not None and mi["row_id"] in to_be_removed: if mi is not None and mi["row_id"] in to_be_removed:
self.stop(i) self.stop(i)
if to_be_added: if to_be_added:
self.logger.info("Need to add items to Liquidsoap *now*: %s" % \ self.logger.info(
to_be_added) "Need to add items to Liquidsoap *now*: %s" % to_be_added
)
for i in scheduled_now_files: for i in scheduled_now_files:
if i["row_id"] in to_be_added: if i["row_id"] in to_be_added:
self.modify_cue_point(i) self.modify_cue_point(i)
self.play(i) self.play(i)
#handle webstreams # handle webstreams
current_stream_id = self.telnet_liquidsoap.get_current_stream_id() current_stream_id = self.telnet_liquidsoap.get_current_stream_id()
if scheduled_now_webstream: if scheduled_now_webstream:
if int(current_stream_id) != int(scheduled_now_webstream[0]["row_id"]): if int(current_stream_id) != int(scheduled_now_webstream[0]["row_id"]):
self.play(scheduled_now_webstream[0]) self.play(scheduled_now_webstream[0])
elif current_stream_id != "-1": elif current_stream_id != "-1":
#something is playing and it shouldn't be. # something is playing and it shouldn't be.
self.telnet_liquidsoap.stop_web_stream_buffer() self.telnet_liquidsoap.stop_web_stream_buffer()
self.telnet_liquidsoap.stop_web_stream_output() self.telnet_liquidsoap.stop_web_stream_output()
except KeyError as e: except KeyError as e:
self.logger.error("Error: Malformed event in schedule. " + str(e)) self.logger.error("Error: Malformed event in schedule. " + str(e))
def stop(self, queue): def stop(self, queue):
self.telnet_liquidsoap.queue_remove(queue) self.telnet_liquidsoap.queue_remove(queue)
self.liq_queue_tracker[queue] = None self.liq_queue_tracker[queue] = None
@ -209,24 +217,32 @@ class PypoLiquidsoap():
tnow = datetime.utcnow() tnow = datetime.utcnow()
link_start = link['start'] link_start = link["start"]
diff_td = tnow - link_start diff_td = tnow - link_start
diff_sec = self.date_interval_to_seconds(diff_td) diff_sec = self.date_interval_to_seconds(diff_td)
if diff_sec > 0: if diff_sec > 0:
self.logger.debug("media item was supposed to start %s ago. Preparing to start..", diff_sec) self.logger.debug(
original_cue_in_td = timedelta(seconds=float(link['cue_in'])) "media item was supposed to start %s ago. Preparing to start..",
link['cue_in'] = self.date_interval_to_seconds(original_cue_in_td) + diff_sec diff_sec,
)
original_cue_in_td = timedelta(seconds=float(link["cue_in"]))
link["cue_in"] = (
self.date_interval_to_seconds(original_cue_in_td) + diff_sec
)
def date_interval_to_seconds(self, interval): def date_interval_to_seconds(self, interval):
""" """
Convert timedelta object into int representing the number of seconds. If Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0. number of seconds is less than 0, then return 0.
""" """
seconds = (interval.microseconds + \ seconds = (
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) interval.microseconds
if seconds < 0: seconds = 0 + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
if seconds < 0:
seconds = 0
return seconds return seconds
@ -237,5 +253,6 @@ class PypoLiquidsoap():
class UnknownMediaItemType(Exception): class UnknownMediaItemType(Exception):
pass pass
class NoQueueAvailableException(Exception): class NoQueueAvailableException(Exception):
pass pass

View File

@ -6,6 +6,7 @@ import os
import sys import sys
from threading import Thread from threading import Thread
import time import time
# For RabbitMQ # For RabbitMQ
from kombu.connection import Connection from kombu.connection import Connection
from kombu.messaging import Exchange, Queue from kombu.messaging import Exchange, Queue
@ -26,17 +27,18 @@ class RabbitConsumer(ConsumerMixin):
def get_consumers(self, Consumer, channel): def get_consumers(self, Consumer, channel):
return [ return [
Consumer(self.queues, callbacks=[self.on_message], accept=['text/plain']), Consumer(self.queues, callbacks=[self.on_message], accept=["text/plain"]),
] ]
def on_message(self, body, message): def on_message(self, body, message):
self.handler.handle_message(message.payload) self.handler.handle_message(message.payload)
message.ack() message.ack()
class PypoMessageHandler(Thread): class PypoMessageHandler(Thread):
def __init__(self, pq, rq, config): def __init__(self, pq, rq, config):
Thread.__init__(self) Thread.__init__(self)
self.logger = logging.getLogger('message_h') self.logger = logging.getLogger("message_h")
self.pypo_queue = pq self.pypo_queue = pq
self.recorder_queue = rq self.recorder_queue = rq
self.config = config self.config = config
@ -44,13 +46,17 @@ class PypoMessageHandler(Thread):
def init_rabbit_mq(self): def init_rabbit_mq(self):
self.logger.info("Initializing RabbitMQ stuff") self.logger.info("Initializing RabbitMQ stuff")
try: try:
schedule_exchange = Exchange("airtime-pypo", "direct", durable=True, auto_delete=True) schedule_exchange = Exchange(
"airtime-pypo", "direct", durable=True, auto_delete=True
)
schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange, key="foo") schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange, key="foo")
with Connection(self.config["host"], \ with Connection(
self.config["user"], \ self.config["host"],
self.config["password"], \ self.config["user"],
self.config["vhost"], \ self.config["password"],
heartbeat = 5) as connection: self.config["vhost"],
heartbeat=5,
) as connection:
rabbit = RabbitConsumer(connection, [schedule_queue], self) rabbit = RabbitConsumer(connection, [schedule_queue], self)
rabbit.run() rabbit.run()
except Exception as e: except Exception as e:
@ -60,6 +66,7 @@ class PypoMessageHandler(Thread):
Handle a message from RabbitMQ, put it into our yucky global var. Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this. Hopefully there is a better way to do this.
""" """
def handle_message(self, message): def handle_message(self, message):
try: try:
self.logger.info("Received event from RabbitMQ: %s" % message) self.logger.info("Received event from RabbitMQ: %s" % message)
@ -69,36 +76,36 @@ class PypoMessageHandler(Thread):
except (UnicodeDecodeError, AttributeError): except (UnicodeDecodeError, AttributeError):
pass pass
m = json.loads(message) m = json.loads(message)
command = m['event_type'] command = m["event_type"]
self.logger.info("Handling command: " + command) self.logger.info("Handling command: " + command)
if command == 'update_schedule': if command == "update_schedule":
self.logger.info("Updating schedule...") self.logger.info("Updating schedule...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'reset_liquidsoap_bootstrap': elif command == "reset_liquidsoap_bootstrap":
self.logger.info("Resetting bootstrap vars...") self.logger.info("Resetting bootstrap vars...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'update_stream_setting': elif command == "update_stream_setting":
self.logger.info("Updating stream setting...") self.logger.info("Updating stream setting...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'update_stream_format': elif command == "update_stream_format":
self.logger.info("Updating stream format...") self.logger.info("Updating stream format...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'update_station_name': elif command == "update_station_name":
self.logger.info("Updating station name...") self.logger.info("Updating station name...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'switch_source': elif command == "switch_source":
self.logger.info("switch_source command received...") self.logger.info("switch_source command received...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'update_transition_fade': elif command == "update_transition_fade":
self.logger.info("Updating trasition fade...") self.logger.info("Updating trasition fade...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'disconnect_source': elif command == "disconnect_source":
self.logger.info("disconnect_source command received...") self.logger.info("disconnect_source command received...")
self.pypo_queue.put(message) self.pypo_queue.put(message)
elif command == 'update_recorder_schedule': elif command == "update_recorder_schedule":
self.recorder_queue.put(message) self.recorder_queue.put(message)
elif command == 'cancel_recording': elif command == "cancel_recording":
self.recorder_queue.put(message) self.recorder_queue.put(message)
else: else:
self.logger.info("Unknown command: %s" % command) self.logger.info("Unknown command: %s" % command)
@ -109,9 +116,11 @@ class PypoMessageHandler(Thread):
try: try:
self.init_rabbit_mq() self.init_rabbit_mq()
except Exception as e: except Exception as e:
self.logger.error('Exception: %s', e) self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", traceback.format_exc()) self.logger.error("traceback: %s", traceback.format_exc())
self.logger.error("Error connecting to RabbitMQ Server. Trying again in few seconds") self.logger.error(
"Error connecting to RabbitMQ Server. Trying again in few seconds"
)
time.sleep(5) time.sleep(5)
""" """
@ -119,7 +128,7 @@ class PypoMessageHandler(Thread):
Wait for schedule updates from RabbitMQ, but in case there aren't any, Wait for schedule updates from RabbitMQ, but in case there aren't any,
poll the server to get the upcoming schedule. poll the server to get the upcoming schedule.
""" """
def run(self): def run(self):
while True: while True:
self.main() self.main()

View File

@ -29,10 +29,12 @@ PUSH_INTERVAL = 2
def is_stream(media_item): def is_stream(media_item):
return media_item['type'] == 'stream_output_start' return media_item["type"] == "stream_output_start"
def is_file(media_item): def is_file(media_item):
return media_item['type'] == 'file' return media_item["type"] == "file"
class PypoPush(Thread): class PypoPush(Thread):
def __init__(self, q, telnet_lock, pypo_liquidsoap, config): def __init__(self, q, telnet_lock, pypo_liquidsoap, config):
@ -44,20 +46,19 @@ class PypoPush(Thread):
self.config = config self.config = config
self.pushed_objects = {} self.pushed_objects = {}
self.logger = logging.getLogger('push') self.logger = logging.getLogger("push")
self.current_prebuffering_stream_id = None self.current_prebuffering_stream_id = None
self.queue_id = 0 self.queue_id = 0
self.future_scheduled_queue = Queue() self.future_scheduled_queue = Queue()
self.pypo_liquidsoap = pypo_liquidsoap self.pypo_liquidsoap = pypo_liquidsoap
self.plq = PypoLiqQueue(self.future_scheduled_queue, \ self.plq = PypoLiqQueue(
self.pypo_liquidsoap, \ self.future_scheduled_queue, self.pypo_liquidsoap, self.logger
self.logger) )
self.plq.daemon = True self.plq.daemon = True
self.plq.start() self.plq.start()
def main(self): def main(self):
loops = 0 loops = 0
heartbeat_period = math.floor(30 / PUSH_INTERVAL) heartbeat_period = math.floor(30 / PUSH_INTERVAL)
@ -72,10 +73,11 @@ class PypoPush(Thread):
raise raise
else: else:
self.logger.debug(media_schedule) self.logger.debug(media_schedule)
#separate media_schedule list into currently_playing and # separate media_schedule list into currently_playing and
#scheduled_for_future lists # scheduled_for_future lists
currently_playing, scheduled_for_future = \ currently_playing, scheduled_for_future = self.separate_present_future(
self.separate_present_future(media_schedule) media_schedule
)
self.pypo_liquidsoap.verify_correct_present_media(currently_playing) self.pypo_liquidsoap.verify_correct_present_media(currently_playing)
self.future_scheduled_queue.put(scheduled_for_future) self.future_scheduled_queue.put(scheduled_for_future)
@ -85,7 +87,6 @@ class PypoPush(Thread):
loops = 0 loops = 0
loops += 1 loops += 1
def separate_present_future(self, media_schedule): def separate_present_future(self, media_schedule):
tnow = datetime.utcnow() tnow = datetime.utcnow()
@ -96,7 +97,7 @@ class PypoPush(Thread):
for mkey in sorted_keys: for mkey in sorted_keys:
media_item = media_schedule[mkey] media_item = media_schedule[mkey]
diff_td = tnow - media_item['start'] diff_td = tnow - media_item["start"]
diff_sec = self.date_interval_to_seconds(diff_td) diff_sec = self.date_interval_to_seconds(diff_td)
if diff_sec >= 0: if diff_sec >= 0:
@ -111,8 +112,10 @@ class PypoPush(Thread):
Convert timedelta object into int representing the number of seconds. If Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0. number of seconds is less than 0, then return 0.
""" """
seconds = (interval.microseconds + \ seconds = (
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) interval.microseconds
+ (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
return seconds return seconds
@ -120,18 +123,18 @@ class PypoPush(Thread):
def stop_web_stream_all(self): def stop_web_stream_all(self):
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['LS_HOST'], self.config['LS_PORT']) tn = telnetlib.Telnet(self.config["LS_HOST"], self.config["LS_PORT"])
#msg = 'dynamic_source.read_stop_all xxx\n' # msg = 'dynamic_source.read_stop_all xxx\n'
msg = 'http.stop\n' msg = "http.stop\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg) tn.write(msg)
msg = 'dynamic_source.output_stop\n' msg = "dynamic_source.output_stop\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg) tn.write(msg)
msg = 'dynamic_source.id -1\n' msg = "dynamic_source.id -1\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg) tn.write(msg)
@ -145,10 +148,10 @@ class PypoPush(Thread):
def run(self): def run(self):
while True: while True:
try: self.main() try:
self.main()
except Exception as e: except Exception as e:
top = traceback.format_exc() top = traceback.format_exc()
self.logger.error('Pypo Push Exception: %s', top) self.logger.error("Pypo Push Exception: %s", top)
time.sleep(5) time.sleep(5)
self.logger.info('PypoPush thread exiting') self.logger.info("PypoPush thread exiting")

View File

@ -24,6 +24,7 @@ import mutagen
from api_clients import version1 as v1_api_client from api_clients import version1 as v1_api_client
from api_clients import version2 as api_client from api_clients import version2 as api_client
def api_client(logger): def api_client(logger):
""" """
api_client returns the correct instance of AirtimeApiClient. Although there is only one api_client returns the correct instance of AirtimeApiClient. Although there is only one
@ -31,15 +32,17 @@ def api_client(logger):
""" """
return v1_api_client.AirtimeApiClient(logger) return v1_api_client.AirtimeApiClient(logger)
# loading config file # loading config file
try: try:
config = ConfigObj('/etc/airtime/airtime.conf') config = ConfigObj("/etc/airtime/airtime.conf")
except Exception as e: except Exception as e:
print("Error loading config file: {}".format(e)) print("Error loading config file: {}".format(e))
sys.exit() sys.exit()
# TODO : add docstrings everywhere in this module # TODO : add docstrings everywhere in this module
def getDateTimeObj(time): def getDateTimeObj(time):
# TODO : clean up for this function later. # TODO : clean up for this function later.
# - use tuples to parse result from split (instead of indices) # - use tuples to parse result from split (instead of indices)
@ -49,17 +52,20 @@ def getDateTimeObj(time):
# shadowed # shadowed
# - add docstring to document all behaviour of this function # - add docstring to document all behaviour of this function
timeinfo = time.split(" ") timeinfo = time.split(" ")
date = [ int(x) for x in timeinfo[0].split("-") ] date = [int(x) for x in timeinfo[0].split("-")]
my_time = [ int(x) for x in timeinfo[1].split(":") ] my_time = [int(x) for x in timeinfo[1].split(":")]
return datetime.datetime(date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None) return datetime.datetime(
date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None
)
PUSH_INTERVAL = 2 PUSH_INTERVAL = 2
class ShowRecorder(Thread):
def __init__ (self, show_instance, show_name, filelength, start_time): class ShowRecorder(Thread):
def __init__(self, show_instance, show_name, filelength, start_time):
Thread.__init__(self) Thread.__init__(self)
self.logger = logging.getLogger('recorder') self.logger = logging.getLogger("recorder")
self.api_client = api_client(self.logger) self.api_client = api_client(self.logger)
self.filelength = filelength self.filelength = filelength
self.start_time = start_time self.start_time = start_time
@ -75,35 +81,41 @@ class ShowRecorder(Thread):
if config["pypo"]["record_file_type"] in ["mp3", "ogg"]: if config["pypo"]["record_file_type"] in ["mp3", "ogg"]:
filetype = config["pypo"]["record_file_type"] filetype = config["pypo"]["record_file_type"]
else: else:
filetype = "ogg"; filetype = "ogg"
joined_path = os.path.join(config["pypo"]["base_recorded_files"], filename) joined_path = os.path.join(config["pypo"]["base_recorded_files"], filename)
filepath = "%s.%s" % (joined_path, filetype) filepath = "%s.%s" % (joined_path, filetype)
br = config["pypo"]["record_bitrate"] br = config["pypo"]["record_bitrate"]
sr = config["pypo"]["record_samplerate"] sr = config["pypo"]["record_samplerate"]
c = config["pypo"]["record_channels"] c = config["pypo"]["record_channels"]
ss = config["pypo"]["record_sample_size"] ss = config["pypo"]["record_sample_size"]
#-f:16,2,44100 # -f:16,2,44100
#-b:256 # -b:256
command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % \ command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % (
(ss, c, sr, filepath, br, length) ss,
c,
sr,
filepath,
br,
length,
)
args = command.split(" ") args = command.split(" ")
self.logger.info("starting record") self.logger.info("starting record")
self.logger.info("command " + command) self.logger.info("command " + command)
self.p = Popen(args,stdout=PIPE,stderr=PIPE) self.p = Popen(args, stdout=PIPE, stderr=PIPE)
#blocks at the following line until the child process # blocks at the following line until the child process
#quits # quits
self.p.wait() self.p.wait()
outmsgs = self.p.stdout.readlines() outmsgs = self.p.stdout.readlines()
for msg in outmsgs: for msg in outmsgs:
m = re.search('^ERROR',msg) m = re.search("^ERROR", msg)
if not m == None: if not m == None:
self.logger.info('Recording error is found: %s', outmsgs) self.logger.info("Recording error is found: %s", outmsgs)
self.logger.info("finishing record, return code %s", self.p.returncode) self.logger.info("finishing record, return code %s", self.p.returncode)
code = self.p.returncode code = self.p.returncode
@ -112,21 +124,25 @@ class ShowRecorder(Thread):
return code, filepath return code, filepath
def cancel_recording(self): def cancel_recording(self):
#send signal interrupt (2) # send signal interrupt (2)
self.logger.info("Show manually cancelled!") self.logger.info("Show manually cancelled!")
if (self.p is not None): if self.p is not None:
self.p.send_signal(signal.SIGINT) self.p.send_signal(signal.SIGINT)
#if self.p is defined, then the child process ecasound is recording # if self.p is defined, then the child process ecasound is recording
def is_recording(self): def is_recording(self):
return (self.p is not None) return self.p is not None
def upload_file(self, filepath): def upload_file(self, filepath):
filename = os.path.split(filepath)[1] filename = os.path.split(filepath)[1]
# files is what requests actually expects # files is what requests actually expects
files = {'file': open(filepath, "rb"), 'name': filename, 'show_instance': self.show_instance} files = {
"file": open(filepath, "rb"),
"name": filename,
"show_instance": self.show_instance,
}
self.api_client.upload_recorded_show(files, self.show_instance) self.api_client.upload_recorded_show(files, self.show_instance)
@ -136,27 +152,25 @@ class ShowRecorder(Thread):
self.start_time, self.show_name, self.show_instance self.start_time, self.show_name, self.show_instance
""" """
try: try:
full_date, full_time = self.start_time.split(" ",1) full_date, full_time = self.start_time.split(" ", 1)
# No idea why we translated - to : before # No idea why we translated - to : before
#full_time = full_time.replace(":","-") # full_time = full_time.replace(":","-")
self.logger.info("time: %s" % full_time) self.logger.info("time: %s" % full_time)
artist = "Airtime Show Recorder" artist = "Airtime Show Recorder"
#set some metadata for our file daemon # set some metadata for our file daemon
recorded_file = mutagen.File(filepath, easy = True) recorded_file = mutagen.File(filepath, easy=True)
recorded_file['artist'] = artist recorded_file["artist"] = artist
recorded_file['date'] = full_date recorded_file["date"] = full_date
recorded_file['title'] = "%s-%s-%s" % (self.show_name, recorded_file["title"] = "%s-%s-%s" % (self.show_name, full_date, full_time)
full_date, full_time) # You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string
#You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string recorded_file["tracknumber"] = self.show_instance
recorded_file['tracknumber'] = self.show_instance
recorded_file.save() recorded_file.save()
except Exception as e: except Exception as e:
top = traceback.format_exc() top = traceback.format_exc()
self.logger.error('Exception: %s', e) self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", top) self.logger.error("traceback: %s", top)
def run(self): def run(self):
code, filepath = self.record_show() code, filepath = self.record_show()
@ -174,14 +188,15 @@ class ShowRecorder(Thread):
self.logger.info("problem recording show") self.logger.info("problem recording show")
os.remove(filepath) os.remove(filepath)
class Recorder(Thread): class Recorder(Thread):
def __init__(self, q): def __init__(self, q):
Thread.__init__(self) Thread.__init__(self)
self.logger = logging.getLogger('recorder') self.logger = logging.getLogger("recorder")
self.api_client = api_client(self.logger) self.api_client = api_client(self.logger)
self.sr = None self.sr = None
self.shows_to_record = {} self.shows_to_record = {}
self.server_timezone = '' self.server_timezone = ""
self.queue = q self.queue = q
self.loops = 0 self.loops = 0
self.logger.info("RecorderFetch: init complete") self.logger.info("RecorderFetch: init complete")
@ -189,7 +204,7 @@ class Recorder(Thread):
success = False success = False
while not success: while not success:
try: try:
self.api_client.register_component('show-recorder') self.api_client.register_component("show-recorder")
success = True success = True
except Exception as e: except Exception as e:
self.logger.error(str(e)) self.logger.error(str(e))
@ -205,7 +220,7 @@ class Recorder(Thread):
msg = json.loads(message) msg = json.loads(message)
command = msg["event_type"] command = msg["event_type"]
self.logger.info("Received msg from Pypo Message Handler: %s", msg) self.logger.info("Received msg from Pypo Message Handler: %s", msg)
if command == 'cancel_recording': if command == "cancel_recording":
if self.currently_recording(): if self.currently_recording():
self.cancel_recording() self.cancel_recording()
else: else:
@ -218,14 +233,18 @@ class Recorder(Thread):
def process_recorder_schedule(self, m): def process_recorder_schedule(self, m):
self.logger.info("Parsing recording show schedules...") self.logger.info("Parsing recording show schedules...")
temp_shows_to_record = {} temp_shows_to_record = {}
shows = m['shows'] shows = m["shows"]
for show in shows: for show in shows:
show_starts = getDateTimeObj(show['starts']) show_starts = getDateTimeObj(show["starts"])
show_end = getDateTimeObj(show['ends']) show_end = getDateTimeObj(show["ends"])
time_delta = show_end - show_starts time_delta = show_end - show_starts
temp_shows_to_record[show['starts']] = [time_delta, temp_shows_to_record[show["starts"]] = [
show['instance_id'], show['name'], m['server_timezone']] time_delta,
show["instance_id"],
show["name"],
m["server_timezone"],
]
self.shows_to_record = temp_shows_to_record self.shows_to_record = temp_shows_to_record
def get_time_till_next_show(self): def get_time_till_next_show(self):
@ -237,7 +256,7 @@ class Recorder(Thread):
next_show = getDateTimeObj(start_time) next_show = getDateTimeObj(start_time)
delta = next_show - tnow delta = next_show - tnow
s = '%s.%s' % (delta.seconds, delta.microseconds) s = "%s.%s" % (delta.seconds, delta.microseconds)
out = float(s) out = float(s)
if out < 5: if out < 5:
@ -257,7 +276,8 @@ class Recorder(Thread):
return False return False
def start_record(self): def start_record(self):
if len(self.shows_to_record) == 0: return None if len(self.shows_to_record) == 0:
return None
try: try:
delta = self.get_time_till_next_show() delta = self.get_time_till_next_show()
if delta < 5: if delta < 5:
@ -273,16 +293,25 @@ class Recorder(Thread):
T = pytz.timezone(server_timezone) T = pytz.timezone(server_timezone)
start_time_on_UTC = getDateTimeObj(start_time) start_time_on_UTC = getDateTimeObj(start_time)
start_time_on_server = start_time_on_UTC.replace(tzinfo=pytz.utc).astimezone(T) start_time_on_server = start_time_on_UTC.replace(
start_time_formatted = '%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d' % \ tzinfo=pytz.utc
{'year': start_time_on_server.year, 'month': start_time_on_server.month, 'day': start_time_on_server.day, \ ).astimezone(T)
'hour': start_time_on_server.hour, 'min': start_time_on_server.minute, 'sec': start_time_on_server.second} start_time_formatted = (
"%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d"
% {
"year": start_time_on_server.year,
"month": start_time_on_server.month,
"day": start_time_on_server.day,
"hour": start_time_on_server.hour,
"min": start_time_on_server.minute,
"sec": start_time_on_server.second,
}
)
seconds_waiting = 0 seconds_waiting = 0
#avoiding CC-5299 # avoiding CC-5299
while(True): while True:
if self.currently_recording(): if self.currently_recording():
self.logger.info("Previous record not finished, sleeping 100ms") self.logger.info("Previous record not finished, sleeping 100ms")
seconds_waiting = seconds_waiting + 0.1 seconds_waiting = seconds_waiting + 0.1
@ -290,16 +319,21 @@ class Recorder(Thread):
else: else:
show_length_seconds = show_length.seconds - seconds_waiting show_length_seconds = show_length.seconds - seconds_waiting
self.sr = ShowRecorder(show_instance, show_name, show_length_seconds, start_time_formatted) self.sr = ShowRecorder(
show_instance,
show_name,
show_length_seconds,
start_time_formatted,
)
self.sr.start() self.sr.start()
break break
#remove show from shows to record. # remove show from shows to record.
del self.shows_to_record[start_time] del self.shows_to_record[start_time]
#self.time_till_next_show = self.get_time_till_next_show() # self.time_till_next_show = self.get_time_till_next_show()
except Exception as e : except Exception as e:
top = traceback.format_exc() top = traceback.format_exc()
self.logger.error('Exception: %s', e) self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", top) self.logger.error("traceback: %s", top)
def run(self): def run(self):
@ -318,7 +352,7 @@ class Recorder(Thread):
self.process_recorder_schedule(temp) self.process_recorder_schedule(temp)
self.logger.info("Bootstrap recorder schedule received: %s", temp) self.logger.info("Bootstrap recorder schedule received: %s", temp)
except Exception as e: except Exception as e:
self.logger.error( traceback.format_exc() ) self.logger.error(traceback.format_exc())
self.logger.error(e) self.logger.error(e)
self.logger.info("Bootstrap complete: got initial copy of the schedule") self.logger.info("Bootstrap complete: got initial copy of the schedule")
@ -338,16 +372,16 @@ class Recorder(Thread):
self.process_recorder_schedule(temp) self.process_recorder_schedule(temp)
self.logger.info("updated recorder schedule received: %s", temp) self.logger.info("updated recorder schedule received: %s", temp)
except Exception as e: except Exception as e:
self.logger.error( traceback.format_exc() ) self.logger.error(traceback.format_exc())
self.logger.error(e) self.logger.error(e)
try: self.handle_message() try:
self.handle_message()
except Exception as e: except Exception as e:
self.logger.error( traceback.format_exc() ) self.logger.error(traceback.format_exc())
self.logger.error('Pypo Recorder Exception: %s', e) self.logger.error("Pypo Recorder Exception: %s", e)
time.sleep(PUSH_INTERVAL) time.sleep(PUSH_INTERVAL)
self.loops += 1 self.loops += 1
except Exception as e : except Exception as e:
top = traceback.format_exc() top = traceback.format_exc()
self.logger.error('Exception: %s', e) self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", top) self.logger.error("traceback: %s", top)

View File

@ -4,32 +4,36 @@ import telnetlib
from .timeout import ls_timeout from .timeout import ls_timeout
import traceback import traceback
def create_liquidsoap_annotation(media): def create_liquidsoap_annotation(media):
# We need liq_start_next value in the annotate. That is the value that controls overlap duration of crossfade. # We need liq_start_next value in the annotate. That is the value that controls overlap duration of crossfade.
filename = media['dst'] filename = media["dst"]
annotation = ('annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",' + \ annotation = (
'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",' + \ 'annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",'
'schedule_table_id="%s",replay_gain="%s dB"') % \ + 'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",'
(media['id'], + 'schedule_table_id="%s",replay_gain="%s dB"'
float(media['fade_in']) / 1000, ) % (
float(media['fade_out']) / 1000, media["id"],
float(media['cue_in']), float(media["fade_in"]) / 1000,
float(media['cue_out']), float(media["fade_out"]) / 1000,
media['row_id'], float(media["cue_in"]),
media['replay_gain']) float(media["cue_out"]),
media["row_id"],
media["replay_gain"],
)
# Override the the artist/title that Liquidsoap extracts from a file's metadata # Override the the artist/title that Liquidsoap extracts from a file's metadata
# with the metadata we get from Airtime. (You can modify metadata in Airtime's library, # with the metadata we get from Airtime. (You can modify metadata in Airtime's library,
# which doesn't get saved back to the file.) # which doesn't get saved back to the file.)
if 'metadata' in media: if "metadata" in media:
if 'artist_name' in media['metadata']: if "artist_name" in media["metadata"]:
artist_name = media['metadata']['artist_name'] artist_name = media["metadata"]["artist_name"]
if isinstance(artist_name, str): if isinstance(artist_name, str):
annotation += ',artist="%s"' % (artist_name.replace('"', '\\"')) annotation += ',artist="%s"' % (artist_name.replace('"', '\\"'))
if 'track_title' in media['metadata']: if "track_title" in media["metadata"]:
track_title = media['metadata']['track_title'] track_title = media["metadata"]["track_title"]
if isinstance(track_title, str): if isinstance(track_title, str):
annotation += ',title="%s"' % (track_title.replace('"', '\\"')) annotation += ',title="%s"' % (track_title.replace('"', '\\"'))
@ -37,8 +41,8 @@ def create_liquidsoap_annotation(media):
return annotation return annotation
class TelnetLiquidsoap:
class TelnetLiquidsoap:
def __init__(self, telnet_lock, logger, ls_host, ls_port, queues): def __init__(self, telnet_lock, logger, ls_host, ls_port, queues):
self.telnet_lock = telnet_lock self.telnet_lock = telnet_lock
self.ls_host = ls_host self.ls_host = ls_host
@ -53,9 +57,9 @@ class TelnetLiquidsoap:
def __is_empty(self, queue_id): def __is_empty(self, queue_id):
return True return True
tn = self.__connect() tn = self.__connect()
msg = '%s.queue\nexit\n' % queue_id msg = "%s.queue\nexit\n" % queue_id
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
output = tn.read_all().decode('utf-8').splitlines() output = tn.read_all().decode("utf-8").splitlines()
if len(output) == 3: if len(output) == 3:
return len(output[0]) == 0 return len(output[0]) == 0
else: else:
@ -68,12 +72,12 @@ class TelnetLiquidsoap:
tn = self.__connect() tn = self.__connect()
for i in self.queues: for i in self.queues:
msg = 'queues.%s_skip\n' % i msg = "queues.%s_skip\n" % i
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
except Exception: except Exception:
raise raise
finally: finally:
@ -85,18 +89,17 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = self.__connect() tn = self.__connect()
msg = 'queues.%s_skip\n' % queue_id msg = "queues.%s_skip\n" % queue_id
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
except Exception: except Exception:
raise raise
finally: finally:
self.telnet_lock.release() self.telnet_lock.release()
@ls_timeout @ls_timeout
def queue_push(self, queue_id, media_item): def queue_push(self, queue_id, media_item):
try: try:
@ -107,40 +110,39 @@ class TelnetLiquidsoap:
tn = self.__connect() tn = self.__connect()
annotation = create_liquidsoap_annotation(media_item) annotation = create_liquidsoap_annotation(media_item)
msg = '%s.push %s\n' % (queue_id, annotation) msg = "%s.push %s\n" % (queue_id, annotation)
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
show_name = media_item['show_name'] show_name = media_item["show_name"]
msg = 'vars.show_name %s\n' % show_name msg = "vars.show_name %s\n" % show_name
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
self.logger.debug(msg) self.logger.debug(msg)
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
except Exception: except Exception:
raise raise
finally: finally:
self.telnet_lock.release() self.telnet_lock.release()
@ls_timeout @ls_timeout
def stop_web_stream_buffer(self): def stop_web_stream_buffer(self):
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port) tn = telnetlib.Telnet(self.ls_host, self.ls_port)
#dynamic_source.stop http://87.230.101.24:80/top100station.mp3 # dynamic_source.stop http://87.230.101.24:80/top100station.mp3
msg = 'http.stop\n' msg = "http.stop\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
msg = 'dynamic_source.id -1\n' msg = "dynamic_source.id -1\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
except Exception as e: except Exception as e:
self.logger.error(str(e)) self.logger.error(str(e))
@ -153,14 +155,14 @@ class TelnetLiquidsoap:
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port) tn = telnetlib.Telnet(self.ls_host, self.ls_port)
#dynamic_source.stop http://87.230.101.24:80/top100station.mp3 # dynamic_source.stop http://87.230.101.24:80/top100station.mp3
msg = 'dynamic_source.output_stop\n' msg = "dynamic_source.output_stop\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
except Exception as e: except Exception as e:
self.logger.error(str(e)) self.logger.error(str(e))
@ -174,16 +176,16 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port) tn = telnetlib.Telnet(self.ls_host, self.ls_port)
#TODO: DO we need this? # TODO: DO we need this?
msg = 'streams.scheduled_play_start\n' msg = "streams.scheduled_play_start\n"
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
msg = 'dynamic_source.output_start\n' msg = "dynamic_source.output_start\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
self.current_prebuffering_stream_id = None self.current_prebuffering_stream_id = None
except Exception as e: except Exception as e:
@ -198,18 +200,18 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port) tn = telnetlib.Telnet(self.ls_host, self.ls_port)
msg = 'dynamic_source.id %s\n' % media_item['row_id'] msg = "dynamic_source.id %s\n" % media_item["row_id"]
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
msg = 'http.restart %s\n' % media_item['uri'] msg = "http.restart %s\n" % media_item["uri"]
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode('utf-8')) self.logger.debug(tn.read_all().decode("utf-8"))
self.current_prebuffering_stream_id = media_item['row_id'] self.current_prebuffering_stream_id = media_item["row_id"]
except Exception as e: except Exception as e:
self.logger.error(str(e)) self.logger.error(str(e))
self.logger.error(traceback.format_exc()) self.logger.error(traceback.format_exc())
@ -222,12 +224,12 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port) tn = telnetlib.Telnet(self.ls_host, self.ls_port)
msg = 'dynamic_source.get_id\n' msg = "dynamic_source.get_id\n"
self.logger.debug(msg) self.logger.debug(msg)
tn.write(msg.encode('utf-8')) tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8')) tn.write("exit\n".encode("utf-8"))
stream_id = tn.read_all().decode('utf-8').splitlines()[0] stream_id = tn.read_all().decode("utf-8").splitlines()[0]
self.logger.debug("stream_id: %s" % stream_id) self.logger.debug("stream_id: %s" % stream_id)
return stream_id return stream_id
@ -239,20 +241,20 @@ class TelnetLiquidsoap:
@ls_timeout @ls_timeout
def disconnect_source(self, sourcename): def disconnect_source(self, sourcename):
self.logger.debug('Disconnecting source: %s', sourcename) self.logger.debug("Disconnecting source: %s", sourcename)
command = "" command = ""
if(sourcename == "master_dj"): if sourcename == "master_dj":
command += "master_harbor.stop\n" command += "master_harbor.stop\n"
elif(sourcename == "live_dj"): elif sourcename == "live_dj":
command += "live_dj_harbor.stop\n" command += "live_dj_harbor.stop\n"
try: try:
self.telnet_lock.acquire() self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port) tn = telnetlib.Telnet(self.ls_host, self.ls_port)
self.logger.info(command) self.logger.info(command)
tn.write(command.encode('utf-8')) tn.write(command.encode("utf-8"))
tn.write('exit\n'.encode('utf-8')) tn.write("exit\n".encode("utf-8"))
tn.read_all().decode('utf-8') tn.read_all().decode("utf-8")
except Exception as e: except Exception as e:
self.logger.error(traceback.format_exc()) self.logger.error(traceback.format_exc())
finally: finally:
@ -267,18 +269,17 @@ class TelnetLiquidsoap:
for i in commands: for i in commands:
self.logger.info(i) self.logger.info(i)
if type(i) is str: if type(i) is str:
i = i.encode('utf-8') i = i.encode("utf-8")
tn.write(i) tn.write(i)
tn.write('exit\n'.encode('utf-8')) tn.write("exit\n".encode("utf-8"))
tn.read_all().decode('utf-8') tn.read_all().decode("utf-8")
except Exception as e: except Exception as e:
self.logger.error(str(e)) self.logger.error(str(e))
self.logger.error(traceback.format_exc()) self.logger.error(traceback.format_exc())
finally: finally:
self.telnet_lock.release() self.telnet_lock.release()
def switch_source(self, sourcename, status): def switch_source(self, sourcename, status):
self.logger.debug('Switching source: %s to "%s" status', sourcename, status) self.logger.debug('Switching source: %s to "%s" status', sourcename, status)
command = "streams." command = "streams."
@ -296,15 +297,15 @@ class TelnetLiquidsoap:
self.telnet_send([command]) self.telnet_send([command])
class DummyTelnetLiquidsoap:
class DummyTelnetLiquidsoap:
def __init__(self, telnet_lock, logger): def __init__(self, telnet_lock, logger):
self.telnet_lock = telnet_lock self.telnet_lock = telnet_lock
self.liquidsoap_mock_queues = {} self.liquidsoap_mock_queues = {}
self.logger = logger self.logger = logger
for i in range(4): for i in range(4):
self.liquidsoap_mock_queues["s"+str(i)] = [] self.liquidsoap_mock_queues["s" + str(i)] = []
@ls_timeout @ls_timeout
def queue_push(self, queue_id, media_item): def queue_push(self, queue_id, media_item):
@ -313,6 +314,7 @@ class DummyTelnetLiquidsoap:
self.logger.info("Pushing %s to queue %s" % (media_item, queue_id)) self.logger.info("Pushing %s to queue %s" % (media_item, queue_id))
from datetime import datetime from datetime import datetime
print("Time now: {:s}".format(datetime.utcnow())) print("Time now: {:s}".format(datetime.utcnow()))
annotation = create_liquidsoap_annotation(media_item) annotation = create_liquidsoap_annotation(media_item)
@ -329,6 +331,7 @@ class DummyTelnetLiquidsoap:
self.logger.info("Purging queue %s" % queue_id) self.logger.info("Purging queue %s" % queue_id)
from datetime import datetime from datetime import datetime
print("Time now: {:s}".format(datetime.utcnow())) print("Time now: {:s}".format(datetime.utcnow()))
except Exception: except Exception:
@ -336,5 +339,6 @@ class DummyTelnetLiquidsoap:
finally: finally:
self.telnet_lock.release() self.telnet_lock.release()
class QueueNotEmptyException(Exception): class QueueNotEmptyException(Exception):
pass pass

View File

@ -13,14 +13,17 @@ import logging
from datetime import datetime from datetime import datetime
from datetime import timedelta from datetime import timedelta
def keyboardInterruptHandler(signum, frame): def keyboardInterruptHandler(signum, frame):
logger = logging.getLogger() logger = logging.getLogger()
logger.info('\nKeyboard Interrupt\n') logger.info("\nKeyboard Interrupt\n")
sys.exit(0) sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler) signal.signal(signal.SIGINT, keyboardInterruptHandler)
# configure logging # configure logging
format = '%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s' format = "%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=format) logging.basicConfig(level=logging.DEBUG, format=format)
logging.captureWarnings(True) logging.captureWarnings(True)
@ -30,19 +33,18 @@ pypoPush_q = Queue()
pypoLiq_q = Queue() pypoLiq_q = Queue()
liq_queue_tracker = { liq_queue_tracker = {
"s0": None, "s0": None,
"s1": None, "s1": None,
"s2": None, "s2": None,
"s3": None, "s3": None,
} }
#dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging) # dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging)
dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, \ dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, "localhost", 1234)
"localhost", \
1234)
plq = PypoLiqQueue(pypoLiq_q, telnet_lock, logging, liq_queue_tracker, \ plq = PypoLiqQueue(
dummy_telnet_liquidsoap) pypoLiq_q, telnet_lock, logging, liq_queue_tracker, dummy_telnet_liquidsoap
)
plq.daemon = True plq.daemon = True
plq.start() plq.start()
@ -54,47 +56,43 @@ media_schedule = {}
start_dt = datetime.utcnow() + timedelta(seconds=1) start_dt = datetime.utcnow() + timedelta(seconds=1)
end_dt = datetime.utcnow() + timedelta(seconds=6) end_dt = datetime.utcnow() + timedelta(seconds=6)
media_schedule[start_dt] = {"id": 5, \ media_schedule[start_dt] = {
"type":"file", \ "id": 5,
"row_id":9, \ "type": "file",
"uri":"", \ "row_id": 9,
"dst":"/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3", \ "uri": "",
"fade_in":0, \ "dst": "/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3",
"fade_out":0, \ "fade_in": 0,
"cue_in":0, \ "fade_out": 0,
"cue_out":300, \ "cue_in": 0,
"start": start_dt, \ "cue_out": 300,
"end": end_dt, \ "start": start_dt,
"show_name":"Untitled", \ "end": end_dt,
"replay_gain": 0, \ "show_name": "Untitled",
"independent_event": True \ "replay_gain": 0,
} "independent_event": True,
}
start_dt = datetime.utcnow() + timedelta(seconds=2) start_dt = datetime.utcnow() + timedelta(seconds=2)
end_dt = datetime.utcnow() + timedelta(seconds=6) end_dt = datetime.utcnow() + timedelta(seconds=6)
media_schedule[start_dt] = {"id": 5, \ media_schedule[start_dt] = {
"type":"file", \ "id": 5,
"row_id":9, \ "type": "file",
"uri":"", \ "row_id": 9,
"dst":"/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3", \ "uri": "",
"fade_in":0, \ "dst": "/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3",
"fade_out":0, \ "fade_in": 0,
"cue_in":0, \ "fade_out": 0,
"cue_out":300, \ "cue_in": 0,
"start": start_dt, \ "cue_out": 300,
"end": end_dt, \ "start": start_dt,
"show_name":"Untitled", \ "end": end_dt,
"replay_gain": 0, \ "show_name": "Untitled",
"independent_event": True \ "replay_gain": 0,
} "independent_event": True,
}
pypoLiq_q.put(media_schedule) pypoLiq_q.put(media_schedule)
plq.join() plq.join()

View File

@ -2,12 +2,13 @@
import threading import threading
from . import pypofetch from . import pypofetch
def __timeout(func, timeout_duration, default, args, kwargs):
def __timeout(func, timeout_duration, default, args, kwargs):
class InterruptableThread(threading.Thread): class InterruptableThread(threading.Thread):
def __init__(self): def __init__(self):
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.result = default self.result = default
def run(self): def run(self):
self.result = func(*args, **kwargs) self.result = func(*args, **kwargs)
@ -21,10 +22,10 @@ def __timeout(func, timeout_duration, default, args, kwargs):
it.join(timeout_duration) it.join(timeout_duration)
if it.isAlive(): if it.isAlive():
"""Restart Liquidsoap and try the command one more time. If it """Restart Liquidsoap and try the command one more time. If it
fails again then there is something critically wrong...""" fails again then there is something critically wrong..."""
if first_attempt: if first_attempt:
#restart liquidsoap # restart liquidsoap
pypofetch.PypoFetch.ref.restart_liquidsoap() pypofetch.PypoFetch.ref.restart_liquidsoap()
else: else:
raise Exception("Thread did not terminate") raise Exception("Thread did not terminate")
@ -33,7 +34,9 @@ def __timeout(func, timeout_duration, default, args, kwargs):
first_attempt = False first_attempt = False
def ls_timeout(f, timeout=15, default=None): def ls_timeout(f, timeout=15, default=None):
def new_f(*args, **kwargs): def new_f(*args, **kwargs):
return __timeout(f, timeout, default, args, kwargs) return __timeout(f, timeout, default, args, kwargs)
return new_f return new_f

View File

@ -10,64 +10,63 @@ print(script_path)
os.chdir(script_path) os.chdir(script_path)
# Allows us to avoid installing the upstart init script when deploying on Airtime Pro: # Allows us to avoid installing the upstart init script when deploying on Airtime Pro:
if '--no-init-script' in sys.argv: if "--no-init-script" in sys.argv:
data_files = [] data_files = []
sys.argv.remove('--no-init-script') # super hax sys.argv.remove("--no-init-script") # super hax
else: else:
pypo_files = [] pypo_files = []
for root, dirnames, filenames in os.walk('pypo'): for root, dirnames, filenames in os.walk("pypo"):
for filename in filenames: for filename in filenames:
pypo_files.append(os.path.join(root, filename)) pypo_files.append(os.path.join(root, filename))
data_files = [ data_files = [
('/etc/init', ['install/upstart/airtime-playout.conf.template']), ("/etc/init", ["install/upstart/airtime-playout.conf.template"]),
('/etc/init', ['install/upstart/airtime-liquidsoap.conf.template']), ("/etc/init", ["install/upstart/airtime-liquidsoap.conf.template"]),
('/etc/init.d', ['install/sysvinit/airtime-playout']), ("/etc/init.d", ["install/sysvinit/airtime-playout"]),
('/etc/init.d', ['install/sysvinit/airtime-liquidsoap']), ("/etc/init.d", ["install/sysvinit/airtime-liquidsoap"]),
('/var/log/airtime/pypo', []), ("/var/log/airtime/pypo", []),
('/var/log/airtime/pypo-liquidsoap', []), ("/var/log/airtime/pypo-liquidsoap", []),
('/var/tmp/airtime/pypo', []), ("/var/tmp/airtime/pypo", []),
('/var/tmp/airtime/pypo/cache', []), ("/var/tmp/airtime/pypo/cache", []),
('/var/tmp/airtime/pypo/files', []), ("/var/tmp/airtime/pypo/files", []),
('/var/tmp/airtime/pypo/tmp', []), ("/var/tmp/airtime/pypo/tmp", []),
] ]
print(data_files) print(data_files)
setup(name='airtime-playout', setup(
version='1.0', name="airtime-playout",
description='Airtime Playout Engine', version="1.0",
url='http://github.com/sourcefabric/Airtime', description="Airtime Playout Engine",
author='sourcefabric', url="http://github.com/sourcefabric/Airtime",
license='AGPLv3', author="sourcefabric",
packages=['pypo', 'pypo.media', 'pypo.media.update', license="AGPLv3",
'liquidsoap'], packages=["pypo", "pypo.media", "pypo.media.update", "liquidsoap"],
package_data={'': ['**/*.liq', '*.cfg', '*.types']}, package_data={"": ["**/*.liq", "*.cfg", "*.types"]},
scripts=[ scripts=["bin/airtime-playout", "bin/airtime-liquidsoap", "bin/pyponotify"],
'bin/airtime-playout', install_requires=[
'bin/airtime-liquidsoap', "amqplib",
'bin/pyponotify' "anyjson",
], "argparse",
install_requires=[ "configobj",
'amqplib', "docopt",
'anyjson', "future",
'argparse', "kombu",
'configobj', "mutagen",
'docopt', "PyDispatcher",
'future', "pyinotify",
'kombu', "pytz",
'mutagen', "requests",
'PyDispatcher', "defusedxml",
'pyinotify', "packaging",
'pytz', ],
'requests', zip_safe=False,
'defusedxml', data_files=data_files,
'packaging', )
],
zip_safe=False,
data_files=data_files)
# Reload the initctl config so that playout services works # Reload the initctl config so that playout services works
if data_files: if data_files:
print("Reloading initctl configuration") print("Reloading initctl configuration")
#call(['initctl', 'reload-configuration']) # call(['initctl', 'reload-configuration'])
print("Run \"sudo service airtime-playout start\" and \"sudo service airtime-liquidsoap start\"") print(
'Run "sudo service airtime-playout start" and "sudo service airtime-liquidsoap start"'
)

View File

@ -9,7 +9,7 @@ import json
import shutil import shutil
import commands import commands
#sys.path.append('/usr/lib/airtime/media-monitor/mm2/') # sys.path.append('/usr/lib/airtime/media-monitor/mm2/')
from mm2.media.monitor.pure import is_file_supported from mm2.media.monitor.pure import is_file_supported
# create logger # create logger
@ -22,86 +22,97 @@ logging.disable(50)
# add ch to logger # add ch to logger
logger.addHandler(ch) logger.addHandler(ch)
if (os.geteuid() != 0): if os.geteuid() != 0:
print 'Must be a root user.' print "Must be a root user."
sys.exit() sys.exit()
# loading config file # loading config file
try: try:
config = ConfigObj('/etc/airtime/airtime.conf') config = ConfigObj("/etc/airtime/airtime.conf")
except Exception, e: except Exception, e:
print('Error loading config file: %s', e) print ("Error loading config file: %s", e)
sys.exit() sys.exit()
api_client = apc.AirtimeApiClient(config) api_client = apc.AirtimeApiClient(config)
#helper functions # helper functions
# copy or move files # copy or move files
# flag should be 'copy' or 'move' # flag should be 'copy' or 'move'
def copy_or_move_files_to(paths, dest, flag): def copy_or_move_files_to(paths, dest, flag):
try: try:
for path in paths: for path in paths:
if (path[0] == "/" or path[0] == "~"): if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path) path = os.path.realpath(path)
else: else:
path = currentDir+path path = currentDir + path
path = apc.encode_to(path, 'utf-8') path = apc.encode_to(path, "utf-8")
dest = apc.encode_to(dest, 'utf-8') dest = apc.encode_to(dest, "utf-8")
if(os.path.exists(path)): if os.path.exists(path):
if(os.path.isdir(path)): if os.path.isdir(path):
path = format_dir_string(path) path = format_dir_string(path)
#construct full path # construct full path
sub_path = [] sub_path = []
for temp in os.listdir(path): for temp in os.listdir(path):
sub_path.append(path+temp) sub_path.append(path + temp)
copy_or_move_files_to(sub_path, dest, flag) copy_or_move_files_to(sub_path, dest, flag)
elif(os.path.isfile(path)): elif os.path.isfile(path):
#copy file to dest # copy file to dest
if(is_file_supported(path)): if is_file_supported(path):
destfile = dest+os.path.basename(path) destfile = dest + os.path.basename(path)
if(flag == 'copy'): if flag == "copy":
print "Copying %(src)s to %(dest)s..." % {'src':path, 'dest':destfile} print "Copying %(src)s to %(dest)s..." % {
"src": path,
"dest": destfile,
}
shutil.copyfile(path, destfile) shutil.copyfile(path, destfile)
elif(flag == 'move'): elif flag == "move":
print "Moving %(src)s to %(dest)s..." % {'src':path, 'dest':destfile} print "Moving %(src)s to %(dest)s..." % {
"src": path,
"dest": destfile,
}
shutil.move(path, destfile) shutil.move(path, destfile)
else: else:
print "Cannot find file or path: %s" % path print "Cannot find file or path: %s" % path
except Exception as e: except Exception as e:
print "Error: ", e print "Error: ", e
def format_dir_string(path): def format_dir_string(path):
if(path[-1] != '/'): if path[-1] != "/":
path = path+'/' path = path + "/"
return path return path
def helper_get_stor_dir(): def helper_get_stor_dir():
try: try:
res = api_client.list_all_watched_dirs() res = api_client.list_all_watched_dirs()
except Exception, e: except Exception, e:
return res return res
if(res['dirs']['1'][-1] != '/'): if res["dirs"]["1"][-1] != "/":
out = res['dirs']['1']+'/' out = res["dirs"]["1"] + "/"
return out return out
else: else:
return res['dirs']['1'] return res["dirs"]["1"]
def checkOtherOption(args): def checkOtherOption(args):
for i in args: for i in args:
if(i[0] == '-'): if i[0] == "-":
return True return True
def errorIfMultipleOption(args, msg=""):
    """Raise OptionValueError when *args* still contains another option flag.

    *msg* overrides the default error message when non-empty.
    """
    if not checkOtherOption(args):
        return
    if msg:
        raise OptionValueError(msg)
    raise OptionValueError("This option cannot be combined with other options")
def printHelp(): def printHelp():
storage_dir = helper_get_stor_dir() storage_dir = helper_get_stor_dir()
if(storage_dir is None): if storage_dir is None:
storage_dir = "Unknown" storage_dir = "Unknown"
else: else:
storage_dir += "imported/" storage_dir += "imported/"
@ -129,58 +140,70 @@ There are two ways to import audio files into Airtime:
parser.print_help() parser.print_help()
print "" print ""
def CopyAction(option, opt, value, parser): def CopyAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs) errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) == 0 ): if len(parser.rargs) == 0:
raise OptionValueError("No argument found. This option requires at least one argument.") raise OptionValueError(
"No argument found. This option requires at least one argument."
)
stor = helper_get_stor_dir() stor = helper_get_stor_dir()
if(stor is None): if stor is None:
print "Unable to connect to the Airtime server." print "Unable to connect to the Airtime server."
return return
dest = stor+"organize/" dest = stor + "organize/"
copy_or_move_files_to(parser.rargs, dest, 'copy') copy_or_move_files_to(parser.rargs, dest, "copy")
def MoveAction(option, opt, value, parser):
    """optparse callback: move the remaining arguments into the Airtime
    "organize" directory for import."""
    errorIfMultipleOption(parser.rargs)
    if not parser.rargs:
        raise OptionValueError(
            "No argument found. This option requires at least one argument."
        )
    storage = helper_get_stor_dir()
    if storage is None:
        exit("Unable to connect to the Airtime server.")
    copy_or_move_files_to(parser.rargs, storage + "organize/", "move")
def WatchAddAction(option, opt, value, parser): def WatchAddAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs) errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 1): if len(parser.rargs) > 1:
raise OptionValueError("Too many arguments. This option requires exactly one argument.") raise OptionValueError(
elif(len(parser.rargs) == 0 ): "Too many arguments. This option requires exactly one argument."
raise OptionValueError("No argument found. This option requires exactly one argument.") )
elif len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires exactly one argument."
)
path = parser.rargs[0] path = parser.rargs[0]
if (path[0] == "/" or path[0] == "~"): if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path) path = os.path.realpath(path)
else: else:
path = currentDir+path path = currentDir + path
path = apc.encode_to(path, 'utf-8') path = apc.encode_to(path, "utf-8")
if(os.path.isdir(path)): if os.path.isdir(path):
#os.chmod(path, 0765) # os.chmod(path, 0765)
try: try:
res = api_client.add_watched_dir(path) res = api_client.add_watched_dir(path)
except Exception, e: except Exception, e:
exit("Unable to connect to the server.") exit("Unable to connect to the server.")
# success # success
if(res['msg']['code'] == 0): if res["msg"]["code"] == 0:
print "%s added to watched folder list successfully" % path print "%s added to watched folder list successfully" % path
else: else:
print "Adding a watched folder failed: %s" % res['msg']['error'] print "Adding a watched folder failed: %s" % res["msg"]["error"]
print "This error most likely caused by wrong permissions" print "This error most likely caused by wrong permissions"
print "Try fixing this error by chmodding the parent directory(ies)" print "Try fixing this error by chmodding the parent directory(ies)"
else: else:
print "Given path is not a directory: %s" % path print "Given path is not a directory: %s" % path
def WatchListAction(option, opt, value, parser): def WatchListAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs) errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 0): if len(parser.rargs) > 0:
raise OptionValueError("This option doesn't take any arguments.") raise OptionValueError("This option doesn't take any arguments.")
try: try:
res = api_client.list_all_watched_dirs() res = api_client.list_all_watched_dirs()
@ -188,120 +211,184 @@ def WatchListAction(option, opt, value, parser):
exit("Unable to connect to the Airtime server.") exit("Unable to connect to the Airtime server.")
dirs = res["dirs"].items() dirs = res["dirs"].items()
# there will be always 1 which is storage folder # there will be always 1 which is storage folder
if(len(dirs) == 1): if len(dirs) == 1:
print "No watch folders found" print "No watch folders found"
else: else:
for key, v in dirs: for key, v in dirs:
if(key != '1'): if key != "1":
print v print v
def WatchRemoveAction(option, opt, value, parser): def WatchRemoveAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs) errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 1): if len(parser.rargs) > 1:
raise OptionValueError("Too many arguments. This option requires exactly one argument.") raise OptionValueError(
elif(len(parser.rargs) == 0 ): "Too many arguments. This option requires exactly one argument."
raise OptionValueError("No argument found. This option requires exactly one argument.") )
elif len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires exactly one argument."
)
path = parser.rargs[0] path = parser.rargs[0]
if (path[0] == "/" or path[0] == "~"): if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path) path = os.path.realpath(path)
else: else:
path = currentDir+path path = currentDir + path
path = apc.encode_to(path, 'utf-8') path = apc.encode_to(path, "utf-8")
if(os.path.isdir(path)): if os.path.isdir(path):
try: try:
res = api_client.remove_watched_dir(path) res = api_client.remove_watched_dir(path)
except Exception, e: except Exception, e:
exit("Unable to connect to the Airtime server.") exit("Unable to connect to the Airtime server.")
# success # success
if(res['msg']['code'] == 0): if res["msg"]["code"] == 0:
print "%s removed from watch folder list successfully." % path print "%s removed from watch folder list successfully." % path
else: else:
print "Removing the watch folder failed: %s" % res['msg']['error'] print "Removing the watch folder failed: %s" % res["msg"]["error"]
else: else:
print "The given path is not a directory: %s" % path print "The given path is not a directory: %s" % path
def StorageSetAction(option, opt, value, parser): def StorageSetAction(option, opt, value, parser):
bypass = False bypass = False
isF = '-f' in parser.rargs isF = "-f" in parser.rargs
isForce = '--force' in parser.rargs isForce = "--force" in parser.rargs
if(isF or isForce ): if isF or isForce:
bypass = True bypass = True
if(isF): if isF:
parser.rargs.remove('-f') parser.rargs.remove("-f")
if(isForce): if isForce:
parser.rargs.remove('--force') parser.rargs.remove("--force")
if(not bypass): if not bypass:
errorIfMultipleOption(parser.rargs, "Only [-f] and [--force] option is allowed with this option.") errorIfMultipleOption(
possibleInput = ['y','Y','n','N'] parser.rargs, "Only [-f] and [--force] option is allowed with this option."
confirm = raw_input("Are you sure you want to change the storage directory? (y/N)") )
confirm = confirm or 'N' possibleInput = ["y", "Y", "n", "N"]
while(confirm not in possibleInput): confirm = raw_input(
"Are you sure you want to change the storage directory? (y/N)"
)
confirm = confirm or "N"
while confirm not in possibleInput:
print "Not an acceptable input: %s\n" % confirm print "Not an acceptable input: %s\n" % confirm
confirm = raw_input("Are you sure you want to change the storage directory? (y/N) ") confirm = raw_input(
confirm = confirm or 'N' "Are you sure you want to change the storage directory? (y/N) "
if(confirm == 'n' or confirm =='N'): )
confirm = confirm or "N"
if confirm == "n" or confirm == "N":
sys.exit(1) sys.exit(1)
if(len(parser.rargs) > 1): if len(parser.rargs) > 1:
raise OptionValueError("Too many arguments. This option requires exactly one argument.") raise OptionValueError(
elif(len(parser.rargs) == 0 ): "Too many arguments. This option requires exactly one argument."
raise OptionValueError("No argument found. This option requires exactly one argument.") )
elif len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires exactly one argument."
)
path = parser.rargs[0] path = parser.rargs[0]
if (path[0] == "/" or path[0] == "~"): if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path) path = os.path.realpath(path)
else: else:
path = currentDir+path path = currentDir + path
path = apc.encode_to(path, 'utf-8') path = apc.encode_to(path, "utf-8")
if(os.path.isdir(path)): if os.path.isdir(path):
try: try:
res = api_client.set_storage_dir(path) res = api_client.set_storage_dir(path)
except Exception, e: except Exception, e:
exit("Unable to connect to the Airtime server.") exit("Unable to connect to the Airtime server.")
# success # success
if(res['msg']['code'] == 0): if res["msg"]["code"] == 0:
print "Successfully set storage folder to %s" % path print "Successfully set storage folder to %s" % path
else: else:
print "Setting storage folder failed: %s" % res['msg']['error'] print "Setting storage folder failed: %s" % res["msg"]["error"]
else: else:
print "The given path is not a directory: %s" % path print "The given path is not a directory: %s" % path
def StorageGetAction(option, opt, value, parser): def StorageGetAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs) errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 0): if len(parser.rargs) > 0:
raise OptionValueError("This option does not take any arguments.") raise OptionValueError("This option does not take any arguments.")
print helper_get_stor_dir() print helper_get_stor_dir()
class OptionValueError(RuntimeError):
    """Raised when command line options are combined or used incorrectly."""

    def __init__(self, msg):
        # Bug fix: also initialise the RuntimeError base so str(e) and
        # e.args carry the message (previously they were empty).  ``msg``
        # is kept as an attribute because callers check hasattr(e, "msg").
        RuntimeError.__init__(self, msg)
        self.msg = msg
usage = """[-c|--copy FILE/DIR [FILE/DIR...]] [-m|--move FILE/DIR [FILE/DIR...]]
[--watch-add DIR] [--watch-list] [--watch-remove DIR]
[--storage-dir-set DIR] [--storage-dir-get]"""

parser = OptionParser(usage=usage, add_help_option=False)

# Every supported option, as (flag strings, add_option keyword arguments).
# Registration order below is the order optparse lists them in the help text.
_option_specs = [
    (
        ("-c", "--copy"),
        dict(
            action="callback",
            callback=CopyAction,
            metavar="FILE",
            help="Copy FILE(s) into the storage directory.\nYou can specify multiple files or directories.",
        ),
    ),
    (
        ("-m", "--move"),
        dict(
            action="callback",
            callback=MoveAction,
            metavar="FILE",
            help="Move FILE(s) into the storage directory.\nYou can specify multiple files or directories.",
        ),
    ),
    (
        ("--watch-add",),
        dict(
            action="callback",
            callback=WatchAddAction,
            help="Add DIR to the watched folders list.",
        ),
    ),
    (
        ("--watch-list",),
        dict(
            action="callback",
            callback=WatchListAction,
            help="Show the list of folders that are watched.",
        ),
    ),
    (
        ("--watch-remove",),
        dict(
            action="callback",
            callback=WatchRemoveAction,
            help="Remove DIR from the watched folders list.",
        ),
    ),
    (
        ("--storage-dir-set",),
        dict(
            action="callback",
            callback=StorageSetAction,
            help="Set storage dir to DIR.",
        ),
    ),
    (
        ("--storage-dir-get",),
        dict(
            action="callback",
            callback=StorageGetAction,
            help="Show the current storage dir.",
        ),
    ),
    (
        ("-h", "--help"),
        dict(
            dest="help",
            action="store_true",
            help="show this help message and exit",
        ),
    ),
]
for _flags, _kwargs in _option_specs:
    parser.add_option(*_flags, **_kwargs)
# pop "--dir" # pop "--dir"
#sys.argv.pop(1) # sys.argv.pop(1)
# pop "invoked pwd" # pop "invoked pwd"
currentDir = os.getcwd() #sys.argv.pop(1)+'/' currentDir = os.getcwd() # sys.argv.pop(1)+'/'
if('-l' in sys.argv or '--link' in sys.argv): if "-l" in sys.argv or "--link" in sys.argv:
print "\nThe [-l][--link] option is deprecated. Please use the --watch-add option.\nTry 'airtime-import -h' for more detail.\n" print "\nThe [-l][--link] option is deprecated. Please use the --watch-add option.\nTry 'airtime-import -h' for more detail.\n"
sys.exit() sys.exit()
if('-h' in sys.argv): if "-h" in sys.argv:
printHelp() printHelp()
sys.exit() sys.exit()
if(len(sys.argv) == 1 or '-' not in sys.argv[1]): if len(sys.argv) == 1 or "-" not in sys.argv[1]:
printHelp() printHelp()
sys.exit() sys.exit()
@ -309,10 +396,10 @@ try:
(option, args) = parser.parse_args() (option, args) = parser.parse_args()
except Exception, e: except Exception, e:
printHelp() printHelp()
if hasattr(e, 'msg'): if hasattr(e, "msg"):
print "Error: "+e.msg print "Error: " + e.msg
else: else:
print "Error: ",e print "Error: ", e
sys.exit() sys.exit()
except SystemExit: except SystemExit:
printHelp() printHelp()
@ -321,7 +408,3 @@ except SystemExit:
if option.help: if option.help:
printHelp() printHelp()
sys.exit() sys.exit()

View File

@ -21,14 +21,14 @@ logging.disable(50)
logger.addHandler(ch) logger.addHandler(ch)
if os.geteuid() != 0: if os.geteuid() != 0:
print 'Must be a root user.' print "Must be a root user."
sys.exit(1) sys.exit(1)
# loading config file # loading config file
try: try:
config = ConfigObj('/etc/airtime/airtime.conf') config = ConfigObj("/etc/airtime/airtime.conf")
except Exception, e: except Exception, e:
print('Error loading config file: %s', e) print ("Error loading config file: %s", e)
sys.exit(1) sys.exit(1)
api_client = apc.AirtimeApiClient(config) api_client = apc.AirtimeApiClient(config)
@ -43,25 +43,29 @@ try:
# filepath # filepath
files = api_client.get_files_without_silan_value() files = api_client.get_files_without_silan_value()
total_files = len(files) total_files = len(files)
if total_files == 0: break if total_files == 0:
break
processed_data = [] processed_data = []
total = 0 total = 0
for f in files: for f in files:
full_path = f['fp'] full_path = f["fp"]
# silence detect(set default queue in and out) # silence detect(set default queue in and out)
try: try:
command = ['silan', '-b' '-f', 'JSON', full_path] command = ["silan", "-b" "-f", "JSON", full_path]
proc = subprocess.Popen(command, stdout=subprocess.PIPE) proc = subprocess.Popen(command, stdout=subprocess.PIPE)
out = proc.communicate()[0].strip('\r\n') out = proc.communicate()[0].strip("\r\n")
info = json.loads(out) info = json.loads(out)
data = {} data = {}
data['cuein'] = str('{0:f}'.format(info['sound'][0][0])) data["cuein"] = str("{0:f}".format(info["sound"][0][0]))
data['cueout'] = str('{0:f}'.format(info['sound'][-1][1])) data["cueout"] = str("{0:f}".format(info["sound"][-1][1]))
data['length'] = str('{0:f}'.format(info['file duration'])) data["length"] = str("{0:f}".format(info["file duration"]))
processed_data.append((f['id'], data)) processed_data.append((f["id"], data))
total += 1 total += 1
if total % 5 == 0: if total % 5 == 0:
print "Total %s / %s files has been processed.." % (total, total_files) print "Total %s / %s files has been processed.." % (
total,
total_files,
)
except Exception, e: except Exception, e:
print e print e
print traceback.format_exc() print traceback.format_exc()
@ -70,7 +74,7 @@ try:
try: try:
print api_client.update_cue_values_by_silan(processed_data) print api_client.update_cue_values_by_silan(processed_data)
except Exception ,e: except Exception, e:
print e print e
print traceback.format_exc() print traceback.format_exc()
print "Total %d songs Processed" % subtotal print "Total %d songs Processed" % subtotal

View File

@ -16,32 +16,35 @@ if os.geteuid() == 0:
print "Please run this program as non-root" print "Please run this program as non-root"
sys.exit(1) sys.exit(1)
def printUsage(): def printUsage():
print "airtime-test-soundcard [-v] [-o alsa | ao | oss | portaudio | pulseaudio ] [-h]" print "airtime-test-soundcard [-v] [-o alsa | ao | oss | portaudio | pulseaudio ] [-h]"
print " Where: " print " Where: "
print " -v verbose mode" print " -v verbose mode"
print " -o Linux Sound API (default: alsa)" print " -o Linux Sound API (default: alsa)"
print " -h show help menu " print " -h show help menu "
def find_liquidsoap_binary():
    """
    Starting with Airtime 2.0, we don't know the exact location of the Liquidsoap
    binary because it may have been installed through a debian package. Let's find
    the location of this binary.

    Returns the command name when it is on PATH, otherwise None.
    """
    found = subprocess.call("which airtime-liquidsoap > /dev/null", shell=True)
    return "airtime-liquidsoap" if found == 0 else None
# Parse the command line; -h (help), -v (verbose) and -o API take no/one value.
try:
    optlist, args = getopt.getopt(sys.argv[1:], "hvo:")
except getopt.GetoptError:
    # Fixed deprecated ``except getopt.GetoptError, g`` syntax; the bound
    # exception was never used.
    printUsage()
    sys.exit(1)

# Sound APIs liquidsoap can output to on Linux.
sound_api_types = set(["alsa", "ao", "oss", "portaudio", "pulseaudio"])

verbose = False
@ -63,26 +66,25 @@ for o, a in optlist:
try: try:
print "Sound API: %s" % sound_api print "Sound API: %s" % sound_api
print "Outputting to soundcard. You should be able to hear a monotonous tone. Press ctrl-c to quit." print "Outputting to soundcard. You should be able to hear a monotonous tone. Press ctrl-c to quit."
liquidsoap_exe = find_liquidsoap_binary() liquidsoap_exe = find_liquidsoap_binary()
if liquidsoap_exe is None: if liquidsoap_exe is None:
raise Exception("Liquidsoap not found!") raise Exception("Liquidsoap not found!")
command = "%s 'output.%s(sine())'" % (liquidsoap_exe, sound_api) command = "%s 'output.%s(sine())'" % (liquidsoap_exe, sound_api)
if not verbose: if not verbose:
command += " > /dev/null" command += " > /dev/null"
#print command # print command
rv = subprocess.call(command, shell=True) rv = subprocess.call(command, shell=True)
#if we reach this point, it means that our subprocess exited without the user # if we reach this point, it means that our subprocess exited without the user
#doing a keyboard interrupt. This means there was a problem outputting to the # doing a keyboard interrupt. This means there was a problem outputting to the
#soundcard. Print appropriate message. # soundcard. Print appropriate message.
print "There was an error using the selected sound API. Please select a different API " + \ print "There was an error using the selected sound API. Please select a different API " + "and run this program again. Use the -h option for help"
"and run this program again. Use the -h option for help"
except KeyboardInterrupt, ki: except KeyboardInterrupt, ki:
print "\nExiting" print "\nExiting"
except Exception, e: except Exception, e:

View File

@ -16,6 +16,7 @@ if os.geteuid() == 0:
print "Please run this program as non-root" print "Please run this program as non-root"
sys.exit(1) sys.exit(1)
def printUsage(): def printUsage():
print "airtime-test-stream [-v] [-o icecast | shoutcast ] [-H hostname] [-P port] [-u username] [-p password] [-m mount]" print "airtime-test-stream [-v] [-o icecast | shoutcast ] [-H hostname] [-P port] [-u username] [-p password] [-m mount]"
print " Where: " print " Where: "
@ -42,7 +43,8 @@ def find_liquidsoap_binary():
return None return None
optlist, args = getopt.getopt(sys.argv[1:], 'hvo:H:P:u:p:m:')
optlist, args = getopt.getopt(sys.argv[1:], "hvo:H:P:u:p:m:")
stream_types = set(["shoutcast", "icecast"]) stream_types = set(["shoutcast", "icecast"])
verbose = False verbose = False
@ -89,31 +91,38 @@ try:
print "Mount: %s\n" % mount print "Mount: %s\n" % mount
url = "http://%s:%s/%s" % (host, port, mount) url = "http://%s:%s/%s" % (host, port, mount)
print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." % (stream_type, url) print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." % (
stream_type,
url,
)
liquidsoap_exe = find_liquidsoap_binary() liquidsoap_exe = find_liquidsoap_binary()
if liquidsoap_exe is None: if liquidsoap_exe is None:
raise Exception("Liquidsoap not found!") raise Exception("Liquidsoap not found!")
if stream_type == "icecast": if stream_type == "icecast":
command = "%s 'output.icecast(%%vorbis, host = \"%s\", port = %s, user= \"%s\", password = \"%s\", mount=\"%s\", sine())'" % (liquidsoap_exe, host, port, user, password, mount) command = (
'%s \'output.icecast(%%vorbis, host = "%s", port = %s, user= "%s", password = "%s", mount="%s", sine())\''
% (liquidsoap_exe, host, port, user, password, mount)
)
else: else:
command = "%s 'output.shoutcast(%%mp3, host=\"%s\", port = %s, user= \"%s\", password = \"%s\", sine())'" \ command = (
% (liquidsoap_exe, host, port, user, password) '%s \'output.shoutcast(%%mp3, host="%s", port = %s, user= "%s", password = "%s", sine())\''
% (liquidsoap_exe, host, port, user, password)
)
if not verbose: if not verbose:
command += " 2>/dev/null | grep \"failed\"" command += ' 2>/dev/null | grep "failed"'
else: else:
print command print command
#print command # print command
rv = subprocess.call(command, shell=True) rv = subprocess.call(command, shell=True)
#if we reach this point, it means that our subprocess exited without the user # if we reach this point, it means that our subprocess exited without the user
#doing a keyboard interrupt. This means there was a problem outputting to the # doing a keyboard interrupt. This means there was a problem outputting to the
#stream server. Print appropriate message. # stream server. Print appropriate message.
print "There was an error with your stream configuration. Please review your configuration " + \ print "There was an error with your stream configuration. Please review your configuration " + "and run this program again. Use the -h option for help"
"and run this program again. Use the -h option for help"
except KeyboardInterrupt, ki: except KeyboardInterrupt, ki:
print "\nExiting" print "\nExiting"

View File

@ -7,39 +7,45 @@ import requests
from urlparse import urlparse from urlparse import urlparse
import sys import sys
# Absolute path of the Airtime configuration file read at startup.
CONFIG_PATH = "/etc/airtime/airtime.conf"
# Section of the config file holding api_key/base_url/base_dir/base_port.
GENERAL_CONFIG_SECTION = "general"
def read_config_file(config_path): def read_config_file(config_path):
"""Parse the application's config file located at config_path.""" """Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser() config = ConfigParser.SafeConfigParser()
try: try:
config.readfp(open(config_path)) config.readfp(open(config_path))
except IOError as e: except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1) exit(-1)
except Exception: except Exception:
print e.strerror print e.strerror
exit(-1) exit(-1)
return config return config
if __name__ == '__main__':
if __name__ == "__main__":
config = read_config_file(CONFIG_PATH) config = read_config_file(CONFIG_PATH)
api_key = config.get(GENERAL_CONFIG_SECTION, 'api_key') api_key = config.get(GENERAL_CONFIG_SECTION, "api_key")
base_url = config.get(GENERAL_CONFIG_SECTION, 'base_url') base_url = config.get(GENERAL_CONFIG_SECTION, "base_url")
base_dir = config.get(GENERAL_CONFIG_SECTION, 'base_dir') base_dir = config.get(GENERAL_CONFIG_SECTION, "base_dir")
base_port = config.get(GENERAL_CONFIG_SECTION, 'base_port', 80) base_port = config.get(GENERAL_CONFIG_SECTION, "base_port", 80)
action = "upgrade" action = "upgrade"
station_url = "" station_url = ""
default_url = "http://%s:%s%s" % (base_url, base_port, base_dir) default_url = "http://%s:%s%s" % (base_url, base_port, base_dir)
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--downgrade', help='Downgrade the station', action="store_true") parser.add_argument(
parser.add_argument('station_url', help='station URL', nargs='?', default=default_url) "--downgrade", help="Downgrade the station", action="store_true"
)
parser.add_argument(
"station_url", help="station URL", nargs="?", default=default_url
)
args = parser.parse_args() args = parser.parse_args()
if args.downgrade: if args.downgrade:
action = "downgrade" action = "downgrade"
@ -47,12 +53,11 @@ if __name__ == '__main__':
station_url = args.station_url station_url = args.station_url
# Add http:// if you were lazy and didn't pass a scheme to this script # Add http:// if you were lazy and didn't pass a scheme to this script
url = urlparse(station_url) url = urlparse(station_url)
if not url.scheme: if not url.scheme:
station_url = "http://%s" % station_url station_url = "http://%s" % station_url
print "Requesting %s..." % action print "Requesting %s..." % action
r = requests.get("%s/%s" % (station_url, action), auth=(api_key, '')) r = requests.get("%s/%s" % (station_url, action), auth=(api_key, ""))
print r.text print r.text
r.raise_for_status() r.raise_for_status()