Format code using black

jo 2021-05-27 16:23:02 +02:00
parent efe4fa027e
commit c27f020d73
85 changed files with 3238 additions and 2243 deletions
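A note on the change itself: the diff below was presumably produced by running black over the repository (for example, black . at the repo root). Black normalizes string literals to double quotes and re-wraps any statement longer than its default 88-character line limit, which accounts for nearly every hunk in this commit. A minimal sketch of the same transformation through black's documented Python API, using a line from the first hunk:

import black

src = "name = 'libretimeapi'\n"
print(black.format_str(src, mode=black.FileMode()))
# -> name = "libretimeapi"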

View File

@@ -2,7 +2,8 @@
from django.apps import AppConfig
from django.db.models.signals import pre_save
class LibreTimeAPIConfig(AppConfig):
name = 'libretimeapi'
verbose_name = 'LibreTime API'
default_auto_field = 'django.db.models.AutoField'
name = "libretimeapi"
verbose_name = "LibreTime API"
default_auto_field = "django.db.models.AutoField"

View File

@@ -1,21 +1,23 @@
# -*- coding: utf-8 -*-
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
def create_user(self, username, type, email, first_name, last_name, password):
user = self.model(username=username,
type=type,
email=email,
first_name=first_name,
last_name=last_name)
user = self.model(
username=username,
type=type,
email=email,
first_name=first_name,
last_name=last_name,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, first_name, last_name, password):
user = self.create_user(username, 'A', email, first_name, last_name, password)
user = self.create_user(username, "A", email, first_name, last_name, password)
return user
def get_by_natural_key(self, username):
return self.get(username=username)
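The hunk above also shows black's call-wrapping rule: a call that no longer fits on one line is exploded to one argument per line with a trailing ("magic") comma, so that adding an argument later touches only a single line. A sketch of the same decision via black's API, assuming the default 88-character limit:

import black

src = (
    "user = self.model(username=username, type=type, email=email, "
    "first_name=first_name, last_name=last_name)\n"
)
# Too long for one line, so black explodes it and adds a trailing comma,
# exactly as in the create_user hunk above.
print(black.format_str(src, mode=black.FileMode(line_length=88)))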

View File

@@ -15,18 +15,20 @@ class LoginAttempt(models.Model):
class Meta:
managed = False
db_table = 'cc_login_attempts'
db_table = "cc_login_attempts"
class Session(models.Model):
sessid = models.CharField(primary_key=True, max_length=32)
userid = models.ForeignKey('User', models.DO_NOTHING, db_column='userid', blank=True, null=True)
userid = models.ForeignKey(
"User", models.DO_NOTHING, db_column="userid", blank=True, null=True
)
login = models.CharField(max_length=255, blank=True, null=True)
ts = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'cc_sess'
db_table = "cc_sess"
USER_TYPE_CHOICES = ()
@@ -35,12 +37,14 @@ for item in USER_TYPES.items():
class User(AbstractBaseUser):
username = models.CharField(db_column='login', unique=True, max_length=255)
password = models.CharField(db_column='pass', max_length=255) # Field renamed because it was a Python reserved word.
username = models.CharField(db_column="login", unique=True, max_length=255)
password = models.CharField(
db_column="pass", max_length=255
) # Field renamed because it was a Python reserved word.
type = models.CharField(max_length=1, choices=USER_TYPE_CHOICES)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
last_login = models.DateTimeField(db_column='lastlogin', blank=True, null=True)
last_login = models.DateTimeField(db_column="lastlogin", blank=True, null=True)
lastfail = models.DateTimeField(blank=True, null=True)
skype_contact = models.CharField(max_length=1024, blank=True, null=True)
jabber_contact = models.CharField(max_length=1024, blank=True, null=True)
@@ -48,13 +52,13 @@ class User(AbstractBaseUser):
cell_phone = models.CharField(max_length=1024, blank=True, null=True)
login_attempts = models.IntegerField(blank=True, null=True)
USERNAME_FIELD = 'username'
EMAIL_FIELD = 'email'
REQUIRED_FIELDS = ['type', 'email', 'first_name', 'last_name']
USERNAME_FIELD = "username"
EMAIL_FIELD = "email"
REQUIRED_FIELDS = ["type", "email", "first_name", "last_name"]
objects = UserManager()
def get_full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
return "{} {}".format(self.first_name, self.last_name)
def get_short_name(self):
return self.first_name
@@ -66,7 +70,7 @@ class User(AbstractBaseUser):
self.password = hashlib.md5(password.encode()).hexdigest()
def is_staff(self):
print('is_staff')
print("is_staff")
return self.type == ADMIN
def check_password(self, password):
@@ -82,6 +86,7 @@ class User(AbstractBaseUser):
(managed = True), then this can be replaced with
django.contrib.auth.models.PermissionsMixin.
"""
def is_superuser(self):
return self.type == ADMIN
@@ -125,7 +130,7 @@ class User(AbstractBaseUser):
class Meta:
managed = False
db_table = 'cc_subjs'
db_table = "cc_subjs"
class UserToken(models.Model):
@@ -139,4 +144,4 @@ class UserToken(models.Model):
class Meta:
managed = False
db_table = 'cc_subjs_token'
db_table = "cc_subjs_token"

View File

@@ -4,11 +4,13 @@ from django.db import models
class CeleryTask(models.Model):
task_id = models.CharField(max_length=256)
track_reference = models.ForeignKey('ThirdPartyTrackReference', models.DO_NOTHING, db_column='track_reference')
track_reference = models.ForeignKey(
"ThirdPartyTrackReference", models.DO_NOTHING, db_column="track_reference"
)
name = models.CharField(max_length=256, blank=True, null=True)
dispatch_time = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=256)
class Meta:
managed = False
db_table = 'celery_tasks'
db_table = "celery_tasks"

View File

@@ -8,5 +8,4 @@ class Country(models.Model):
class Meta:
managed = False
db_table = 'cc_country'
db_table = "cc_country"

View File

@@ -6,11 +6,20 @@ class File(models.Model):
name = models.CharField(max_length=255)
mime = models.CharField(max_length=255)
ftype = models.CharField(max_length=128)
directory = models.ForeignKey('MusicDir', models.DO_NOTHING, db_column='directory', blank=True, null=True)
directory = models.ForeignKey(
"MusicDir", models.DO_NOTHING, db_column="directory", blank=True, null=True
)
filepath = models.TextField(blank=True, null=True)
import_status = models.IntegerField()
currently_accessing = models.IntegerField(db_column='currentlyaccessing')
edited_by = models.ForeignKey('User', models.DO_NOTHING, db_column='editedby', blank=True, null=True, related_name='edited_files')
currently_accessing = models.IntegerField(db_column="currentlyaccessing")
edited_by = models.ForeignKey(
"User",
models.DO_NOTHING,
db_column="editedby",
blank=True,
null=True,
related_name="edited_files",
)
mtime = models.DateTimeField(blank=True, null=True)
utime = models.DateTimeField(blank=True, null=True)
lptime = models.DateTimeField(blank=True, null=True)
@@ -59,8 +68,10 @@ class File(models.Model):
contributor = models.CharField(max_length=512, blank=True, null=True)
language = models.CharField(max_length=512, blank=True, null=True)
file_exists = models.BooleanField(blank=True, null=True)
replay_gain = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)
owner = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True)
replay_gain = models.DecimalField(
max_digits=8, decimal_places=2, blank=True, null=True
)
owner = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True)
cuein = models.DurationField(blank=True, null=True)
cueout = models.DurationField(blank=True, null=True)
silan_check = models.BooleanField(blank=True, null=True)
@@ -77,10 +88,10 @@ class File(models.Model):
class Meta:
managed = False
db_table = 'cc_files'
db_table = "cc_files"
permissions = [
('change_own_file', 'Change the files where they are the owner'),
('delete_own_file', 'Delete the files where they are the owner'),
("change_own_file", "Change the files where they are the owner"),
("delete_own_file", "Delete the files where they are the owner"),
]
@@ -92,15 +103,16 @@ class MusicDir(models.Model):
class Meta:
managed = False
db_table = 'cc_music_dirs'
db_table = "cc_music_dirs"
class CloudFile(models.Model):
storage_backend = models.CharField(max_length=512)
resource_id = models.TextField()
filename = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True,
db_column='cc_file_id')
filename = models.ForeignKey(
File, models.DO_NOTHING, blank=True, null=True, db_column="cc_file_id"
)
class Meta:
managed = False
db_table = 'cloud_file'
db_table = "cloud_file"

View File

@@ -8,7 +8,7 @@ class Playlist(models.Model):
name = models.CharField(max_length=255)
mtime = models.DateTimeField(blank=True, null=True)
utime = models.DateTimeField(blank=True, null=True)
creator = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True)
creator = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True)
description = models.CharField(max_length=512, blank=True, null=True)
length = models.DurationField(blank=True, null=True)
@@ -17,7 +17,7 @@ class Playlist(models.Model):
class Meta:
managed = False
db_table = 'cc_playlist'
db_table = "cc_playlist"
class PlaylistContent(models.Model):
@@ -39,4 +39,4 @@ class PlaylistContent(models.Model):
class Meta:
managed = False
db_table = 'cc_playlistcontents'
db_table = "cc_playlistcontents"

View File

@@ -4,13 +4,13 @@ from .files import File
class ListenerCount(models.Model):
timestamp = models.ForeignKey('Timestamp', models.DO_NOTHING)
mount_name = models.ForeignKey('MountName', models.DO_NOTHING)
timestamp = models.ForeignKey("Timestamp", models.DO_NOTHING)
mount_name = models.ForeignKey("MountName", models.DO_NOTHING)
listener_count = models.IntegerField()
class Meta:
managed = False
db_table = 'cc_listener_count'
db_table = "cc_listener_count"
class LiveLog(models.Model):
@@ -20,18 +20,20 @@ class LiveLog(models.Model):
class Meta:
managed = False
db_table = 'cc_live_log'
db_table = "cc_live_log"
class PlayoutHistory(models.Model):
file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True)
starts = models.DateTimeField()
ends = models.DateTimeField(blank=True, null=True)
instance = models.ForeignKey('ShowInstance', models.DO_NOTHING, blank=True, null=True)
instance = models.ForeignKey(
"ShowInstance", models.DO_NOTHING, blank=True, null=True
)
class Meta:
managed = False
db_table = 'cc_playout_history'
db_table = "cc_playout_history"
class PlayoutHistoryMetadata(models.Model):
@@ -41,7 +43,7 @@ class PlayoutHistoryMetadata(models.Model):
class Meta:
managed = False
db_table = 'cc_playout_history_metadata'
db_table = "cc_playout_history_metadata"
class PlayoutHistoryTemplate(models.Model):
@@ -50,7 +52,7 @@ class PlayoutHistoryTemplate(models.Model):
class Meta:
managed = False
db_table = 'cc_playout_history_template'
db_table = "cc_playout_history_template"
class PlayoutHistoryTemplateField(models.Model):
@@ -63,7 +65,7 @@ class PlayoutHistoryTemplateField(models.Model):
class Meta:
managed = False
db_table = 'cc_playout_history_template_field'
db_table = "cc_playout_history_template_field"
class Timestamp(models.Model):
@@ -71,4 +73,4 @@ class Timestamp(models.Model):
class Meta:
managed = False
db_table = 'cc_timestamp'
db_table = "cc_timestamp"

View File

@@ -8,14 +8,14 @@ class ImportedPodcast(models.Model):
auto_ingest = models.BooleanField()
auto_ingest_timestamp = models.DateTimeField(blank=True, null=True)
album_override = models.BooleanField()
podcast = models.ForeignKey('Podcast', models.DO_NOTHING)
podcast = models.ForeignKey("Podcast", models.DO_NOTHING)
def get_owner(self):
return self.podcast.owner
class Meta:
managed = False
db_table = 'imported_podcast'
db_table = "imported_podcast"
class Podcast(models.Model):
@@ -32,17 +32,19 @@ class Podcast(models.Model):
itunes_subtitle = models.CharField(max_length=4096, blank=True, null=True)
itunes_category = models.CharField(max_length=4096, blank=True, null=True)
itunes_explicit = models.CharField(max_length=4096, blank=True, null=True)
owner = models.ForeignKey(User, models.DO_NOTHING, db_column='owner', blank=True, null=True)
owner = models.ForeignKey(
User, models.DO_NOTHING, db_column="owner", blank=True, null=True
)
def get_owner(self):
return self.owner
class Meta:
managed = False
db_table = 'podcast'
db_table = "podcast"
permissions = [
('change_own_podcast', 'Change the podcasts where they are the owner'),
('delete_own_podcast', 'Delete the podcasts where they are the owner'),
("change_own_podcast", "Change the podcasts where they are the owner"),
("delete_own_podcast", "Delete the podcasts where they are the owner"),
]
@@ -60,10 +62,16 @@ class PodcastEpisode(models.Model):
class Meta:
managed = False
db_table = 'podcast_episodes'
db_table = "podcast_episodes"
permissions = [
('change_own_podcastepisode', 'Change the episodes of podcasts where they are the owner'),
('delete_own_podcastepisode', 'Delete the episodes of podcasts where they are the owner'),
(
"change_own_podcastepisode",
"Change the episodes of podcasts where they are the owner",
),
(
"delete_own_podcastepisode",
"Delete the episodes of podcasts where they are the owner",
),
]
@@ -75,4 +83,4 @@ class StationPodcast(models.Model):
class Meta:
managed = False
db_table = 'station_podcast'
db_table = "station_podcast"

View File

@@ -3,14 +3,16 @@ from django.db import models
class Preference(models.Model):
subjid = models.ForeignKey('User', models.DO_NOTHING, db_column='subjid', blank=True, null=True)
subjid = models.ForeignKey(
"User", models.DO_NOTHING, db_column="subjid", blank=True, null=True
)
keystr = models.CharField(unique=True, max_length=255, blank=True, null=True)
valstr = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'cc_pref'
unique_together = (('subjid', 'keystr'),)
db_table = "cc_pref"
unique_together = (("subjid", "keystr"),)
class MountName(models.Model):
@@ -18,7 +20,7 @@ class MountName(models.Model):
class Meta:
managed = False
db_table = 'cc_mount_name'
db_table = "cc_mount_name"
class StreamSetting(models.Model):
@@ -28,4 +30,4 @@ class StreamSetting(models.Model):
class Meta:
managed = False
db_table = 'cc_stream_setting'
db_table = "cc_stream_setting"

View File

@@ -7,14 +7,14 @@ class Schedule(models.Model):
starts = models.DateTimeField()
ends = models.DateTimeField()
file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True)
stream = models.ForeignKey('Webstream', models.DO_NOTHING, blank=True, null=True)
stream = models.ForeignKey("Webstream", models.DO_NOTHING, blank=True, null=True)
clip_length = models.DurationField(blank=True, null=True)
fade_in = models.TimeField(blank=True, null=True)
fade_out = models.TimeField(blank=True, null=True)
cue_in = models.DurationField()
cue_out = models.DurationField()
media_item_played = models.BooleanField(blank=True, null=True)
instance = models.ForeignKey('ShowInstance', models.DO_NOTHING)
instance = models.ForeignKey("ShowInstance", models.DO_NOTHING)
playout_status = models.SmallIntegerField()
broadcasted = models.SmallIntegerField()
position = models.IntegerField()
@@ -24,8 +24,8 @@ class Schedule(models.Model):
class Meta:
managed = False
db_table = 'cc_schedule'
db_table = "cc_schedule"
permissions = [
('change_own_schedule', 'Change the content on their shows'),
('delete_own_schedule', 'Delete the content on their shows'),
("change_own_schedule", "Change the content on their shows"),
("delete_own_schedule", "Delete the content on their shows"),
]

View File

@@ -8,5 +8,4 @@ class ServiceRegister(models.Model):
class Meta:
managed = False
db_table = 'cc_service_register'
db_table = "cc_service_register"

View File

@@ -27,7 +27,7 @@ class Show(models.Model):
class Meta:
managed = False
db_table = 'cc_show'
db_table = "cc_show"
class ShowDays(models.Model):
@@ -47,16 +47,16 @@ class ShowDays(models.Model):
class Meta:
managed = False
db_table = 'cc_show_days'
db_table = "cc_show_days"
class ShowHost(models.Model):
show = models.ForeignKey(Show, models.DO_NOTHING)
subjs = models.ForeignKey('User', models.DO_NOTHING)
subjs = models.ForeignKey("User", models.DO_NOTHING)
class Meta:
managed = False
db_table = 'cc_show_hosts'
db_table = "cc_show_hosts"
class ShowInstance(models.Model):
@@ -66,7 +66,7 @@ class ShowInstance(models.Model):
show = models.ForeignKey(Show, models.DO_NOTHING)
record = models.SmallIntegerField(blank=True, null=True)
rebroadcast = models.SmallIntegerField(blank=True, null=True)
instance = models.ForeignKey('self', models.DO_NOTHING, blank=True, null=True)
instance = models.ForeignKey("self", models.DO_NOTHING, blank=True, null=True)
file = models.ForeignKey(File, models.DO_NOTHING, blank=True, null=True)
time_filled = models.DurationField(blank=True, null=True)
created = models.DateTimeField()
@@ -79,7 +79,7 @@ class ShowInstance(models.Model):
class Meta:
managed = False
db_table = 'cc_show_instances'
db_table = "cc_show_instances"
class ShowRebroadcast(models.Model):
@@ -92,4 +92,4 @@ class ShowRebroadcast(models.Model):
class Meta:
managed = False
db_table = 'cc_show_rebroadcast'
db_table = "cc_show_rebroadcast"

View File

@@ -6,7 +6,7 @@ class SmartBlock(models.Model):
name = models.CharField(max_length=255)
mtime = models.DateTimeField(blank=True, null=True)
utime = models.DateTimeField(blank=True, null=True)
creator = models.ForeignKey('User', models.DO_NOTHING, blank=True, null=True)
creator = models.ForeignKey("User", models.DO_NOTHING, blank=True, null=True)
description = models.CharField(max_length=512, blank=True, null=True)
length = models.DurationField(blank=True, null=True)
type = models.CharField(max_length=7, blank=True, null=True)
@@ -16,16 +16,22 @@ class SmartBlock(models.Model):
class Meta:
managed = False
db_table = 'cc_block'
db_table = "cc_block"
permissions = [
('change_own_smartblock', 'Change the smartblocks where they are the owner'),
('delete_own_smartblock', 'Delete the smartblocks where they are the owner'),
(
"change_own_smartblock",
"Change the smartblocks where they are the owner",
),
(
"delete_own_smartblock",
"Delete the smartblocks where they are the owner",
),
]
class SmartBlockContent(models.Model):
block = models.ForeignKey(SmartBlock, models.DO_NOTHING, blank=True, null=True)
file = models.ForeignKey('File', models.DO_NOTHING, blank=True, null=True)
file = models.ForeignKey("File", models.DO_NOTHING, blank=True, null=True)
position = models.IntegerField(blank=True, null=True)
trackoffset = models.FloatField()
cliplength = models.DurationField(blank=True, null=True)
@@ -39,10 +45,16 @@ class SmartBlockContent(models.Model):
class Meta:
managed = False
db_table = 'cc_blockcontents'
db_table = "cc_blockcontents"
permissions = [
('change_own_smartblockcontent', 'Change the content of smartblocks where they are the owner'),
('delete_own_smartblockcontent', 'Delete the content of smartblocks where they are the owner'),
(
"change_own_smartblockcontent",
"Change the content of smartblocks where they are the owner",
),
(
"delete_own_smartblockcontent",
"Delete the content of smartblocks where they are the owner",
),
]
@@ -59,9 +71,14 @@ class SmartBlockCriteria(models.Model):
class Meta:
managed = False
db_table = 'cc_blockcriteria'
db_table = "cc_blockcriteria"
permissions = [
('change_own_smartblockcriteria', 'Change the criteria of smartblocks where they are the owner'),
('delete_own_smartblockcriteria', 'Delete the criteria of smartblocks where they are the owner'),
(
"change_own_smartblockcriteria",
"Change the criteria of smartblocks where they are the owner",
),
(
"delete_own_smartblockcriteria",
"Delete the criteria of smartblocks where they are the owner",
),
]

View File

@@ -12,7 +12,8 @@ class ThirdPartyTrackReference(models.Model):
class Meta:
managed = False
db_table = 'third_party_track_references'
db_table = "third_party_track_references"
class TrackType(models.Model):
code = models.CharField(max_length=16, unique=True)
@@ -22,5 +23,4 @@ class TrackType(models.Model):
class Meta:
managed = False
db_table = 'cc_track_types'
db_table = "cc_track_types"

View File

@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
GUEST = 'G'
DJ = 'H'
PROGRAM_MANAGER = 'P'
ADMIN = 'A'
GUEST = "G"
DJ = "H"
PROGRAM_MANAGER = "P"
ADMIN = "A"
USER_TYPES = {
GUEST: 'Guest',
DJ: 'DJ',
PROGRAM_MANAGER: 'Program Manager',
ADMIN: 'Admin',
GUEST: "Guest",
DJ: "DJ",
PROGRAM_MANAGER: "Program Manager",
ADMIN: "Admin",
}

View File

@@ -21,10 +21,10 @@ class Webstream(models.Model):
class Meta:
managed = False
db_table = 'cc_webstream'
db_table = "cc_webstream"
permissions = [
('change_own_webstream', 'Change the webstreams where they are the owner'),
('delete_own_webstream', 'Delete the webstreams where they are the owner'),
("change_own_webstream", "Change the webstreams where they are the owner"),
("delete_own_webstream", "Delete the webstreams where they are the owner"),
]
@@ -38,4 +38,4 @@ class WebstreamMetadata(models.Model):
class Meta:
managed = False
db_table = 'cc_webstream_metadata'
db_table = "cc_webstream_metadata"

View File

@@ -5,98 +5,101 @@ from .models.user_constants import GUEST, DJ, PROGRAM_MANAGER, USER_TYPES
logger = logging.getLogger(__name__)
GUEST_PERMISSIONS = ['view_schedule',
'view_show',
'view_showdays',
'view_showhost',
'view_showinstance',
'view_showrebroadcast',
'view_file',
'view_podcast',
'view_podcastepisode',
'view_playlist',
'view_playlistcontent',
'view_smartblock',
'view_smartblockcontent',
'view_smartblockcriteria',
'view_webstream',
'view_apiroot',
]
DJ_PERMISSIONS = GUEST_PERMISSIONS + ['add_file',
'add_podcast',
'add_podcastepisode',
'add_playlist',
'add_playlistcontent',
'add_smartblock',
'add_smartblockcontent',
'add_smartblockcriteria',
'add_webstream',
'change_own_schedule',
'change_own_file',
'change_own_podcast',
'change_own_podcastepisode',
'change_own_playlist',
'change_own_playlistcontent',
'change_own_smartblock',
'change_own_smartblockcontent',
'change_own_smartblockcriteria',
'change_own_webstream',
'delete_own_schedule',
'delete_own_file',
'delete_own_podcast',
'delete_own_podcastepisode',
'delete_own_playlist',
'delete_own_playlistcontent',
'delete_own_smartblock',
'delete_own_smartblockcontent',
'delete_own_smartblockcriteria',
'delete_own_webstream',
]
PROGRAM_MANAGER_PERMISSIONS = GUEST_PERMISSIONS + ['add_show',
'add_showdays',
'add_showhost',
'add_showinstance',
'add_showrebroadcast',
'add_file',
'add_podcast',
'add_podcastepisode',
'add_playlist',
'add_playlistcontent',
'add_smartblock',
'add_smartblockcontent',
'add_smartblockcriteria',
'add_webstream',
'change_schedule',
'change_show',
'change_showdays',
'change_showhost',
'change_showinstance',
'change_showrebroadcast',
'change_file',
'change_podcast',
'change_podcastepisode',
'change_playlist',
'change_playlistcontent',
'change_smartblock',
'change_smartblockcontent',
'change_smartblockcriteria',
'change_webstream',
'delete_schedule',
'delete_show',
'delete_showdays',
'delete_showhost',
'delete_showinstance',
'delete_showrebroadcast',
'delete_file',
'delete_podcast',
'delete_podcastepisode',
'delete_playlist',
'delete_playlistcontent',
'delete_smartblock',
'delete_smartblockcontent',
'delete_smartblockcriteria',
'delete_webstream',
]
GUEST_PERMISSIONS = [
"view_schedule",
"view_show",
"view_showdays",
"view_showhost",
"view_showinstance",
"view_showrebroadcast",
"view_file",
"view_podcast",
"view_podcastepisode",
"view_playlist",
"view_playlistcontent",
"view_smartblock",
"view_smartblockcontent",
"view_smartblockcriteria",
"view_webstream",
"view_apiroot",
]
DJ_PERMISSIONS = GUEST_PERMISSIONS + [
"add_file",
"add_podcast",
"add_podcastepisode",
"add_playlist",
"add_playlistcontent",
"add_smartblock",
"add_smartblockcontent",
"add_smartblockcriteria",
"add_webstream",
"change_own_schedule",
"change_own_file",
"change_own_podcast",
"change_own_podcastepisode",
"change_own_playlist",
"change_own_playlistcontent",
"change_own_smartblock",
"change_own_smartblockcontent",
"change_own_smartblockcriteria",
"change_own_webstream",
"delete_own_schedule",
"delete_own_file",
"delete_own_podcast",
"delete_own_podcastepisode",
"delete_own_playlist",
"delete_own_playlistcontent",
"delete_own_smartblock",
"delete_own_smartblockcontent",
"delete_own_smartblockcriteria",
"delete_own_webstream",
]
PROGRAM_MANAGER_PERMISSIONS = GUEST_PERMISSIONS + [
"add_show",
"add_showdays",
"add_showhost",
"add_showinstance",
"add_showrebroadcast",
"add_file",
"add_podcast",
"add_podcastepisode",
"add_playlist",
"add_playlistcontent",
"add_smartblock",
"add_smartblockcontent",
"add_smartblockcriteria",
"add_webstream",
"change_schedule",
"change_show",
"change_showdays",
"change_showhost",
"change_showinstance",
"change_showrebroadcast",
"change_file",
"change_podcast",
"change_podcastepisode",
"change_playlist",
"change_playlistcontent",
"change_smartblock",
"change_smartblockcontent",
"change_smartblockcriteria",
"change_webstream",
"delete_schedule",
"delete_show",
"delete_showdays",
"delete_showhost",
"delete_showinstance",
"delete_showrebroadcast",
"delete_file",
"delete_podcast",
"delete_podcastepisode",
"delete_playlist",
"delete_playlistcontent",
"delete_smartblock",
"delete_smartblockcontent",
"delete_smartblockcriteria",
"delete_webstream",
]
GROUPS = {
GUEST: GUEST_PERMISSIONS,

View File

@@ -4,21 +4,22 @@ from django.conf import settings
from .models.user_constants import DJ
REQUEST_PERMISSION_TYPE_MAP = {
'GET': 'view',
'HEAD': 'view',
'OPTIONS': 'view',
'POST': 'change',
'PUT': 'change',
'DELETE': 'delete',
'PATCH': 'change',
"GET": "view",
"HEAD": "view",
"OPTIONS": "view",
"POST": "change",
"PUT": "change",
"DELETE": "delete",
"PATCH": "change",
}
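These verbs are combined with the view's model name, plus an optional "own_" infix for DJs acting on objects they own, to form Django permission codenames; get_permission_for_view below does the assembly. A hypothetical helper mirroring that scheme, for illustration only:

def codename(method, model, own_obj=""):
    # Mirrors get_permission_for_view: "<verb>_<own_?><model>".
    verbs = {"GET": "view", "POST": "change", "PATCH": "change", "DELETE": "delete"}
    return "{}_{}{}".format(verbs[method], own_obj, model)

assert codename("GET", "file") == "view_file"
assert codename("PATCH", "file", own_obj="own_") == "change_own_file"  # a DJ editing their own file
assert codename("DELETE", "playlist") == "delete_playlist"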
def get_own_obj(request, view):
user = request.user
if user is None or user.type != DJ:
return ''
if request.method == 'GET':
return ''
return ""
if request.method == "GET":
return ""
qs = view.queryset.all()
try:
model_owners = []
@@ -27,32 +28,34 @@ def get_own_obj(request, view):
if owner not in model_owners:
model_owners.append(owner)
if len(model_owners) == 1 and user in model_owners:
return 'own_'
return "own_"
except AttributeError:
return ''
return ''
return ""
return ""
def get_permission_for_view(request, view):
try:
permission_type = REQUEST_PERMISSION_TYPE_MAP[request.method]
if view.__class__.__name__ == 'APIRootView':
return '{}_apiroot'.format(permission_type)
if view.__class__.__name__ == "APIRootView":
return "{}_apiroot".format(permission_type)
model = view.model_permission_name
own_obj = get_own_obj(request, view)
return '{permission_type}_{own_obj}{model}'.format(permission_type=permission_type,
own_obj=own_obj,
model=model)
return "{permission_type}_{own_obj}{model}".format(
permission_type=permission_type, own_obj=own_obj, model=model
)
except AttributeError:
return None
def check_authorization_header(request):
auth_header = request.META.get('Authorization')
if not auth_header:
auth_header = request.META.get('HTTP_AUTHORIZATION', '')
if auth_header.startswith('Api-Key'):
def check_authorization_header(request):
auth_header = request.META.get("Authorization")
if not auth_header:
auth_header = request.META.get("HTTP_AUTHORIZATION", "")
if auth_header.startswith("Api-Key"):
token = auth_header.split()[1]
if token == settings.CONFIG.get('general', 'api_key'):
if token == settings.CONFIG.get("general", "api_key"):
return True
return False
@@ -63,6 +66,7 @@ class IsAdminOrOwnUser(BasePermission):
Django's standard permission system. For details see
https://www.django-rest-framework.org/api-guide/permissions/#custom-permissions
"""
def has_permission(self, request, view):
if request.user.is_superuser():
return True
@@ -84,6 +88,7 @@ class IsSystemTokenOrUser(BasePermission):
an API-Key header. All standard-users (i.e. not using the API-Key) have their
permissions checked against Django's standard permission system.
"""
def has_permission(self, request, view):
if request.user and request.user.is_authenticated:
perm = get_permission_for_view(request, view)
@@ -91,7 +96,7 @@ class IsSystemTokenOrUser(BasePermission):
# model. This use-case allows users to view the base of the API
# explorer. Their assigned group permissions determine further access
# into the explorer.
if perm == 'view_apiroot':
if perm == "view_apiroot":
return True
return request.user.has_perm(perm)
return check_authorization_header(request)
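For completeness, a hypothetical client call exercising the Api-Key branch above; the requests library and the localhost URL are assumptions, while the /api/v2/files/ path appears in the tests later in this diff:

import requests  # third-party HTTP client, not part of this codebase

# The token must match api_key under [general] in airtime.conf.
response = requests.get(
    "http://localhost/api/v2/files/",
    headers={"Authorization": "Api-Key REPLACE_WITH_API_KEY"},
)
print(response.status_code)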

View File

@@ -3,264 +3,305 @@ from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = get_user_model()
fields = [
'item_url',
'username',
'type',
'first_name',
'last_name',
'lastfail',
'skype_contact',
'jabber_contact',
'email',
'cell_phone',
'login_attempts',
"item_url",
"username",
"type",
"first_name",
"last_name",
"lastfail",
"skype_contact",
"jabber_contact",
"email",
"cell_phone",
"login_attempts",
]
class SmartBlockSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = SmartBlock
fields = '__all__'
fields = "__all__"
class SmartBlockContentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = SmartBlockContent
fields = '__all__'
fields = "__all__"
class SmartBlockCriteriaSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = SmartBlockCriteria
fields = '__all__'
fields = "__all__"
class CountrySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Country
fields = '__all__'
fields = "__all__"
class FileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = File
fields = '__all__'
fields = "__all__"
class ListenerCountSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ListenerCount
fields = '__all__'
fields = "__all__"
class LiveLogSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = LiveLog
fields = '__all__'
fields = "__all__"
class LoginAttemptSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = LoginAttempt
fields = '__all__'
fields = "__all__"
class MountNameSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = MountName
fields = '__all__'
fields = "__all__"
class MusicDirSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = MusicDir
fields = '__all__'
fields = "__all__"
class PlaylistSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Playlist
fields = '__all__'
fields = "__all__"
class PlaylistContentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PlaylistContent
fields = '__all__'
fields = "__all__"
class PlayoutHistorySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PlayoutHistory
fields = '__all__'
fields = "__all__"
class PlayoutHistoryMetadataSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PlayoutHistoryMetadata
fields = '__all__'
fields = "__all__"
class PlayoutHistoryTemplateSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PlayoutHistoryTemplate
fields = '__all__'
fields = "__all__"
class PlayoutHistoryTemplateFieldSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PlayoutHistoryTemplateField
fields = '__all__'
fields = "__all__"
class PreferenceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Preference
fields = '__all__'
fields = "__all__"
class ScheduleSerializer(serializers.HyperlinkedModelSerializer):
file_id = serializers.IntegerField(source='file.id', read_only=True)
stream_id = serializers.IntegerField(source='stream.id', read_only=True)
instance_id = serializers.IntegerField(source='instance.id', read_only=True)
file_id = serializers.IntegerField(source="file.id", read_only=True)
stream_id = serializers.IntegerField(source="stream.id", read_only=True)
instance_id = serializers.IntegerField(source="instance.id", read_only=True)
class Meta:
model = Schedule
fields = [
'item_url',
'id',
'starts',
'ends',
'clip_length',
'fade_in',
'fade_out',
'cue_in',
'cue_out',
'media_item_played',
'file',
'file_id',
'stream',
'stream_id',
'instance',
'instance_id',
"item_url",
"id",
"starts",
"ends",
"clip_length",
"fade_in",
"fade_out",
"cue_in",
"cue_out",
"media_item_played",
"file",
"file_id",
"stream",
"stream_id",
"instance",
"instance_id",
]
class ServiceRegisterSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ServiceRegister
fields = '__all__'
fields = "__all__"
class SessionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Session
fields = '__all__'
fields = "__all__"
class ShowSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Show
fields = [
'item_url',
'id',
'name',
'url',
'genre',
'description',
'color',
'background_color',
'linked',
'is_linkable',
'image_path',
'has_autoplaylist',
'autoplaylist_repeat',
'autoplaylist',
"item_url",
"id",
"name",
"url",
"genre",
"description",
"color",
"background_color",
"linked",
"is_linkable",
"image_path",
"has_autoplaylist",
"autoplaylist_repeat",
"autoplaylist",
]
class ShowDaysSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ShowDays
fields = '__all__'
fields = "__all__"
class ShowHostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ShowHost
fields = '__all__'
fields = "__all__"
class ShowInstanceSerializer(serializers.HyperlinkedModelSerializer):
show_id = serializers.IntegerField(source='show.id', read_only=True)
file_id = serializers.IntegerField(source='file.id', read_only=True)
show_id = serializers.IntegerField(source="show.id", read_only=True)
file_id = serializers.IntegerField(source="file.id", read_only=True)
class Meta:
model = ShowInstance
fields = [
'item_url',
'id',
'description',
'starts',
'ends',
'record',
'rebroadcast',
'time_filled',
'created',
'last_scheduled',
'modified_instance',
'autoplaylist_built',
'show',
'show_id',
'instance',
'file',
'file_id',
"item_url",
"id",
"description",
"starts",
"ends",
"record",
"rebroadcast",
"time_filled",
"created",
"last_scheduled",
"modified_instance",
"autoplaylist_built",
"show",
"show_id",
"instance",
"file",
"file_id",
]
class ShowRebroadcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ShowRebroadcast
fields = '__all__'
fields = "__all__"
class StreamSettingSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = StreamSetting
fields = '__all__'
fields = "__all__"
class UserTokenSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UserToken
fields = '__all__'
fields = "__all__"
class TimestampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Timestamp
fields = '__all__'
fields = "__all__"
class WebstreamSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Webstream
fields = '__all__'
fields = "__all__"
class WebstreamMetadataSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = WebstreamMetadata
fields = '__all__'
fields = "__all__"
class CeleryTaskSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = CeleryTask
fields = '__all__'
fields = "__all__"
class CloudFileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = CloudFile
fields = '__all__'
fields = "__all__"
class ImportedPodcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ImportedPodcast
fields = '__all__'
fields = "__all__"
class PodcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Podcast
fields = '__all__'
fields = "__all__"
class PodcastEpisodeSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PodcastEpisode
fields = '__all__'
fields = "__all__"
class StationPodcastSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = StationPodcast
fields = '__all__'
fields = "__all__"
class ThirdPartyTrackReferenceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ThirdPartyTrackReference
fields = '__all__'
fields = "__all__"
class TrackTypeSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = TrackType
fields = '__all__'
fields = "__all__"
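A cross-reference worth noting: the "item_url" entry in the explicit field lists above is DRF's hyperlinked identity field, renamed from the default "url" by URL_FIELD_NAME = "item_url" in settings.py (visible later in this diff); serializers declared with fields = "__all__" pick it up implicitly. An illustrative payload shape, with hypothetical values:

example = {
    "item_url": "http://localhost/api/v2/shows/1/",  # renamed identity field
    "id": 1,
    "name": "Morning Show",
}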

View File

@@ -3,10 +3,11 @@ import configparser
import os
from .utils import read_config_file, get_random_string
LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime')
DEFAULT_CONFIG_PATH = os.getenv('LIBRETIME_CONF_FILE',
os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf'))
API_VERSION = '2.0.0'
LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime")
DEFAULT_CONFIG_PATH = os.getenv(
"LIBRETIME_CONF_FILE", os.path.join(LIBRETIME_CONF_DIR, "airtime.conf")
)
API_VERSION = "2.0.0"
try:
CONFIG = read_config_file(DEFAULT_CONFIG_PATH)
@@ -18,70 +19,70 @@ except IOError:
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_random_string(CONFIG.get('general', 'api_key', fallback=''))
SECRET_KEY = get_random_string(CONFIG.get("general", "api_key", fallback=""))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('LIBRETIME_DEBUG', False)
DEBUG = os.getenv("LIBRETIME_DEBUG", False)
ALLOWED_HOSTS = ['*']
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'libretimeapi.apps.LibreTimeAPIConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'url_filter',
"libretimeapi.apps.LibreTimeAPIConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"url_filter",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = 'libretimeapi.urls'
ROOT_URLCONF = "libretimeapi.urls"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = 'libretimeapi.wsgi.application'
WSGI_APPLICATION = "libretimeapi.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': CONFIG.get('database', 'dbname', fallback=''),
'USER': CONFIG.get('database', 'dbuser', fallback=''),
'PASSWORD': CONFIG.get('database', 'dbpass', fallback=''),
'HOST': CONFIG.get('database', 'host', fallback=''),
'PORT': '5432',
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": CONFIG.get("database", "dbname", fallback=""),
"USER": CONFIG.get("database", "dbuser", fallback=""),
"PASSWORD": CONFIG.get("database", "dbpass", fallback=""),
"HOST": CONFIG.get("database", "host", fallback=""),
"PORT": "5432",
}
}
@@ -91,40 +92,40 @@ DATABASES = {
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.BasicAuthentication",
),
'DEFAULT_PERMISSION_CLASSES': [
'libretimeapi.permissions.IsSystemTokenOrUser',
"DEFAULT_PERMISSION_CLASSES": [
"libretimeapi.permissions.IsSystemTokenOrUser",
],
'DEFAULT_FILTER_BACKENDS': [
'url_filter.integrations.drf.DjangoFilterBackend',
"DEFAULT_FILTER_BACKENDS": [
"url_filter.integrations.drf.DjangoFilterBackend",
],
'URL_FIELD_NAME': 'item_url',
"URL_FIELD_NAME": "item_url",
}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = "en-us"
TIME_ZONE = 'UTC'
TIME_ZONE = "UTC"
USE_I18N = True
@@ -136,50 +137,53 @@ USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/api/static/'
STATIC_URL = "/api/static/"
if not DEBUG:
STATIC_ROOT = os.getenv('LIBRETIME_STATIC_ROOT', '/usr/share/airtime/api')
STATIC_ROOT = os.getenv("LIBRETIME_STATIC_ROOT", "/usr/share/airtime/api")
AUTH_USER_MODEL = 'libretimeapi.User'
AUTH_USER_MODEL = "libretimeapi.User"
TEST_RUNNER = 'libretimeapi.tests.runners.ManagedModelTestRunner'
TEST_RUNNER = "libretimeapi.tests.runners.ManagedModelTestRunner"
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '{levelname} {message}',
'style': '{',
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "{levelname} {message}",
"style": "{",
},
'verbose': {
'format': '{asctime} {module} {levelname} {message}',
'style': '{',
}
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(CONFIG.get('pypo', 'log_base_dir', fallback='.').replace('\'',''), 'api.log'),
'formatter': 'verbose',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"verbose": {
"format": "{asctime} {module} {levelname} {message}",
"style": "{",
},
},
'loggers': {
'django': {
'handlers': ['file', 'console'],
'level': 'INFO',
'propogate': True,
"handlers": {
"file": {
"level": "DEBUG",
"class": "logging.FileHandler",
"filename": os.path.join(
CONFIG.get("pypo", "log_base_dir", fallback=".").replace("'", ""),
"api.log",
),
"formatter": "verbose",
},
'libretimeapi': {
'handlers': ['file', 'console'],
'level': 'INFO',
'propogate': True,
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "simple",
},
},
"loggers": {
"django": {
"handlers": ["file", "console"],
"level": "INFO",
"propogate": True,
},
"libretimeapi": {
"handlers": ["file", "console"],
"level": "INFO",
"propogate": True,
},
},
}
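One pre-existing issue a formatter cannot catch: Django's logging dictConfig only recognizes the key "propagate", so the misspelled "propogate" entries above are silently ignored and both loggers simply keep the default propagate=True. A corrected sketch of the loggers section alone:

LOGGERS = {
    "django": {
        "handlers": ["file", "console"],
        "level": "INFO",
        "propagate": True,  # note the spelling; "propogate" is dropped by dictConfig
    },
    "libretimeapi": {
        "handlers": ["file", "console"],
        "level": "INFO",
        "propagate": True,
    },
}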

View File

@@ -8,18 +8,17 @@ class ManagedModelTestRunner(DiscoverRunner):
project managed for the duration of the test run, so that one doesn't need
to execute the SQL manually to create them.
"""
def setup_test_environment(self, *args, **kwargs):
from django.apps import apps
self.unmanaged_models = [m for m in apps.get_models()
if not m._meta.managed]
self.unmanaged_models = [m for m in apps.get_models() if not m._meta.managed]
for m in self.unmanaged_models:
m._meta.managed = True
super(ManagedModelTestRunner, self).setup_test_environment(*args,
**kwargs)
super(ManagedModelTestRunner, self).setup_test_environment(*args, **kwargs)
def teardown_test_environment(self, *args, **kwargs):
super(ManagedModelTestRunner, self).teardown_test_environment(*args,
**kwargs)
super(ManagedModelTestRunner, self).teardown_test_environment(*args, **kwargs)
# reset unmanaged models
for m in self.unmanaged_models:
m._meta.managed = False

View File

@@ -9,33 +9,40 @@ from libretimeapi.permission_constants import GROUPS
class TestUserManager(APITestCase):
def test_create_user(self):
user = User.objects.create_user('test',
email='test@example.com',
password='test',
type=DJ,
first_name='test',
last_name='user')
user = User.objects.create_user(
"test",
email="test@example.com",
password="test",
type=DJ,
first_name="test",
last_name="user",
)
db_user = User.objects.get(pk=user.pk)
self.assertEqual(db_user.username, user.username)
def test_create_superuser(self):
user = User.objects.create_superuser('test',
email='test@example.com',
password='test',
first_name='test',
last_name='user')
user = User.objects.create_superuser(
"test",
email="test@example.com",
password="test",
first_name="test",
last_name="user",
)
db_user = User.objects.get(pk=user.pk)
self.assertEqual(db_user.username, user.username)
class TestUser(APITestCase):
def test_guest_get_group_perms(self):
user = User.objects.create_user('test',
email='test@example.com',
password='test',
type=GUEST,
first_name='test',
last_name='user')
user = User.objects.create_user(
"test",
email="test@example.com",
password="test",
type=GUEST,
first_name="test",
last_name="user",
)
permissions = user.get_group_permissions()
# APIRoot permission hardcoded in the check as it isn't a Permission object
str_perms = [p.codename for p in permissions] + ['view_apiroot']
str_perms = [p.codename for p in permissions] + ["view_apiroot"]
self.assertCountEqual(str_perms, GROUPS[GUEST])

View File

@@ -6,7 +6,11 @@ from django.conf import settings
from rest_framework.test import APITestCase, APIRequestFactory
from model_bakery import baker
from libretimeapi.permissions import IsSystemTokenOrUser
from libretimeapi.permission_constants import GUEST_PERMISSIONS, DJ_PERMISSIONS, PROGRAM_MANAGER_PERMISSIONS
from libretimeapi.permission_constants import (
GUEST_PERMISSIONS,
DJ_PERMISSIONS,
PROGRAM_MANAGER_PERMISSIONS,
)
from libretimeapi.models.user_constants import GUEST, DJ, PROGRAM_MANAGER, ADMIN
@@ -16,54 +20,56 @@ class TestIsSystemTokenOrUser(APITestCase):
cls.path = "/api/v2/files/"
def test_unauthorized(self):
response = self.client.get(self.path.format('files'))
response = self.client.get(self.path.format("files"))
self.assertEqual(response.status_code, 403)
def test_token_incorrect(self):
token = 'doesnotexist'
token = "doesnotexist"
request = APIRequestFactory().get(self.path)
request.user = AnonymousUser()
request.META['Authorization'] = 'Api-Key {token}'.format(token=token)
request.META["Authorization"] = "Api-Key {token}".format(token=token)
allowed = IsSystemTokenOrUser().has_permission(request, None)
self.assertFalse(allowed)
def test_token_correct(self):
token = settings.CONFIG.get('general', 'api_key')
token = settings.CONFIG.get("general", "api_key")
request = APIRequestFactory().get(self.path)
request.user = AnonymousUser()
request.META['Authorization'] = 'Api-Key {token}'.format(token=token)
request.META["Authorization"] = "Api-Key {token}".format(token=token)
allowed = IsSystemTokenOrUser().has_permission(request, None)
self.assertTrue(allowed)
class TestPermissions(APITestCase):
URLS = [
'schedule',
'shows',
'show-days',
'show-hosts',
'show-instances',
'show-rebroadcasts',
'files',
'playlists',
'playlist-contents',
'smart-blocks',
'smart-block-contents',
'smart-block-criteria',
'webstreams',
"schedule",
"shows",
"show-days",
"show-hosts",
"show-instances",
"show-rebroadcasts",
"files",
"playlists",
"playlist-contents",
"smart-blocks",
"smart-block-contents",
"smart-block-criteria",
"webstreams",
]
def logged_in_test_model(self, model, name, user_type, fn):
path = self.path.format(model)
user_created = get_user_model().objects.filter(username=name)
if not user_created:
user = get_user_model().objects.create_user(name,
email='test@example.com',
password='test',
type=user_type,
first_name='test',
last_name='user')
self.client.login(username=name, password='test')
user = get_user_model().objects.create_user(
name,
email="test@example.com",
password="test",
type=user_type,
first_name="test",
last_name="user",
)
self.client.login(username=name, password="test")
return fn(path)
@classmethod
@@ -72,49 +78,57 @@ class TestPermissions(APITestCase):
def test_guest_permissions_success(self):
for model in self.URLS:
response = self.logged_in_test_model(model, 'guest', GUEST, self.client.get)
self.assertEqual(response.status_code, 200,
msg='Invalid for model {}'.format(model))
response = self.logged_in_test_model(model, "guest", GUEST, self.client.get)
self.assertEqual(
response.status_code, 200, msg="Invalid for model {}".format(model)
)
def test_guest_permissions_failure(self):
for model in self.URLS:
response = self.logged_in_test_model(model, 'guest', GUEST, self.client.post)
self.assertEqual(response.status_code, 403,
msg='Invalid for model {}'.format(model))
response = self.logged_in_test_model('users', 'guest', GUEST, self.client.get)
self.assertEqual(response.status_code, 403, msg='Invalid for model users')
response = self.logged_in_test_model(
model, "guest", GUEST, self.client.post
)
self.assertEqual(
response.status_code, 403, msg="Invalid for model {}".format(model)
)
response = self.logged_in_test_model("users", "guest", GUEST, self.client.get)
self.assertEqual(response.status_code, 403, msg="Invalid for model users")
def test_dj_get_permissions(self):
for model in self.URLS:
response = self.logged_in_test_model(model, 'dj', DJ, self.client.get)
self.assertEqual(response.status_code, 200,
msg='Invalid for model {}'.format(model))
response = self.logged_in_test_model(model, "dj", DJ, self.client.get)
self.assertEqual(
response.status_code, 200, msg="Invalid for model {}".format(model)
)
def test_dj_post_permissions(self):
user = get_user_model().objects.create_user('test-dj',
email='test@example.com',
password='test',
type=DJ,
first_name='test',
last_name='user')
f = baker.make('libretimeapi.File',
owner=user)
model = 'files/{}'.format(f.id)
user = get_user_model().objects.create_user(
"test-dj",
email="test@example.com",
password="test",
type=DJ,
first_name="test",
last_name="user",
)
f = baker.make("libretimeapi.File", owner=user)
model = "files/{}".format(f.id)
path = self.path.format(model)
self.client.login(username='test-dj', password='test')
response = self.client.patch(path, {'name': 'newFilename'})
self.client.login(username="test-dj", password="test")
response = self.client.patch(path, {"name": "newFilename"})
self.assertEqual(response.status_code, 200)
def test_dj_post_permissions_failure(self):
user = get_user_model().objects.create_user('test-dj',
email='test@example.com',
password='test',
type=DJ,
first_name='test',
last_name='user')
f = baker.make('libretimeapi.File')
model = 'files/{}'.format(f.id)
user = get_user_model().objects.create_user(
"test-dj",
email="test@example.com",
password="test",
type=DJ,
first_name="test",
last_name="user",
)
f = baker.make("libretimeapi.File")
model = "files/{}".format(f.id)
path = self.path.format(model)
self.client.login(username='test-dj', password='test')
response = self.client.patch(path, {'name': 'newFilename'})
self.client.login(username="test-dj", password="test")
response = self.client.patch(path, {"name": "newFilename"})
self.assertEqual(response.status_code, 403)

View File

@@ -11,29 +11,32 @@ class TestFileViewSet(APITestCase):
@classmethod
def setUpTestData(cls):
cls.path = "/api/v2/files/{id}/download/"
cls.token = settings.CONFIG.get('general', 'api_key')
cls.token = settings.CONFIG.get("general", "api_key")
def test_invalid(self):
path = self.path.format(id='a')
self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token))
path = self.path.format(id="a")
self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token))
response = self.client.get(path)
self.assertEqual(response.status_code, 400)
def test_does_not_exist(self):
path = self.path.format(id='1')
self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token))
path = self.path.format(id="1")
self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token))
response = self.client.get(path)
self.assertEqual(response.status_code, 404)
def test_exists(self):
music_dir = baker.make('libretimeapi.MusicDir',
directory=os.path.join(os.path.dirname(__file__),
'resources'))
f = baker.make('libretimeapi.File',
directory=music_dir,
mime='audio/mp3',
filepath='song.mp3')
music_dir = baker.make(
"libretimeapi.MusicDir",
directory=os.path.join(os.path.dirname(__file__), "resources"),
)
f = baker.make(
"libretimeapi.File",
directory=music_dir,
mime="audio/mp3",
filepath="song.mp3",
)
path = self.path.format(id=str(f.pk))
self.client.credentials(HTTP_AUTHORIZATION='Api-Key {}'.format(self.token))
self.client.credentials(HTTP_AUTHORIZATION="Api-Key {}".format(self.token))
response = self.client.get(path)
self.assertEqual(response.status_code, 200)

View File

@@ -5,48 +5,48 @@ from rest_framework import routers
from .views import *
router = routers.DefaultRouter()
router.register('smart-blocks', SmartBlockViewSet)
router.register('smart-block-contents', SmartBlockContentViewSet)
router.register('smart-block-criteria', SmartBlockCriteriaViewSet)
router.register('countries', CountryViewSet)
router.register('files', FileViewSet)
router.register('listener-counts', ListenerCountViewSet)
router.register('live-logs', LiveLogViewSet)
router.register('login-attempts', LoginAttemptViewSet)
router.register('mount-names', MountNameViewSet)
router.register('music-dirs', MusicDirViewSet)
router.register('playlists', PlaylistViewSet)
router.register('playlist-contents', PlaylistContentViewSet)
router.register('playout-history', PlayoutHistoryViewSet)
router.register('playout-history-metadata', PlayoutHistoryMetadataViewSet)
router.register('playout-history-templates', PlayoutHistoryTemplateViewSet)
router.register('playout-history-template-fields', PlayoutHistoryTemplateFieldViewSet)
router.register('preferences', PreferenceViewSet)
router.register('schedule', ScheduleViewSet)
router.register('service-registers', ServiceRegisterViewSet)
router.register('sessions', SessionViewSet)
router.register('shows', ShowViewSet)
router.register('show-days', ShowDaysViewSet)
router.register('show-hosts', ShowHostViewSet)
router.register('show-instances', ShowInstanceViewSet)
router.register('show-rebroadcasts', ShowRebroadcastViewSet)
router.register('stream-settings', StreamSettingViewSet)
router.register('users', UserViewSet)
router.register('user-tokens', UserTokenViewSet)
router.register('timestamps', TimestampViewSet)
router.register('webstreams', WebstreamViewSet)
router.register('webstream-metadata', WebstreamMetadataViewSet)
router.register('celery-tasks', CeleryTaskViewSet)
router.register('cloud-files', CloudFileViewSet)
router.register('imported-podcasts', ImportedPodcastViewSet)
router.register('podcasts', PodcastViewSet)
router.register('podcast-episodes', PodcastEpisodeViewSet)
router.register('station-podcasts', StationPodcastViewSet)
router.register('third-party-track-references', ThirdPartyTrackReferenceViewSet)
router.register('track-types', TrackTypeViewSet)
router.register("smart-blocks", SmartBlockViewSet)
router.register("smart-block-contents", SmartBlockContentViewSet)
router.register("smart-block-criteria", SmartBlockCriteriaViewSet)
router.register("countries", CountryViewSet)
router.register("files", FileViewSet)
router.register("listener-counts", ListenerCountViewSet)
router.register("live-logs", LiveLogViewSet)
router.register("login-attempts", LoginAttemptViewSet)
router.register("mount-names", MountNameViewSet)
router.register("music-dirs", MusicDirViewSet)
router.register("playlists", PlaylistViewSet)
router.register("playlist-contents", PlaylistContentViewSet)
router.register("playout-history", PlayoutHistoryViewSet)
router.register("playout-history-metadata", PlayoutHistoryMetadataViewSet)
router.register("playout-history-templates", PlayoutHistoryTemplateViewSet)
router.register("playout-history-template-fields", PlayoutHistoryTemplateFieldViewSet)
router.register("preferences", PreferenceViewSet)
router.register("schedule", ScheduleViewSet)
router.register("service-registers", ServiceRegisterViewSet)
router.register("sessions", SessionViewSet)
router.register("shows", ShowViewSet)
router.register("show-days", ShowDaysViewSet)
router.register("show-hosts", ShowHostViewSet)
router.register("show-instances", ShowInstanceViewSet)
router.register("show-rebroadcasts", ShowRebroadcastViewSet)
router.register("stream-settings", StreamSettingViewSet)
router.register("users", UserViewSet)
router.register("user-tokens", UserTokenViewSet)
router.register("timestamps", TimestampViewSet)
router.register("webstreams", WebstreamViewSet)
router.register("webstream-metadata", WebstreamMetadataViewSet)
router.register("celery-tasks", CeleryTaskViewSet)
router.register("cloud-files", CloudFileViewSet)
router.register("imported-podcasts", ImportedPodcastViewSet)
router.register("podcasts", PodcastViewSet)
router.register("podcast-episodes", PodcastEpisodeViewSet)
router.register("station-podcasts", StationPodcastViewSet)
router.register("third-party-track-references", ThirdPartyTrackReferenceViewSet)
router.register("track-types", TrackTypeViewSet)
urlpatterns = [
path('api/v2/', include(router.urls)),
path('api/v2/version/', version),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path("api/v2/", include(router.urls)),
path("api/v2/version/", version),
path("api-auth/", include("rest_framework.urls", namespace="rest_framework")),
]
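For context, each router.register(prefix, ViewSet) call above expands into named list/detail routes under api/v2/. A minimal, self-contained sketch of that expansion follows; the PingViewSet name and the standalone settings boilerplate are illustrative assumptions, not part of this codebase.

import django
from django.conf import settings

# Standalone settings so the sketch runs outside a project; a real Django
# project configures this in its settings module instead.
settings.configure(
    DEBUG=True,
    INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
)
django.setup()

from rest_framework import routers, viewsets
from rest_framework.response import Response

class PingViewSet(viewsets.ViewSet):
    # Hypothetical viewset with only a list action.
    def list(self, request):
        return Response({"pong": True})

router = routers.DefaultRouter()
router.register("pings", PingViewSet, basename="ping")

# Each registration yields named URL patterns such as "ping-list".
for url in router.urls:
    print(url.pattern, url.name)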

View File

@ -4,23 +4,27 @@ import sys
import string
import random
def read_config_file(config_path):
"""Parse the application's config file located at config_path."""
config = configparser.ConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print("Failed to open config file at {}: {}".format(config_path, e.strerror),
file=sys.stderr)
print(
"Failed to open config file at {}: {}".format(config_path, e.strerror),
file=sys.stderr,
)
raise e
except Exception as e:
print(e.strerror, file=sys.stderr)
raise e
return config
def get_random_string(seed):
"""Generates a random string based on the given seed"""
choices = string.ascii_letters + string.digits + string.punctuation
seed = seed.encode('utf-8')
seed = seed.encode("utf-8")
rand = random.Random(seed)
return "".join(rand.choice(choices) for _ in range(16))
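Because the generator is seeded with the input string, the output is deterministic; a quick usage sketch of the function above (the seed value is made up):

s1 = get_random_string("station-1")
s2 = get_random_string("station-1")
assert s1 == s2 and len(s1) == 16  # same seed, same 16-character result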

View File

@ -10,220 +10,261 @@ from rest_framework.response import Response
from .serializers import *
from .permissions import IsAdminOrOwnUser
class UserViewSet(viewsets.ModelViewSet):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
permission_classes = [IsAdminOrOwnUser]
model_permission_name = 'user'
model_permission_name = "user"
class SmartBlockViewSet(viewsets.ModelViewSet):
queryset = SmartBlock.objects.all()
serializer_class = SmartBlockSerializer
model_permission_name = 'smartblock'
model_permission_name = "smartblock"
class SmartBlockContentViewSet(viewsets.ModelViewSet):
queryset = SmartBlockContent.objects.all()
serializer_class = SmartBlockContentSerializer
model_permission_name = 'smartblockcontent'
model_permission_name = "smartblockcontent"
class SmartBlockCriteriaViewSet(viewsets.ModelViewSet):
queryset = SmartBlockCriteria.objects.all()
serializer_class = SmartBlockCriteriaSerializer
model_permission_name = 'smartblockcriteria'
model_permission_name = "smartblockcriteria"
class CountryViewSet(viewsets.ModelViewSet):
queryset = Country.objects.all()
serializer_class = CountrySerializer
model_permission_name = 'country'
model_permission_name = "country"
class FileViewSet(viewsets.ModelViewSet):
queryset = File.objects.all()
serializer_class = FileSerializer
model_permission_name = 'file'
model_permission_name = "file"
@action(detail=True, methods=['GET'])
@action(detail=True, methods=["GET"])
def download(self, request, pk=None):
if pk is None:
return Response('No file requested', status=status.HTTP_400_BAD_REQUEST)
return Response("No file requested", status=status.HTTP_400_BAD_REQUEST)
try:
pk = int(pk)
except ValueError:
return Response('File ID should be an integer',
status=status.HTTP_400_BAD_REQUEST)
return Response(
"File ID should be an integer", status=status.HTTP_400_BAD_REQUEST
)
filename = get_object_or_404(File, pk=pk)
directory = filename.directory
path = os.path.join(directory.directory, filename.filepath)
response = FileResponse(open(path, 'rb'), content_type=filename.mime)
response = FileResponse(open(path, "rb"), content_type=filename.mime)
return response
class ListenerCountViewSet(viewsets.ModelViewSet):
queryset = ListenerCount.objects.all()
serializer_class = ListenerCountSerializer
model_permission_name = 'listenercount'
model_permission_name = "listenercount"
class LiveLogViewSet(viewsets.ModelViewSet):
queryset = LiveLog.objects.all()
serializer_class = LiveLogSerializer
model_permission_name = 'livelog'
model_permission_name = "livelog"
class LoginAttemptViewSet(viewsets.ModelViewSet):
queryset = LoginAttempt.objects.all()
serializer_class = LoginAttemptSerializer
model_permission_name = 'loginattempt'
model_permission_name = "loginattempt"
class MountNameViewSet(viewsets.ModelViewSet):
queryset = MountName.objects.all()
serializer_class = MountNameSerializer
model_permission_name = 'mountname'
model_permission_name = "mountname"
class MusicDirViewSet(viewsets.ModelViewSet):
queryset = MusicDir.objects.all()
serializer_class = MusicDirSerializer
model_permission_name = 'musicdir'
model_permission_name = "musicdir"
class PlaylistViewSet(viewsets.ModelViewSet):
queryset = Playlist.objects.all()
serializer_class = PlaylistSerializer
model_permission_name = 'playlist'
model_permission_name = "playlist"
class PlaylistContentViewSet(viewsets.ModelViewSet):
queryset = PlaylistContent.objects.all()
serializer_class = PlaylistContentSerializer
model_permission_name = 'playlistcontent'
model_permission_name = "playlistcontent"
class PlayoutHistoryViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistory.objects.all()
serializer_class = PlayoutHistorySerializer
model_permission_name = 'playouthistory'
model_permission_name = "playouthistory"
class PlayoutHistoryMetadataViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistoryMetadata.objects.all()
serializer_class = PlayoutHistoryMetadataSerializer
model_permission_name = 'playouthistorymetadata'
model_permission_name = "playouthistorymetadata"
class PlayoutHistoryTemplateViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistoryTemplate.objects.all()
serializer_class = PlayoutHistoryTemplateSerializer
model_permission_name = 'playouthistorytemplate'
model_permission_name = "playouthistorytemplate"
class PlayoutHistoryTemplateFieldViewSet(viewsets.ModelViewSet):
queryset = PlayoutHistoryTemplateField.objects.all()
serializer_class = PlayoutHistoryTemplateFieldSerializer
model_permission_name = 'playouthistorytemplatefield'
model_permission_name = "playouthistorytemplatefield"
class PreferenceViewSet(viewsets.ModelViewSet):
queryset = Preference.objects.all()
serializer_class = PreferenceSerializer
model_permission_name = 'preference'
model_permission_name = "preference"
class ScheduleViewSet(viewsets.ModelViewSet):
queryset = Schedule.objects.all()
serializer_class = ScheduleSerializer
filter_fields = ('starts', 'ends', 'playout_status', 'broadcasted')
model_permission_name = 'schedule'
filter_fields = ("starts", "ends", "playout_status", "broadcasted")
model_permission_name = "schedule"
class ServiceRegisterViewSet(viewsets.ModelViewSet):
queryset = ServiceRegister.objects.all()
serializer_class = ServiceRegisterSerializer
model_permission_name = 'serviceregister'
model_permission_name = "serviceregister"
class SessionViewSet(viewsets.ModelViewSet):
queryset = Session.objects.all()
serializer_class = SessionSerializer
model_permission_name = 'session'
model_permission_name = "session"
class ShowViewSet(viewsets.ModelViewSet):
queryset = Show.objects.all()
serializer_class = ShowSerializer
model_permission_name = 'show'
model_permission_name = "show"
class ShowDaysViewSet(viewsets.ModelViewSet):
queryset = ShowDays.objects.all()
serializer_class = ShowDaysSerializer
model_permission_name = 'showdays'
model_permission_name = "showdays"
class ShowHostViewSet(viewsets.ModelViewSet):
queryset = ShowHost.objects.all()
serializer_class = ShowHostSerializer
model_permission_name = 'showhost'
model_permission_name = "showhost"
class ShowInstanceViewSet(viewsets.ModelViewSet):
queryset = ShowInstance.objects.all()
serializer_class = ShowInstanceSerializer
model_permission_name = 'showinstance'
model_permission_name = "showinstance"
class ShowRebroadcastViewSet(viewsets.ModelViewSet):
queryset = ShowRebroadcast.objects.all()
serializer_class = ShowRebroadcastSerializer
model_permission_name = 'showrebroadcast'
model_permission_name = "showrebroadcast"
class StreamSettingViewSet(viewsets.ModelViewSet):
queryset = StreamSetting.objects.all()
serializer_class = StreamSettingSerializer
model_permission_name = 'streamsetting'
model_permission_name = "streamsetting"
class UserTokenViewSet(viewsets.ModelViewSet):
queryset = UserToken.objects.all()
serializer_class = UserTokenSerializer
model_permission_name = 'usertoken'
model_permission_name = "usertoken"
class TimestampViewSet(viewsets.ModelViewSet):
queryset = Timestamp.objects.all()
serializer_class = TimestampSerializer
model_permission_name = 'timestamp'
model_permission_name = "timestamp"
class WebstreamViewSet(viewsets.ModelViewSet):
queryset = Webstream.objects.all()
serializer_class = WebstreamSerializer
model_permission_name = 'webstream'
model_permission_name = "webstream"
class WebstreamMetadataViewSet(viewsets.ModelViewSet):
queryset = WebstreamMetadata.objects.all()
serializer_class = WebstreamMetadataSerializer
model_permission_name = 'webstreammetadata'
model_permission_name = "webstreammetadata"
class CeleryTaskViewSet(viewsets.ModelViewSet):
queryset = CeleryTask.objects.all()
serializer_class = CeleryTaskSerializer
model_permission_name = 'celerytask'
model_permission_name = "celerytask"
class CloudFileViewSet(viewsets.ModelViewSet):
queryset = CloudFile.objects.all()
serializer_class = CloudFileSerializer
model_permission_name = 'cloudfile'
model_permission_name = "cloudfile"
class ImportedPodcastViewSet(viewsets.ModelViewSet):
queryset = ImportedPodcast.objects.all()
serializer_class = ImportedPodcastSerializer
model_permission_name = 'importedpodcast'
model_permission_name = "importedpodcast"
class PodcastViewSet(viewsets.ModelViewSet):
queryset = Podcast.objects.all()
serializer_class = PodcastSerializer
model_permission_name = 'podcast'
model_permission_name = "podcast"
class PodcastEpisodeViewSet(viewsets.ModelViewSet):
queryset = PodcastEpisode.objects.all()
serializer_class = PodcastEpisodeSerializer
model_permission_name = 'podcastepisode'
model_permission_name = "podcastepisode"
class StationPodcastViewSet(viewsets.ModelViewSet):
queryset = StationPodcast.objects.all()
serializer_class = StationPodcastSerializer
model_permission_name = 'station'
model_permission_name = "station"
class ThirdPartyTrackReferenceViewSet(viewsets.ModelViewSet):
queryset = ThirdPartyTrackReference.objects.all()
serializer_class = ThirdPartyTrackReferenceSerializer
model_permission_name = 'thirdpartytrackreference'
model_permission_name = "thirdpartytrackreference"
class TrackTypeViewSet(viewsets.ModelViewSet):
queryset = TrackType.objects.all()
serializer_class = TrackTypeSerializer
model_permission_name = 'tracktype'
model_permission_name = "tracktype"
@api_view(['GET'])
@permission_classes((AllowAny, ))
@api_view(["GET"])
@permission_classes((AllowAny,))
def version(request, *args, **kwargs):
return Response({'api_version': settings.API_VERSION})
return Response({"api_version": settings.API_VERSION})

View File

@ -12,6 +12,6 @@ import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'libretimeapi.settings')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "libretimeapi.settings")
application = get_wsgi_application()

View File

@ -8,26 +8,26 @@ print(script_path)
os.chdir(script_path)
setup(
name='libretime-api',
version='2.0.0a1',
name="libretime-api",
version="2.0.0a1",
packages=find_packages(),
include_package_data=True,
description='LibreTime API backend server',
url='https://github.com/LibreTime/libretime',
author='LibreTime Contributors',
scripts=['bin/libretime-api'],
description="LibreTime API backend server",
url="https://github.com/LibreTime/libretime",
author="LibreTime Contributors",
scripts=["bin/libretime-api"],
install_requires=[
'coreapi',
'Django~=3.0',
'djangorestframework',
'django-url-filter',
'markdown',
'model_bakery',
'psycopg2',
"coreapi",
"Django~=3.0",
"djangorestframework",
"django-url-filter",
"markdown",
"model_bakery",
"psycopg2",
],
project_urls={
'Bug Tracker': 'https://github.com/LibreTime/libretime/issues',
'Documentation': 'https://libretime.org',
'Source Code': 'https://github.com/LibreTime/libretime',
"Bug Tracker": "https://github.com/LibreTime/libretime/issues",
"Documentation": "https://libretime.org",
"Source Code": "https://github.com/LibreTime/libretime",
},
)

View File

@ -16,8 +16,8 @@ similar code when it starts up (but then makes changes if something is different
"""
class AirtimeMediaMonitorBootstrap():
class AirtimeMediaMonitorBootstrap:
"""AirtimeMediaMonitorBootstrap constructor
Keyword Arguments:
@ -25,8 +25,9 @@ class AirtimeMediaMonitorBootstrap():
pe -- reference to an instance of ProcessEvent
api_clients -- reference of api_clients to communicate with airtime-server
"""
def __init__(self):
config = ConfigObj('/etc/airtime/airtime.conf')
config = ConfigObj("/etc/airtime/airtime.conf")
self.api_client = apc.api_client_factory(config)
"""
@ -36,25 +37,26 @@ class AirtimeMediaMonitorBootstrap():
print 'Error configuring logging: ', e
sys.exit(1)
"""
self.logger = logging.getLogger()
self.logger.info("Adding %s on watch list...", "xxx")
self.scan()
"""On bootup we want to scan all directories and look for files that
weren't there before or that changed while the media-monitor
process was offline.
"""
def scan(self):
directories = self.get_list_of_watched_dirs();
directories = self.get_list_of_watched_dirs()
self.logger.info("watched directories found: %s", directories)
for id, dir in directories.iteritems():
self.logger.debug("%s, %s", id, dir)
#CHANGED!!!
#self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8"))
# CHANGED!!!
# self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8"))
self.sync_database_to_filesystem(id, dir)
"""Gets a list of files that the Airtime database knows for a specific directory.
@ -62,6 +64,7 @@ class AirtimeMediaMonitorBootstrap():
get_list_of_watched_dirs function.
dir_id -- row id of the directory in the cc_watched_dirs database table
"""
def list_db_files(self, dir_id):
return self.api_client.list_all_db_files(dir_id)
@ -69,23 +72,29 @@ class AirtimeMediaMonitorBootstrap():
returns the path and the database row id for each watched directory. Also
returns the Stor directory, which can be identified by its row id (always has the value "1")
"""
def get_list_of_watched_dirs(self):
json = self.api_client.list_all_watched_dirs()
return json["dirs"]
def scan_dir_for_existing_files(self, dir):
command = 'find "%s" -type f -iname "*.ogg" -o -iname "*.mp3" -readable' % dir.replace('"', '\\"')
command = (
'find "%s" -type f -iname "*.ogg" -o -iname "*.mp3" -readable'
% dir.replace('"', '\\"')
)
self.logger.debug(command)
#CHANGED!!
# CHANGED!!
stdout = self.exec_command(command).decode("UTF-8")
return stdout.splitlines()
def exec_command(self, command):
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
self.logger.warn("command \n%s\n return with a non-zero return value", command)
self.logger.warn(
"command \n%s\n return with a non-zero return value", command
)
self.logger.error(stderr)
return stdout
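The find command above is interpolated into a shell string, so quotes in directory names must be escaped by hand, and the bare -o makes the predicates bind in a non-obvious way. A hedged alternative sketch (not the code above) that passes an argument list with no shell and groups the predicates explicitly:

import subprocess

def scan_dir_safely(dir_path):
    # No shell involved, so dir_path needs no quoting or escaping.
    command = [
        "find", dir_path, "-type", "f", "-readable",
        "(", "-iname", "*.ogg", "-o", "-iname", "*.mp3", ")",
    ]
    result = subprocess.run(command, capture_output=True, text=True)
    return result.stdout.splitlines()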
@ -98,6 +107,7 @@ class AirtimeMediaMonitorBootstrap():
dir_id -- row id of the directory in the cc_watched_dirs database table
dir -- pathname of the directory
"""
def sync_database_to_filesystem(self, dir_id, dir):
"""
set to hold new and/or modified files. We use a set to make it ok if files are added
@ -107,7 +117,7 @@ class AirtimeMediaMonitorBootstrap():
db_known_files_set = set()
files = self.list_db_files(dir_id)
for file in files['files']:
for file in files["files"]:
db_known_files_set.add(file)
existing_files = self.scan_dir_for_existing_files(dir)
@ -115,18 +125,17 @@ class AirtimeMediaMonitorBootstrap():
existing_files_set = set()
for file_path in existing_files:
if len(file_path.strip(" \n")) > 0:
existing_files_set.add(file_path[len(dir):])
existing_files_set.add(file_path[len(dir) :])
deleted_files_set = db_known_files_set - existing_files_set
new_files_set = existing_files_set - db_known_files_set
print("DB Known files: \n%s\n\n" % len(db_known_files_set))
print("FS Known files: \n%s\n\n" % len(existing_files_set))
print("Deleted files: \n%s\n\n" % deleted_files_set)
print("New files: \n%s\n\n" % new_files_set)
print ("DB Known files: \n%s\n\n"%len(db_known_files_set))
print ("FS Known files: \n%s\n\n"%len(existing_files_set))
print ("Deleted files: \n%s\n\n"%deleted_files_set)
print ("New files: \n%s\n\n"%new_files_set)
if __name__ == "__main__":
AirtimeMediaMonitorBootstrap()
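The sync logic above boils down to two set differences; a tiny worked example with made-up paths:

db_known = {"a.mp3", "b.mp3", "c.ogg"}   # paths the database knows
on_disk = {"b.mp3", "c.ogg", "d.mp3"}    # paths found on the filesystem

deleted = db_known - on_disk  # {"a.mp3"}: in the DB but gone from disk
new = on_disk - db_known      # {"d.mp3"}: on disk but unknown to the DB
assert deleted == {"a.mp3"} and new == {"d.mp3"}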

View File

@ -10,24 +10,25 @@ from . import config_file
from functools import partial
from .metadata_analyzer import MetadataAnalyzer
from .replaygain_analyzer import ReplayGainAnalyzer
from .status_reporter import StatusReporter
from .status_reporter import StatusReporter
from .message_listener import MessageListener
class AirtimeAnalyzerServer:
"""A server for importing uploads to Airtime as background jobs.
"""
"""A server for importing uploads to Airtime as background jobs."""
# Constants
# Constants
_LOG_PATH = "/var/log/airtime/airtime_analyzer.log"
# Variables
_log_level = logging.INFO
def __init__(self, rmq_config_path, http_retry_queue_path, debug=False):
# Dump a stacktrace with 'kill -SIGUSR2 <PID>'
signal.signal(signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace())
signal.signal(
signal.SIGUSR2, lambda sig, frame: AirtimeAnalyzerServer.dump_stacktrace()
)
# Configure logging
self.setup_logging(debug)
@ -43,11 +44,10 @@ class AirtimeAnalyzerServer:
self._msg_listener = MessageListener(rmq_config)
StatusReporter.stop_thread()
def setup_logging(self, debug):
"""Set up nicely formatted logging and log rotation.
Keyword arguments:
debug -- a boolean indicating whether to enable super verbose logging
to the screen and disk.
@ -55,27 +55,30 @@ class AirtimeAnalyzerServer:
if debug:
self._log_level = logging.DEBUG
else:
#Disable most pika/rabbitmq logging:
pika_logger = logging.getLogger('pika')
# Disable most pika/rabbitmq logging:
pika_logger = logging.getLogger("pika")
pika_logger.setLevel(logging.CRITICAL)
# Set up logging
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s")
logFormatter = logging.Formatter(
"%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger()
rootLogger.setLevel(self._log_level)
fileHandler = logging.handlers.RotatingFileHandler(filename=self._LOG_PATH, maxBytes=1024*1024*30,
backupCount=8)
fileHandler = logging.handlers.RotatingFileHandler(
filename=self._LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8
)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
@classmethod
def dump_stacktrace(stack):
''' Dump a stacktrace for all threads '''
"""Dump a stacktrace for all threads"""
code = []
for threadId, stack in list(sys._current_frames().items()):
code.append("\n# ThreadID: %s" % threadId)
@ -83,4 +86,4 @@ class AirtimeAnalyzerServer:
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
logging.info('\n'.join(code))
logging.info("\n".join(code))

View File

@ -3,8 +3,7 @@
class Analyzer:
""" Abstract base class for all "analyzers".
"""
"""Abstract base class for all "analyzers"."""
@staticmethod
def analyze(filename, metadata):

View File

@ -12,20 +12,28 @@ from .cuepoint_analyzer import CuePointAnalyzer
from .replaygain_analyzer import ReplayGainAnalyzer
from .playability_analyzer import *
class AnalyzerPipeline:
""" Analyzes and imports an audio file into the Airtime library.
This currently performs metadata extraction (eg. gets the ID3 tags from an MP3),
then moves the file to the Airtime music library (stor/imported), and returns
the results back to the parent process. This class is used in an isolated process
so that if it crashes, it does not kill the entire airtime_analyzer daemon and
the failure to import can be reported back to the web application.
class AnalyzerPipeline:
"""Analyzes and imports an audio file into the Airtime library.
This currently performs metadata extraction (eg. gets the ID3 tags from an MP3),
then moves the file to the Airtime music library (stor/imported), and returns
the results back to the parent process. This class is used in an isolated process
so that if it crashes, it does not kill the entire airtime_analyzer daemon and
the failure to import can be reported back to the web application.
"""
IMPORT_STATUS_FAILED = 2
@staticmethod
def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix):
def run_analysis(
queue,
audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
):
"""Analyze and import an audio file, and put all extracted metadata into queue.
Keyword arguments:
@ -50,14 +58,29 @@ class AnalyzerPipeline:
if not isinstance(queue, Queue):
raise TypeError("queue must be a Queue.Queue()")
if not isinstance(audio_file_path, str):
raise TypeError("audio_file_path must be unicode. Was of type " + type(audio_file_path).__name__ + " instead.")
raise TypeError(
"audio_file_path must be unicode. Was of type "
+ type(audio_file_path).__name__
+ " instead."
)
if not isinstance(import_directory, str):
raise TypeError("import_directory must be unicode. Was of type " + type(import_directory).__name__ + " instead.")
raise TypeError(
"import_directory must be unicode. Was of type "
+ type(import_directory).__name__
+ " instead."
)
if not isinstance(original_filename, str):
raise TypeError("original_filename must be unicode. Was of type " + type(original_filename).__name__ + " instead.")
raise TypeError(
"original_filename must be unicode. Was of type "
+ type(original_filename).__name__
+ " instead."
)
if not isinstance(file_prefix, str):
raise TypeError("file_prefix must be unicode. Was of type " + type(file_prefix).__name__ + " instead.")
raise TypeError(
"file_prefix must be unicode. Was of type "
+ type(file_prefix).__name__
+ " instead."
)
# Analyze the audio file we were told to analyze:
# First, we extract the ID3 tags and other metadata:
@ -69,9 +92,11 @@ class AnalyzerPipeline:
metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata)
metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata)
metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata)
metadata = FileMoverAnalyzer.move(
audio_file_path, import_directory, original_filename, metadata
)
metadata["import_status"] = 0 # Successfully imported
metadata["import_status"] = 0 # Successfully imported
# Note that the queue we're putting the results into is our interprocess communication
# back to the main process.
@ -93,9 +118,8 @@ class AnalyzerPipeline:
def python_logger_deadlock_workaround():
# Workaround for: http://bugs.python.org/issue6721#msg140215
logger_names = list(logging.Logger.manager.loggerDict.keys())
logger_names.append(None) # Root logger
logger_names.append(None) # Root logger
for name in logger_names:
for handler in logging.getLogger(name).handlers:
handler.createLock()
logging._lock = threading.RLock()

View File

@ -9,21 +9,32 @@ import os
import airtime_analyzer.airtime_analyzer as aa
VERSION = "1.0"
LIBRETIME_CONF_DIR = os.getenv('LIBRETIME_CONF_DIR', '/etc/airtime')
DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, 'airtime.conf')
DEFAULT_HTTP_RETRY_PATH = '/tmp/airtime_analyzer_http_retries'
LIBRETIME_CONF_DIR = os.getenv("LIBRETIME_CONF_DIR", "/etc/airtime")
DEFAULT_RMQ_CONFIG_PATH = os.path.join(LIBRETIME_CONF_DIR, "airtime.conf")
DEFAULT_HTTP_RETRY_PATH = "/tmp/airtime_analyzer_http_retries"
def main():
'''Entry-point for this application'''
"""Entry-point for this application"""
print("LibreTime Analyzer {}".format(VERSION))
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--daemon", help="run as a daemon", action="store_true")
parser.add_argument("--debug", help="log full debugging output", action="store_true")
parser.add_argument("--rmq-config-file", help="specify a configuration file with RabbitMQ settings (default is %s)" % DEFAULT_RMQ_CONFIG_PATH)
parser.add_argument("--http-retry-queue-file", help="specify where incompleted HTTP requests will be serialized (default is %s)" % DEFAULT_HTTP_RETRY_PATH)
parser.add_argument(
"--debug", help="log full debugging output", action="store_true"
)
parser.add_argument(
"--rmq-config-file",
help="specify a configuration file with RabbitMQ settings (default is %s)"
% DEFAULT_RMQ_CONFIG_PATH,
)
parser.add_argument(
"--http-retry-queue-file",
help="specify where incompleted HTTP requests will be serialized (default is %s)"
% DEFAULT_HTTP_RETRY_PATH,
)
args = parser.parse_args()
#Default config file path
# Default config file path
rmq_config_path = DEFAULT_RMQ_CONFIG_PATH
http_retry_queue_path = DEFAULT_HTTP_RETRY_PATH
if args.rmq_config_file:
@ -33,14 +44,19 @@ def main():
if args.daemon:
with daemon.DaemonContext():
aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path,
http_retry_queue_path=http_retry_queue_path,
debug=args.debug)
aa.AirtimeAnalyzerServer(
rmq_config_path=rmq_config_path,
http_retry_queue_path=http_retry_queue_path,
debug=args.debug,
)
else:
# Run without daemonizing
aa.AirtimeAnalyzerServer(rmq_config_path=rmq_config_path,
http_retry_queue_path=http_retry_queue_path,
debug=args.debug)
aa.AirtimeAnalyzerServer(
rmq_config_path=rmq_config_path,
http_retry_queue_path=http_retry_queue_path,
debug=args.debug,
)
if __name__ == "__main__":
main()

View File

@ -2,6 +2,7 @@
import configparser
def read_config_file(config_path):
"""Parse the application's config file located at config_path."""
config = configparser.SafeConfigParser()
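SafeConfigParser has been a deprecated alias of ConfigParser since Python 3.2 (and was removed in 3.12); a minimal equivalent sketch using the modern class, with the config path as used elsewhere in this codebase:

import configparser

config = configparser.ConfigParser()
config.read("/etc/airtime/airtime.conf")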

View File

@ -8,26 +8,38 @@ from .analyzer import Analyzer
class CuePointAnalyzer(Analyzer):
''' This class extracts the cue-in time, cue-out time, and length of a track using silan. '''
"""This class extracts the cue-in time, cue-out time, and length of a track using silan."""
SILAN_EXECUTABLE = 'silan'
SILAN_EXECUTABLE = "silan"
@staticmethod
def analyze(filename, metadata):
''' Extracts the cue-in and cue-out times and sets the file duration based on them.
"""Extracts the cue-in and cue-out times and sets the file duration based on them.
The cue points are there to skip the silence at the start and end of a track, and are determined
using "silan", which analyzes the loudness in a track.
:param filename: The full path to the file to analyze
:param metadata: A metadata dictionary where the results will be put
:return: The metadata dictionary
'''
''' The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting,
"""
""" The silan -F 0.99 parameter tweaks the highpass filter. The default is 0.98, but at that setting,
the unit test on the short m4a file fails. With the new setting, it gets the correct cue-in time and
all the unit tests pass.
'''
command = [CuePointAnalyzer.SILAN_EXECUTABLE, '-b', '-F', '0.99', '-f', 'JSON', '-t', '1.0', filename]
"""
command = [
CuePointAnalyzer.SILAN_EXECUTABLE,
"-b",
"-F",
"0.99",
"-f",
"JSON",
"-t",
"1.0",
filename,
]
try:
results_json = subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
results_json = subprocess.check_output(
command, stderr=subprocess.STDOUT, close_fds=True
)
try:
results_json = results_json.decode()
except (UnicodeDecodeError, AttributeError):
@ -35,40 +47,51 @@ class CuePointAnalyzer(Analyzer):
silan_results = json.loads(results_json)
# Defensive coding against Silan wildly miscalculating the cue in and out times:
silan_length_seconds = float(silan_results['file duration'])
silan_cuein = format(silan_results['sound'][0][0], 'f')
silan_cueout = format(silan_results['sound'][0][1], 'f')
silan_length_seconds = float(silan_results["file duration"])
silan_cuein = format(silan_results["sound"][0][0], "f")
silan_cueout = format(silan_results["sound"][0][1], "f")
# Sanity check the results against any existing metadata passed to us (presumably extracted by Mutagen):
if 'length_seconds' in metadata:
if "length_seconds" in metadata:
# Silan has a rare bug where it can massively overestimate the length or cue out time sometimes.
if (silan_length_seconds - metadata['length_seconds'] > 3) or (float(silan_cueout) - metadata['length_seconds'] > 2):
if (silan_length_seconds - metadata["length_seconds"] > 3) or (
float(silan_cueout) - metadata["length_seconds"] > 2
):
# Don't trust anything silan says then...
raise Exception("Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values."
.format(silan_cueout, silan_length_seconds, metadata['length_seconds']))
raise Exception(
"Silan cue out {0} or length {1} differs too much from the Mutagen length {2}. Ignoring Silan values.".format(
silan_cueout,
silan_length_seconds,
metadata["length_seconds"],
)
)
# Don't allow silan to trim more than the greater of 3 seconds or 5% off the start of a track
if float(silan_cuein) > max(silan_length_seconds*0.05, 3):
raise Exception("Silan cue in time {0} too big, ignoring.".format(silan_cuein))
if float(silan_cuein) > max(silan_length_seconds * 0.05, 3):
raise Exception(
"Silan cue in time {0} too big, ignoring.".format(silan_cuein)
)
else:
# Only use the Silan track length in the worst case, where Mutagen didn't give us one for some reason.
# (This is mostly to make the unit tests still pass.)
# Convert the length into a formatted time string.
metadata['length_seconds'] = silan_length_seconds #
track_length = datetime.timedelta(seconds=metadata['length_seconds'])
metadata["length_seconds"] = silan_length_seconds #
track_length = datetime.timedelta(seconds=metadata["length_seconds"])
metadata["length"] = str(track_length)
''' XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan
""" XXX: I've commented out the track_length stuff below because Mutagen seems more accurate than silan
as of Mutagen version 1.31. We are always going to use Mutagen's length now because Silan's
length can be off by a few seconds reasonably often.
'''
"""
metadata['cuein'] = silan_cuein
metadata['cueout'] = silan_cueout
metadata["cuein"] = silan_cuein
metadata["cueout"] = silan_cueout
except OSError as e: # silan was not found
logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have silan installed?"))
except subprocess.CalledProcessError as e: # silan returned an error code
except OSError as e: # silan was not found
logging.warn(
"Failed to run: %s - %s. %s"
% (command[0], e.strerror, "Do you have silan installed?")
)
except subprocess.CalledProcessError as e: # silan returned an error code
logging.warn("%s %s %s", e.cmd, e.output, e.returncode)
except Exception as e:
logging.warn(e)
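For reference, a worked example of the silan JSON shape the code above consumes (the numbers are made up):

import json

results_json = '{"file duration": 180.5, "sound": [[0.52, 179.98]]}'
silan_results = json.loads(results_json)
length = float(silan_results["file duration"])       # 180.5
cue_in = format(silan_results["sound"][0][0], "f")   # "0.520000"
cue_out = format(silan_results["sound"][0][1], "f")  # "179.980000"
assert float(cue_in) < float(cue_out) <= length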

View File

@ -9,10 +9,12 @@ import uuid
from .analyzer import Analyzer
class FileMoverAnalyzer(Analyzer):
"""This analyzer copies a file over from a temporary directory (stor/organize)
into the Airtime library (stor/imported).
into the Airtime library (stor/imported).
"""
@staticmethod
def analyze(audio_file_path, metadata):
"""Dummy method because we need more info than analyze gets passed to it"""
@ -21,27 +23,38 @@ class FileMoverAnalyzer(Analyzer):
@staticmethod
def move(audio_file_path, import_directory, original_filename, metadata):
"""Move the file at audio_file_path over into the import_directory/import,
renaming it to original_filename.
renaming it to original_filename.
Keyword arguments:
audio_file_path: Path to the file to be imported.
import_directory: Path to the "import" directory inside the Airtime stor directory.
(eg. /srv/airtime/stor/import)
original_filename: The filename of the file when it was uploaded to Airtime.
metadata: A dictionary where the "full_path" of where the file is moved to will be added.
Keyword arguments:
audio_file_path: Path to the file to be imported.
import_directory: Path to the "import" directory inside the Airtime stor directory.
(eg. /srv/airtime/stor/import)
original_filename: The filename of the file when it was uploaded to Airtime.
metadata: A dictionary where the "full_path" of where the file is moved to will be added.
"""
if not isinstance(audio_file_path, str):
raise TypeError("audio_file_path must be string. Was of type " + type(audio_file_path).__name__)
raise TypeError(
"audio_file_path must be string. Was of type "
+ type(audio_file_path).__name__
)
if not isinstance(import_directory, str):
raise TypeError("import_directory must be string. Was of type " + type(import_directory).__name__)
raise TypeError(
"import_directory must be string. Was of type "
+ type(import_directory).__name__
)
if not isinstance(original_filename, str):
raise TypeError("original_filename must be string. Was of type " + type(original_filename).__name__)
raise TypeError(
"original_filename must be string. Was of type "
+ type(original_filename).__name__
)
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__)
raise TypeError(
"metadata must be a dict. Was of type " + type(metadata).__name__
)
if not os.path.exists(audio_file_path):
raise FileNotFoundError("audio file not found: {}".format(audio_file_path))
#Import the file over to its final location.
# Import the file over to its final location.
# TODO: Also, handle the case where the move fails and write some code
# to possibly move the file to problem_files.
@ -50,52 +63,65 @@ class FileMoverAnalyzer(Analyzer):
final_file_path = import_directory
orig_file_basename, orig_file_extension = os.path.splitext(original_filename)
if "artist_name" in metadata:
final_file_path += "/" + metadata["artist_name"][0:max_dir_len] # truncating with array slicing
final_file_path += (
"/" + metadata["artist_name"][0:max_dir_len]
) # truncating with array slicing
if "album_title" in metadata:
final_file_path += "/" + metadata["album_title"][0:max_dir_len]
# Note that orig_file_extension includes the "." already
final_file_path += "/" + orig_file_basename[0:max_file_len] + orig_file_extension
final_file_path += (
"/" + orig_file_basename[0:max_file_len] + orig_file_extension
)
#Ensure any redundant slashes are stripped
# Ensure any redundant slashes are stripped
final_file_path = os.path.normpath(final_file_path)
#If a file with the same name already exists in the "import" directory, then
#we add a unique string to the end of this one. We never overwrite a file on import
#because if we did that, it would mean Airtime's database would have
#the wrong information for the file we just overwrote (eg. the song length would be wrong!)
#If the final file path is the same as the file we've been told to import (which
#you often do when you're debugging), then don't move the file at all.
# If a file with the same name already exists in the "import" directory, then
# we add a unique string to the end of this one. We never overwrite a file on import
# because if we did that, it would mean Airtime's database would have
# the wrong information for the file we just overwrote (eg. the song length would be wrong!)
# If the final file path is the same as the file we've been told to import (which
# you often do when you're debugging), then don't move the file at all.
if os.path.exists(final_file_path):
if os.path.samefile(audio_file_path, final_file_path):
metadata["full_path"] = final_file_path
return metadata
base_file_path, file_extension = os.path.splitext(final_file_path)
final_file_path = "%s_%s%s" % (base_file_path, time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()), file_extension)
final_file_path = "%s_%s%s" % (
base_file_path,
time.strftime("%m-%d-%Y-%H-%M-%S", time.localtime()),
file_extension,
)
#If THAT path exists, append a UUID instead:
# If THAT path exists, append a UUID instead:
while os.path.exists(final_file_path):
base_file_path, file_extension = os.path.splitext(final_file_path)
final_file_path = "%s_%s%s" % (base_file_path, str(uuid.uuid4()), file_extension)
final_file_path = "%s_%s%s" % (
base_file_path,
str(uuid.uuid4()),
file_extension,
)
#Ensure the full path to the file exists
# Ensure the full path to the file exists
mkdir_p(os.path.dirname(final_file_path))
#Move the file into its final destination directory
# Move the file into its final destination directory
logging.debug("Moving %s to %s" % (audio_file_path, final_file_path))
shutil.move(audio_file_path, final_file_path)
metadata["full_path"] = final_file_path
return metadata
def mkdir_p(path):
""" Make all directories in a tree (like mkdir -p)"""
"""Make all directories in a tree (like mkdir -p)"""
if path == "":
return
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
else:
raise
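On Python 3.2+ the whole helper collapses to one call, assuming (as mkdir_p does) that an already-existing directory is not an error; the path here is made up:

import os

os.makedirs("/tmp/example/nested/dirs", exist_ok=True)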

View File

@ -5,8 +5,8 @@ import json
import time
import select
import signal
import logging
import multiprocessing
import logging
import multiprocessing
import queue
from .analyzer_pipeline import AnalyzerPipeline
from .status_reporter import StatusReporter
@ -54,29 +54,30 @@ QUEUE = "airtime-uploads"
So that is a quick overview of the design constraints for this application, and
why airtime_analyzer is written this way.
"""
class MessageListener:
class MessageListener:
def __init__(self, rmq_config):
''' Start listening for file upload notification messages
from RabbitMQ
Keyword arguments:
rmq_config: A ConfigParser object containing the [rabbitmq] configuration.
'''
"""Start listening for file upload notification messages
from RabbitMQ
Keyword arguments:
rmq_config: A ConfigParser object containing the [rabbitmq] configuration.
"""
self._shutdown = False
# Read the RabbitMQ connection settings from the rmq_config file
# The exceptions thrown here by default give good error messages.
# The exceptions thrown here by default give good error messages.
RMQ_CONFIG_SECTION = "rabbitmq"
self._host = rmq_config.get(RMQ_CONFIG_SECTION, 'host')
self._port = rmq_config.getint(RMQ_CONFIG_SECTION, 'port')
self._username = rmq_config.get(RMQ_CONFIG_SECTION, 'user')
self._password = rmq_config.get(RMQ_CONFIG_SECTION, 'password')
self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, 'vhost')
self._host = rmq_config.get(RMQ_CONFIG_SECTION, "host")
self._port = rmq_config.getint(RMQ_CONFIG_SECTION, "port")
self._username = rmq_config.get(RMQ_CONFIG_SECTION, "user")
self._password = rmq_config.get(RMQ_CONFIG_SECTION, "password")
self._vhost = rmq_config.get(RMQ_CONFIG_SECTION, "vhost")
# Set up a signal handler so we can shutdown gracefully
# For some reason, this signal handler must be set up here. I'd rather
# For some reason, this signal handler must be set up here. I'd rather
# put it in AirtimeAnalyzerServer, but it doesn't work there (something to do
# with pika's SIGTERM handler interfering with it, I think...)
signal.signal(signal.SIGTERM, self.graceful_shutdown)
@ -86,9 +87,9 @@ class MessageListener:
self.connect_to_messaging_server()
self.wait_for_messages()
except (KeyboardInterrupt, SystemExit):
break # Break out of the while loop and exit the application
break # Break out of the while loop and exit the application
except select.error:
pass
pass
except pika.exceptions.AMQPError as e:
if self._shutdown:
break
@ -100,27 +101,37 @@ class MessageListener:
self.disconnect_from_messaging_server()
logging.info("Exiting cleanly.")
def connect_to_messaging_server(self):
'''Connect to the RabbitMQ server and start listening for messages.'''
self._connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._host,
port=self._port, virtual_host=self._vhost,
credentials=pika.credentials.PlainCredentials(self._username, self._password)))
"""Connect to the RabbitMQ server and start listening for messages."""
self._connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=self._host,
port=self._port,
virtual_host=self._vhost,
credentials=pika.credentials.PlainCredentials(
self._username, self._password
),
)
)
self._channel = self._connection.channel()
self._channel.exchange_declare(exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True)
self._channel.exchange_declare(
exchange=EXCHANGE, exchange_type=EXCHANGE_TYPE, durable=True
)
result = self._channel.queue_declare(queue=QUEUE, durable=True)
self._channel.queue_bind(exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY)
self._channel.queue_bind(
exchange=EXCHANGE, queue=QUEUE, routing_key=ROUTING_KEY
)
logging.info(" Listening for messages...")
self._channel.basic_consume(QUEUE, self.msg_received_callback, auto_ack=False)
def wait_for_messages(self):
'''Wait until we've received a RabbitMQ message.'''
"""Wait until we've received a RabbitMQ message."""
self._channel.start_consuming()
def disconnect_from_messaging_server(self):
'''Stop consuming RabbitMQ messages and disconnect'''
"""Stop consuming RabbitMQ messages and disconnect"""
# If you try to close a connection that's already closed, you're going to have a bad time.
# We're breaking EAFP because this can be called multiple times depending on exception
# handling flow here.
@ -128,43 +139,45 @@ class MessageListener:
self._channel.stop_consuming()
if not self._connection.is_closed and not self._connection.is_closing:
self._connection.close()
def graceful_shutdown(self, signum, frame):
'''Disconnect and break out of the message listening loop'''
"""Disconnect and break out of the message listening loop"""
self._shutdown = True
self.disconnect_from_messaging_server()
def msg_received_callback(self, channel, method_frame, header_frame, body):
''' A callback method that runs when a RabbitMQ message is received.
Here we parse the message, spin up an analyzer process, and report the
metadata back to the Airtime web application (or report an error).
'''
logging.info(" - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key))
#Declare all variables here so they exist in the exception handlers below, no matter what.
"""A callback method that runs when a RabbitMQ message is received.
Here we parse the message, spin up an analyzer process, and report the
metadata back to the Airtime web application (or report an error).
"""
logging.info(
" - Received '%s' on routing_key '%s'" % (body, method_frame.routing_key)
)
# Declare all variables here so they exist in the exception handlers below, no matter what.
audio_file_path = ""
#final_file_path = ""
# final_file_path = ""
import_directory = ""
original_filename = ""
callback_url = ""
api_key = ""
callback_url = ""
api_key = ""
file_prefix = ""
''' Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
""" Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
to pass objects between the processes so that if the analyzer process crashes, it does not
take down the rest of the daemon and we NACK that message so that it doesn't get
propagated to other airtime_analyzer daemons (eg. running on other servers).
We avoid cascading failure this way.
'''
"""
try:
try:
body = body.decode()
except (UnicodeDecodeError, AttributeError):
pass
msg_dict = json.loads(body)
api_key = msg_dict["api_key"]
callback_url = msg_dict["callback_url"]
api_key = msg_dict["api_key"]
callback_url = msg_dict["callback_url"]
audio_file_path = msg_dict["tmp_file_path"]
import_directory = msg_dict["import_directory"]
@ -172,48 +185,71 @@ class MessageListener:
file_prefix = msg_dict["file_prefix"]
storage_backend = msg_dict["storage_backend"]
audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix)
StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata)
audio_metadata = MessageListener.spawn_analyzer_process(
audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
)
StatusReporter.report_success_to_callback_url(
callback_url, api_key, audio_metadata
)
except KeyError as e:
# A field in msg_dict that we needed was missing (eg. audio_file_path)
logging.exception("A mandatory airtime_analyzer message field was missing from the message.")
logging.exception(
"A mandatory airtime_analyzer message field was missing from the message."
)
# See the huge comment about NACK below.
channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False,
requeue=False) #Important that it doesn't requeue the message
channel.basic_nack(
delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
) # Important that it doesn't requeue the message
except Exception as e:
logging.exception(e)
''' If ANY exception happens while processing a file, we're going to NACK to the
""" If ANY exception happens while processing a file, we're going to NACK to the
messaging server and tell it to remove the message from the queue.
(NACK is a negative acknowledgement. We could use ACK instead, but this might come
in handy in the future.)
Exceptions in this context are unexpected, unhandled errors. We try to recover
from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves
here from any catastrophic or genuinely unexpected errors:
'''
channel.basic_nack(delivery_tag=method_frame.delivery_tag, multiple=False,
requeue=False) #Important that it doesn't requeue the message
"""
channel.basic_nack(
delivery_tag=method_frame.delivery_tag, multiple=False, requeue=False
) # Important that it doesn't requeue the message
#
# TODO: If the JSON was invalid or the web server is down,
# TODO: If the JSON was invalid or the web server is down,
# then don't report that failure to the REST API
#TODO: Catch exceptions from this HTTP request too:
if callback_url: # If we got an invalid message, there might be no callback_url in the JSON
# TODO: Catch exceptions from this HTTP request too:
if (
callback_url
): # If we got an invalid message, there might be no callback_url in the JSON
# Report this as a failed upload to the File Upload REST API.
StatusReporter.report_failure_to_callback_url(callback_url, api_key, import_status=2,
reason='An error occurred while importing this file')
StatusReporter.report_failure_to_callback_url(
callback_url,
api_key,
import_status=2,
reason="An error occurred while importing this file",
)
else:
# ACK at the very end, after the message has been successfully processed.
# If we don't ack, then RabbitMQ will redeliver the message in the future.
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
@staticmethod
def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix):
''' Spawn a child process to analyze and import a new audio file. '''
'''
def spawn_analyzer_process(
audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
):
"""Spawn a child process to analyze and import a new audio file."""
"""
q = multiprocessing.Queue()
p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis,
args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix))
@ -225,12 +261,19 @@ class MessageListener:
logging.info(results)
else:
raise Exception("Analyzer process terminated unexpectedly.")
'''
"""
metadata = {}
q = queue.Queue()
try:
AnalyzerPipeline.run_analysis(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix)
AnalyzerPipeline.run_analysis(
q,
audio_file_path,
import_directory,
original_filename,
storage_backend,
file_prefix,
)
metadata = q.get()
except Exception as e:
logging.error("Analyzer pipeline exception: %s" % str(e))
@ -241,4 +284,3 @@ class MessageListener:
q.get()
return metadata
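The commented-out block above preserves the original process-isolation design; a minimal runnable sketch of that pattern, with do_work standing in (hypothetically) for AnalyzerPipeline.run_analysis:

import multiprocessing

def do_work(q, path):
    # A crash here would kill only the child process, not the daemon.
    q.put({"full_path": path, "import_status": 0})

if __name__ == "__main__":
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=do_work, args=(q, "/tmp/song.mp3"))
    p.start()
    results = q.get()  # read before join() so a full pipe can't deadlock
    p.join()
    print(results)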

View File

@ -9,32 +9,36 @@ import os
import hashlib
from .analyzer import Analyzer
class MetadataAnalyzer(Analyzer):
class MetadataAnalyzer(Analyzer):
@staticmethod
def analyze(filename, metadata):
''' Extract audio metadata from tags embedded in the file (eg. ID3 tags)
"""Extract audio metadata from tags embedded in the file (eg. ID3 tags)
Keyword arguments:
filename: The path to the audio file to extract metadata from.
metadata: A dictionary that the extracted metadata will be added to.
'''
Keyword arguments:
filename: The path to the audio file to extract metadata from.
metadata: A dictionary that the extracted metadata will be added to.
"""
if not isinstance(filename, str):
raise TypeError("filename must be string. Was of type " + type(filename).__name__)
raise TypeError(
"filename must be string. Was of type " + type(filename).__name__
)
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict. Was of type " + type(metadata).__name__)
raise TypeError(
"metadata must be a dict. Was of type " + type(metadata).__name__
)
if not os.path.exists(filename):
raise FileNotFoundError("audio file not found: {}".format(filename))
#Airtime <= 2.5.x nonsense:
# Airtime <= 2.5.x nonsense:
metadata["ftype"] = "audioclip"
#Other fields we'll want to set for Airtime:
# Other fields we'll want to set for Airtime:
metadata["hidden"] = False
# Get file size and md5 hash of the file
metadata["filesize"] = os.path.getsize(filename)
with open(filename, 'rb') as fh:
with open(filename, "rb") as fh:
m = hashlib.md5()
while True:
data = fh.read(8192)
@ -46,37 +50,41 @@ class MetadataAnalyzer(Analyzer):
# Mutagen doesn't handle WAVE files so we use a different package
ms = magic.open(magic.MIME_TYPE)
ms.load()
with open(filename, 'rb') as fh:
with open(filename, "rb") as fh:
mime_check = ms.buffer(fh.read(2014))
metadata["mime"] = mime_check
if mime_check == 'audio/x-wav':
if mime_check == "audio/x-wav":
return MetadataAnalyzer._analyze_wave(filename, metadata)
#Extract metadata from an audio file using mutagen
# Extract metadata from an audio file using mutagen
audio_file = mutagen.File(filename, easy=True)
#Bail if the file couldn't be parsed. The title should stay as the filename
#inside Airtime.
if audio_file == None: # Don't use "if not" here. It is wrong due to mutagen's design.
# Bail if the file couldn't be parsed. The title should stay as the filename
# inside Airtime.
if (
audio_file == None
): # Don't use "if not" here. It is wrong due to mutagen's design.
return metadata
# Note that audio_file can equal {} if the file is valid but there are no metadata tags.
# We can still try to grab the info variables below.
#Grab other file information that isn't encoded in a tag, but instead usually
#in the file header. Mutagen breaks that out into a separate "info" object:
# Grab other file information that isn't encoded in a tag, but instead usually
# in the file header. Mutagen breaks that out into a separate "info" object:
info = audio_file.info
if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent
if hasattr(info, "sample_rate"): # Mutagen is annoying and inconsistent
metadata["sample_rate"] = info.sample_rate
if hasattr(info, "length"):
metadata["length_seconds"] = info.length
#Converting the length in seconds (float) to a formatted time string
# Converting the length in seconds (float) to a formatted time string
track_length = datetime.timedelta(seconds=info.length)
metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length)
metadata["length"] = str(
track_length
) # time.strftime("%H:%M:%S.%f", track_length)
# Other fields for Airtime
metadata["cueout"] = metadata["length"]
# Set a default cue in time in seconds
metadata["cuein"] = 0.0;
metadata["cuein"] = 0.0
if hasattr(info, "bitrate"):
metadata["bit_rate"] = info.bitrate
@ -86,11 +94,11 @@ class MetadataAnalyzer(Analyzer):
if audio_file.mime:
metadata["mime"] = audio_file.mime[0]
#Try to get the number of channels if mutagen can...
# Try to get the number of channels if mutagen can...
try:
#Special handling for getting the # of channels from MP3s. It's in the "mode" field
#which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec...
if metadata["mime"] in ["audio/mpeg", 'audio/mp3']:
# Special handling for getting the # of channels from MP3s. It's in the "mode" field
# which is 0=Stereo, 1=Joint Stereo, 2=Dual Channel, 3=Mono. Part of the ID3 spec...
if metadata["mime"] in ["audio/mpeg", "audio/mp3"]:
if info.mode == 3:
metadata["channels"] = 1
else:
@ -98,54 +106,54 @@ class MetadataAnalyzer(Analyzer):
else:
metadata["channels"] = info.channels
except (AttributeError, KeyError):
#If mutagen can't figure out the number of channels, we'll just leave it out...
# If mutagen can't figure out the number of channels, we'll just leave it out...
pass
#Try to extract the number of tracks on the album if we can (the "track total")
# Try to extract the number of tracks on the album if we can (the "track total")
try:
track_number = audio_file["tracknumber"]
if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh
if isinstance(track_number, list): # Sometimes tracknumber is a list, ugh
track_number = track_number[0]
track_number_tokens = track_number
if '/' in track_number:
track_number_tokens = track_number.split('/')
if "/" in track_number:
track_number_tokens = track_number.split("/")
track_number = track_number_tokens[0]
elif '-' in track_number:
track_number_tokens = track_number.split('-')
elif "-" in track_number:
track_number_tokens = track_number.split("-")
track_number = track_number_tokens[0]
metadata["track_number"] = track_number
track_total = track_number_tokens[1]
metadata["track_total"] = track_total
except (AttributeError, KeyError, IndexError):
#If we couldn't figure out the track_number or track_total, just ignore it...
# If we couldn't figure out the track_number or track_total, just ignore it...
pass
#We normalize the mutagen tags slightly here, so in case mutagen changes,
#we find the
# We normalize the mutagen tags slightly here, so in case mutagen changes,
# we find the
mutagen_to_airtime_mapping = {
'title': 'track_title',
'artist': 'artist_name',
'album': 'album_title',
'bpm': 'bpm',
'composer': 'composer',
'conductor': 'conductor',
'copyright': 'copyright',
'comment': 'comment',
'encoded_by': 'encoder',
'genre': 'genre',
'isrc': 'isrc',
'label': 'label',
'organization': 'label',
"title": "track_title",
"artist": "artist_name",
"album": "album_title",
"bpm": "bpm",
"composer": "composer",
"conductor": "conductor",
"copyright": "copyright",
"comment": "comment",
"encoded_by": "encoder",
"genre": "genre",
"isrc": "isrc",
"label": "label",
"organization": "label",
#'length': 'length',
'language': 'language',
'last_modified':'last_modified',
'mood': 'mood',
'bit_rate': 'bit_rate',
'replay_gain': 'replaygain',
"language": "language",
"last_modified": "last_modified",
"mood": "mood",
"bit_rate": "bit_rate",
"replay_gain": "replaygain",
#'tracknumber': 'track_number',
#'track_total': 'track_total',
'website': 'website',
'date': 'year',
"website": "website",
"date": "year",
#'mime_type': 'mime',
}
@ -158,7 +166,7 @@ class MetadataAnalyzer(Analyzer):
if isinstance(metadata[airtime_tag], list):
if metadata[airtime_tag]:
metadata[airtime_tag] = metadata[airtime_tag][0]
else: # Handle empty lists
else: # Handle empty lists
metadata[airtime_tag] = ""
except KeyError:
@ -169,13 +177,15 @@ class MetadataAnalyzer(Analyzer):
@staticmethod
def _analyze_wave(filename, metadata):
try:
reader = wave.open(filename, 'rb')
reader = wave.open(filename, "rb")
metadata["channels"] = reader.getnchannels()
metadata["sample_rate"] = reader.getframerate()
length_seconds = float(reader.getnframes()) / float(metadata["sample_rate"])
#Converting the length in seconds (float) to a formatted time string
# Converting the length in seconds (float) to a formatted time string
track_length = datetime.timedelta(seconds=length_seconds)
metadata["length"] = str(track_length) #time.strftime("%H:%M:%S.%f", track_length)
metadata["length"] = str(
track_length
) # time.strftime("%H:%M:%S.%f", track_length)
metadata["length_seconds"] = length_seconds
metadata["cueout"] = metadata["length"]
except wave.Error as ex:

View File

@ -1,32 +1,47 @@
# -*- coding: utf-8 -*-
__author__ = 'asantoni'
__author__ = "asantoni"
import subprocess
import logging
from .analyzer import Analyzer
class UnplayableFileError(Exception):
pass
class PlayabilityAnalyzer(Analyzer):
''' This class checks if a file can actually be played with Liquidsoap. '''
LIQUIDSOAP_EXECUTABLE = 'liquidsoap'
class PlayabilityAnalyzer(Analyzer):
"""This class checks if a file can actually be played with Liquidsoap."""
LIQUIDSOAP_EXECUTABLE = "liquidsoap"
@staticmethod
def analyze(filename, metadata):
''' Checks if a file can be played by Liquidsoap.
"""Checks if a file can be played by Liquidsoap.
:param filename: The full path to the file to analyze
:param metadata: A metadata dictionary where the results will be put
:return: The metadata dictionary
'''
command = [PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE, '-v', '-c', "output.dummy(audio_to_stereo(single(argv(1))))", '--', filename]
"""
command = [
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE,
"-v",
"-c",
"output.dummy(audio_to_stereo(single(argv(1))))",
"--",
filename,
]
try:
subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
except OSError as e: # liquidsoap was not found
logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have liquidsoap installed?"))
except (subprocess.CalledProcessError, Exception) as e: # liquidsoap returned an error code
except OSError as e: # liquidsoap was not found
logging.warn(
"Failed to run: %s - %s. %s"
% (command[0], e.strerror, "Do you have liquidsoap installed?")
)
except (
subprocess.CalledProcessError,
Exception,
) as e: # liquidsoap returned an error code
logging.warn(e)
raise UnplayableFileError()
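
The pattern being reformatted here — shell out, then map each failure mode to an exception — as a sketch that substitutes a stand-in command so it runs on any POSIX box without Liquidsoap:

import logging
import subprocess

command = ["true"]  # stand-in for the liquidsoap invocation above
try:
    subprocess.check_output(command, stderr=subprocess.STDOUT, close_fds=True)
except OSError as e:  # executable not found
    logging.warning("Failed to run: %s - %s", command[0], e.strerror)
except subprocess.CalledProcessError as e:  # tool returned an error code
    logging.warning(e)
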

View File

@ -6,30 +6,39 @@ import re
class ReplayGainAnalyzer(Analyzer):
''' This class extracts the ReplayGain using a tool from the python-rgain package. '''
"""This class extracts the ReplayGain using a tool from the python-rgain package."""
REPLAYGAIN_EXECUTABLE = 'replaygain' # From the rgain3 python package
REPLAYGAIN_EXECUTABLE = "replaygain" # From the rgain3 python package
@staticmethod
def analyze(filename, metadata):
''' Extracts the Replaygain loudness normalization factor of a track.
"""Extracts the Replaygain loudness normalization factor of a track.
:param filename: The full path to the file to analyze
:param metadata: A metadata dictionary where the results will be put
:return: The metadata dictionary
'''
''' The -d flag means do a dry-run, ie. don't modify the file directly.
'''
command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, '-d', filename]
"""
""" The -d flag means do a dry-run, ie. don't modify the file directly.
"""
command = [ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE, "-d", filename]
try:
results = subprocess.check_output(command, stderr=subprocess.STDOUT,
close_fds=True, universal_newlines=True)
gain_match = r'Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB'
results = subprocess.check_output(
command,
stderr=subprocess.STDOUT,
close_fds=True,
universal_newlines=True,
)
gain_match = (
r"Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB"
)
replaygain = re.search(gain_match, results).group(1)
metadata['replay_gain'] = float(replaygain)
metadata["replay_gain"] = float(replaygain)
except OSError as e: # replaygain was not found
logging.warn("Failed to run: %s - %s. %s" % (command[0], e.strerror, "Do you have python-rgain installed?"))
except subprocess.CalledProcessError as e: # replaygain returned an error code
except OSError as e: # replaygain was not found
logging.warn(
"Failed to run: %s - %s. %s"
% (command[0], e.strerror, "Do you have python-rgain installed?")
)
except subprocess.CalledProcessError as e: # replaygain returned an error code
logging.warn("%s %s %s", e.cmd, e.output, e.returncode)
except Exception as e:
logging.warn(e)
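
A sketch of the regex extraction above, run against canned output so it needs neither rgain3 nor an audio file (the sample text is illustrative, not verbatim replaygain output):

import re

results = "Calculating Replay Gain information ...\ntest.mp3:-5.2 dB"
gain_match = r"Calculating Replay Gain information \.\.\.(?:\n|.)*?:([\d.-]*) dB"
replaygain = re.search(gain_match, results).group(1)
assert float(replaygain) == -5.2
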

View File

@ -7,14 +7,15 @@ import queue
import time
import traceback
import pickle
import threading
import threading
from urllib.parse import urlparse
# Disable urllib3 warnings because these can cause a rare deadlock due to Python 2's crappy internal non-reentrant locking
# around POSIX stuff. See SAAS-714. The hasattr() is for compatibility with older versions of requests.
if hasattr(requests, 'packages'):
if hasattr(requests, "packages"):
requests.packages.urllib3.disable_warnings()
class PicklableHttpRequest:
def __init__(self, method, url, data, api_key):
self.method = method
@ -23,18 +24,23 @@ class PicklableHttpRequest:
self.api_key = api_key
def create_request(self):
return requests.Request(method=self.method, url=self.url, data=self.data,
auth=requests.auth.HTTPBasicAuth(self.api_key, ''))
return requests.Request(
method=self.method,
url=self.url,
data=self.data,
auth=requests.auth.HTTPBasicAuth(self.api_key, ""),
)
def process_http_requests(ipc_queue, http_retry_queue_path):
''' Runs in a separate thread and performs all the HTTP requests where we're
reporting extracted audio file metadata or errors back to the Airtime web application.
"""Runs in a separate thread and performs all the HTTP requests where we're
reporting extracted audio file metadata or errors back to the Airtime web application.
This process also checks every 5 seconds if there's failed HTTP requests that we
need to retry. We retry failed HTTP requests so that we don't lose uploads if the
web server is temporarily down.
This process also checks every 5 seconds if there's failed HTTP requests that we
need to retry. We retry failed HTTP requests so that we don't lose uploads if the
web server is temporarily down.
'''
"""
# Store any failed requests (eg. due to web server errors or downtime) to be
# retried later:
@ -45,7 +51,7 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
# if airtime_analyzer is shut down while the web server is down or unreachable,
# and there were failed HTTP requests pending, waiting to be retried.
try:
with open(http_retry_queue_path, 'rb') as pickle_file:
with open(http_retry_queue_path, "rb") as pickle_file:
retry_queue = pickle.load(pickle_file)
except IOError as e:
if e.errno == 2:
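
The load side of that persistence, sketched with a placeholder path; a missing file (errno 2) simply means an empty retry queue:

import pickle

http_retry_queue_path = "/tmp/airtime_analyzer_http_retries.pickle"  # placeholder
try:
    with open(http_retry_queue_path, "rb") as pickle_file:
        retry_queue = pickle.load(pickle_file)
except FileNotFoundError:  # errno 2, as in the original check
    retry_queue = []
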
@ -64,11 +70,16 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
while not shutdown:
try:
request = ipc_queue.get(block=True, timeout=5)
if isinstance(request, str) and request == "shutdown": # Bit of a cheat
if (
isinstance(request, str) and request == "shutdown"
): # Bit of a cheat
shutdown = True
break
if not isinstance(request, PicklableHttpRequest):
raise TypeError("request must be a PicklableHttpRequest. Was of type " + type(request).__name__)
raise TypeError(
"request must be a PicklableHttpRequest. Was of type "
+ type(request).__name__
)
except queue.Empty:
request = None
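
The queue-with-sentinel loop being reformatted here, reduced to a self-contained sketch:

import queue

ipc_queue = queue.Queue()
ipc_queue.put("shutdown")  # pre-load the sentinel so this sketch terminates

shutdown = False
while not shutdown:
    try:
        request = ipc_queue.get(block=True, timeout=5)
        if isinstance(request, str) and request == "shutdown":
            shutdown = True
    except queue.Empty:
        request = None
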
@ -85,32 +96,40 @@ def process_http_requests(ipc_queue, http_retry_queue_path):
logging.info("Shutting down status_reporter")
# Pickle retry_queue to disk so that we don't lose uploads if we're shut down while
# while the web server is down or unreachable.
with open(http_retry_queue_path, 'wb') as pickle_file:
with open(http_retry_queue_path, "wb") as pickle_file:
pickle.dump(retry_queue, pickle_file)
return
except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case.
except Exception as e: # Terrible top-level exception handler to prevent the thread from dying, just in case.
if shutdown:
return
logging.exception("Unhandled exception in StatusReporter")
logging.exception(e)
logging.info("Restarting StatusReporter thread")
time.sleep(2) # Throttle it
time.sleep(2) # Throttle it
def send_http_request(picklable_request, retry_queue):
if not isinstance(picklable_request, PicklableHttpRequest):
raise TypeError("picklable_request must be a PicklableHttpRequest. Was of type " + type(picklable_request).__name__)
try:
raise TypeError(
"picklable_request must be a PicklableHttpRequest. Was of type "
+ type(picklable_request).__name__
)
try:
bare_request = picklable_request.create_request()
s = requests.Session()
prepared_request = s.prepare_request(bare_request)
r = s.send(prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False) # SNI is a pain in the ass
r.raise_for_status() # Raise an exception if there was an http error code returned
r = s.send(
prepared_request, timeout=StatusReporter._HTTP_REQUEST_TIMEOUT, verify=False
) # SNI is a pain in the ass
r.raise_for_status() # Raise an exception if there was an http error code returned
logging.info("HTTP request sent successfully.")
except requests.exceptions.HTTPError as e:
if e.response.status_code == 422:
# Do no retry the request if there was a metadata validation error
logging.error("HTTP request failed due to an HTTP exception. Exception was: %s" % str(e))
logging.error(
"HTTP request failed due to an HTTP exception. Exception was: %s"
% str(e)
)
else:
# The request failed with an error 500 probably, so let's check if Airtime and/or
# the web server are broken. If not, then our request was probably causing an
@ -124,8 +143,10 @@ def send_http_request(picklable_request, retry_queue):
# You will have to find these bad requests in logs or you'll be
# notified by sentry.
except requests.exceptions.ConnectionError as e:
logging.error("HTTP request failed due to a connection error. Retrying later. %s" % str(e))
retry_queue.append(picklable_request) # Retry it later
logging.error(
"HTTP request failed due to a connection error. Retrying later. %s" % str(e)
)
retry_queue.append(picklable_request) # Retry it later
except Exception as e:
logging.error("HTTP request failed with unhandled exception. %s" % str(e))
logging.error(traceback.format_exc())
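
For reference, the prepare-then-send flow used above, sketched without sending anything (URL and key are placeholders; assumes the requests package is installed):

import requests

bare_request = requests.Request(
    method="PUT",
    url="http://example.invalid/rest/media/1",  # placeholder
    data='{"import_status": 0}',
    auth=requests.auth.HTTPBasicAuth("PLACEHOLDER_API_KEY", ""),
)
session = requests.Session()
prepared_request = session.prepare_request(bare_request)
# session.send(prepared_request, timeout=30) would perform the actual upload
print(prepared_request.method, prepared_request.url)
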
@ -134,12 +155,13 @@ def send_http_request(picklable_request, retry_queue):
# that breaks our code. I don't want us pickling data that potentially
# breaks airtime_analyzer.
def is_web_server_broken(url):
''' Do a naive test to check if the web server we're trying to access is down.
We use this to try to differentiate between error 500s that are coming
from (for example) a bug in the Airtime Media REST API and error 500s
caused by Airtime or the webserver itself being broken temporarily.
'''
"""Do a naive test to check if the web server we're trying to access is down.
We use this to try to differentiate between error 500s that are coming
from (for example) a bug in the Airtime Media REST API and error 500s
caused by Airtime or the webserver itself being broken temporarily.
"""
try:
test_req = requests.get(url, verify=False)
test_req.raise_for_status()
@ -147,35 +169,38 @@ def is_web_server_broken(url):
return True
else:
# The request worked fine, so the web server and Airtime are still up.
return False
return False
return False
class StatusReporter():
''' Reports the extracted audio file metadata and job status back to the
Airtime web application.
'''
class StatusReporter:
"""Reports the extracted audio file metadata and job status back to the
Airtime web application.
"""
_HTTP_REQUEST_TIMEOUT = 30
''' We use multiprocessing.Process again here because we need a thread for this stuff
""" We use multiprocessing.Process again here because we need a thread for this stuff
anyways, and Python gives us process isolation for free (crash safety).
'''
"""
_ipc_queue = queue.Queue()
#_http_thread = multiprocessing.Process(target=process_http_requests,
# _http_thread = multiprocessing.Process(target=process_http_requests,
# args=(_ipc_queue,))
_http_thread = None
@classmethod
def start_thread(self, http_retry_queue_path):
StatusReporter._http_thread = threading.Thread(target=process_http_requests,
args=(StatusReporter._ipc_queue,http_retry_queue_path))
StatusReporter._http_thread = threading.Thread(
target=process_http_requests,
args=(StatusReporter._ipc_queue, http_retry_queue_path),
)
StatusReporter._http_thread.start()
@classmethod
def stop_thread(self):
logging.info("Terminating status_reporter process")
#StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process
StatusReporter._ipc_queue.put("shutdown") # Special trigger
# StatusReporter._http_thread.terminate() # Triggers SIGTERM on the child process
StatusReporter._ipc_queue.put("shutdown") # Special trigger
StatusReporter._http_thread.join()
@classmethod
@ -184,30 +209,33 @@ class StatusReporter():
@classmethod
def report_success_to_callback_url(self, callback_url, api_key, audio_metadata):
''' Report the extracted metadata and status of the successfully imported file
to the callback URL (which should be the Airtime File Upload API)
'''
"""Report the extracted metadata and status of the successfully imported file
to the callback URL (which should be the Airtime File Upload API)
"""
put_payload = json.dumps(audio_metadata)
#r = requests.Request(method='PUT', url=callback_url, data=put_payload,
# r = requests.Request(method='PUT', url=callback_url, data=put_payload,
# auth=requests.auth.HTTPBasicAuth(api_key, ''))
'''
"""
r = requests.Request(method='PUT', url=callback_url, data=put_payload,
auth=requests.auth.HTTPBasicAuth(api_key, ''))
StatusReporter._send_http_request(r)
'''
"""
StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url,
data=put_payload, api_key=api_key))
StatusReporter._send_http_request(
PicklableHttpRequest(
method="PUT", url=callback_url, data=put_payload, api_key=api_key
)
)
'''
"""
try:
r.raise_for_status() # Raise an exception if there was an http error code returned
except requests.exceptions.RequestException:
StatusReporter._ipc_queue.put(r.prepare())
'''
"""
'''
"""
# Encode the audio metadata as json and post it back to the callback_url
put_payload = json.dumps(audio_metadata)
logging.debug("sending http put with payload: " + put_payload)
@ -219,31 +247,38 @@ class StatusReporter():
#TODO: queue up failed requests and try them again later.
r.raise_for_status() # Raise an exception if there was an http error code returned
'''
"""
@classmethod
def report_failure_to_callback_url(self, callback_url, api_key, import_status, reason):
if not isinstance(import_status, int ):
raise TypeError("import_status must be an integer. Was of type " + type(import_status).__name__)
def report_failure_to_callback_url(
self, callback_url, api_key, import_status, reason
):
if not isinstance(import_status, int):
raise TypeError(
"import_status must be an integer. Was of type "
+ type(import_status).__name__
)
logging.debug("Reporting import failure to Airtime REST API...")
audio_metadata = dict()
audio_metadata["import_status"] = import_status
audio_metadata["comment"] = reason # hack attack
put_payload = json.dumps(audio_metadata)
#logging.debug("sending http put with payload: " + put_payload)
'''
# logging.debug("sending http put with payload: " + put_payload)
"""
r = requests.put(callback_url, data=put_payload,
auth=requests.auth.HTTPBasicAuth(api_key, ''),
timeout=StatusReporter._HTTP_REQUEST_TIMEOUT)
'''
StatusReporter._send_http_request(PicklableHttpRequest(method='PUT', url=callback_url,
data=put_payload, api_key=api_key))
'''
"""
StatusReporter._send_http_request(
PicklableHttpRequest(
method="PUT", url=callback_url, data=put_payload, api_key=api_key
)
)
"""
logging.debug("HTTP request returned status: " + str(r.status_code))
logging.debug(r.text) # log the response body
#TODO: queue up failed requests and try them again later.
r.raise_for_status() # raise an exception if there was an http error code returned
'''
"""

View File

@ -2,12 +2,14 @@
from nose.tools import *
import airtime_analyzer
def setup():
pass
def teardown():
pass
def test_basic():
pass

View File

@ -8,48 +8,58 @@ import datetime
from airtime_analyzer.analyzer_pipeline import AnalyzerPipeline
from airtime_analyzer import config_file
DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3'
DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3'
DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3"
DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3"
def setup():
pass
def teardown():
#Move the file back
# Move the file back
shutil.move(DEFAULT_IMPORT_DEST, DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE)
def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
q = Queue()
file_prefix = u''
file_prefix = u""
storage_backend = "file"
#This actually imports the file into the "./Test Artist" directory.
AnalyzerPipeline.run_analysis(q, DEFAULT_AUDIO_FILE, u'.', filename, storage_backend, file_prefix)
# This actually imports the file into the "./Test Artist" directory.
AnalyzerPipeline.run_analysis(
q, DEFAULT_AUDIO_FILE, u".", filename, storage_backend, file_prefix
)
metadata = q.get()
assert metadata['track_title'] == u'Test Title'
assert metadata['artist_name'] == u'Test Artist'
assert metadata['album_title'] == u'Test Album'
assert metadata['year'] == u'1999'
assert metadata['genre'] == u'Test Genre'
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't.
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"]))
assert metadata["track_title"] == u"Test Title"
assert metadata["artist_name"] == u"Test Artist"
assert metadata["album_title"] == u"Test Album"
assert metadata["year"] == u"1999"
assert metadata["genre"] == u"Test Genre"
assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't.
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["length"] == str(
datetime.timedelta(seconds=metadata["length_seconds"])
)
assert os.path.exists(DEFAULT_IMPORT_DEST)
@raises(TypeError)
def test_wrong_type_queue_param():
AnalyzerPipeline.run_analysis(Queue(), u'', u'', u'')
AnalyzerPipeline.run_analysis(Queue(), u"", u"", u"")
@raises(TypeError)
def test_wrong_type_string_param2():
AnalyzerPipeline.run_analysis(Queue(), '', u'', u'')
AnalyzerPipeline.run_analysis(Queue(), "", u"", u"")
@raises(TypeError)
def test_wrong_type_string_param3():
AnalyzerPipeline.run_analysis(Queue(), u'', '', u'')
AnalyzerPipeline.run_analysis(Queue(), u"", "", u"")
@raises(TypeError)
def test_wrong_type_string_param4():
AnalyzerPipeline.run_analysis(Queue(), u'', u'', '')
AnalyzerPipeline.run_analysis(Queue(), u"", u"", "")
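
The handoff these tests exercise — run_analysis puts its result dict on a queue, the test reads it back — with a stand-in function (the Queue here is presumably multiprocessing.Queue):

from multiprocessing import Queue

def fake_run_analysis(q):  # stand-in for AnalyzerPipeline.run_analysis
    q.put({"track_title": "Test Title", "length_seconds": 3.9})

q = Queue()
fake_run_analysis(q)
metadata = q.get()
assert metadata["track_title"] == "Test Title"
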

View File

@ -2,13 +2,16 @@
from nose.tools import *
from airtime_analyzer.analyzer import Analyzer
def setup():
pass
def teardown():
pass
@raises(NotImplementedError)
def test_analyze():
abstract_analyzer = Analyzer()
abstract_analyzer.analyze(u'foo', dict())
abstract_analyzer.analyze(u"foo", dict())

View File

@ -2,63 +2,97 @@
from nose.tools import *
from airtime_analyzer.cuepoint_analyzer import CuePointAnalyzer
def check_default_metadata(metadata):
''' Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
"""Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
:param metadata: a metadata dictionary
:return: Nothing
'''
"""
# We give silan some leeway here by specifying a tolerance
tolerance_seconds = 0.1
length_seconds = 3.9
assert abs(metadata['length_seconds'] - length_seconds) < tolerance_seconds
assert abs(float(metadata['cuein'])) < tolerance_seconds
assert abs(float(metadata['cueout']) - length_seconds) < tolerance_seconds
assert abs(metadata["length_seconds"] - length_seconds) < tolerance_seconds
assert abs(float(metadata["cuein"])) < tolerance_seconds
assert abs(float(metadata["cueout"]) - length_seconds) < tolerance_seconds
def test_missing_silan():
old_silan = CuePointAnalyzer.SILAN_EXECUTABLE
CuePointAnalyzer.SILAN_EXECUTABLE = 'foosdaf'
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back
CuePointAnalyzer.SILAN_EXECUTABLE = "foosdaf"
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
CuePointAnalyzer.SILAN_EXECUTABLE = old_silan # Need to put this back
def test_invalid_filepath():
metadata = CuePointAnalyzer.analyze(u'non-existent-file', dict())
metadata = CuePointAnalyzer.analyze(u"non-existent-file", dict())
def test_mp3_utf8():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_dualmono():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_jointstereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_simplestereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_mono():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)
def test_ogg_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)
def test_invalid_wma():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
def test_m4a_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)
def test_wav_stereo():
metadata = CuePointAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
metadata = CuePointAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
check_default_metadata(metadata)

View File

@ -8,109 +8,125 @@ import mock
from pprint import pprint
from airtime_analyzer.filemover_analyzer import FileMoverAnalyzer
DEFAULT_AUDIO_FILE = u'tests/test_data/44100Hz-16bit-mono.mp3'
DEFAULT_IMPORT_DEST = u'Test Artist/Test Album/44100Hz-16bit-mono.mp3'
DEFAULT_AUDIO_FILE = u"tests/test_data/44100Hz-16bit-mono.mp3"
DEFAULT_IMPORT_DEST = u"Test Artist/Test Album/44100Hz-16bit-mono.mp3"
def setup():
pass
def teardown():
pass
@raises(Exception)
def test_dont_use_analyze():
FileMoverAnalyzer.analyze(u'foo', dict())
FileMoverAnalyzer.analyze(u"foo", dict())
@raises(TypeError)
def test_move_wrong_string_param1():
FileMoverAnalyzer.move(42, '', '', dict())
FileMoverAnalyzer.move(42, "", "", dict())
@raises(TypeError)
def test_move_wrong_string_param2():
FileMoverAnalyzer.move(u'', 23, u'', dict())
FileMoverAnalyzer.move(u"", 23, u"", dict())
@raises(TypeError)
def test_move_wrong_string_param3():
FileMoverAnalyzer.move('', '', 5, dict())
FileMoverAnalyzer.move("", "", 5, dict())
@raises(TypeError)
def test_move_wrong_dict_param():
FileMoverAnalyzer.move('', '', '', 12345)
FileMoverAnalyzer.move("", "", "", 12345)
@raises(FileNotFoundError)
def test_move_wrong_string_param3():
FileMoverAnalyzer.move('', '', '', dict())
FileMoverAnalyzer.move("", "", "", dict())
def test_basic():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict())
#Move the file back
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
# Move the file back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE)
def test_basic_samefile():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'tests/test_data', filename, dict())
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u"tests/test_data", filename, dict())
assert os.path.exists(DEFAULT_AUDIO_FILE)
def test_duplicate_file():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
#Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict())
#Copy it back to the original location
# Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
# Copy it back to the original location
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Import it again. It shouldn't overwrite the old file and instead create a new
# Import it again. It shouldn't overwrite the old file and instead create a new
metadata = dict()
metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, metadata)
#Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, metadata)
# Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
#Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
# Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
os.remove(metadata["full_path"])
assert os.path.exists(DEFAULT_AUDIO_FILE)
''' If you import three copies of the same file, the behaviour is:
""" If you import three copies of the same file, the behaviour is:
- The filename of the first file is preserved.
- The filename of the second file has the timestamp attached to it.
- The filename of the third file has a UUID placed after the timestamp, but ONLY IF
it's imported within 1 second of the second file (ie. if the timestamp is the same).
'''
"""
def test_double_duplicate_files():
# Here we use mock to patch out the time.localtime() function so that it
# always returns the same value. This allows us to consistently simulate this test cases
# where the last two of the three files are imported at the same time as the timestamp.
with mock.patch('airtime_analyzer.filemover_analyzer.time') as mock_time:
mock_time.localtime.return_value = time.localtime()#date(2010, 10, 8)
with mock.patch("airtime_analyzer.filemover_analyzer.time") as mock_time:
mock_time.localtime.return_value = time.localtime() # date(2010, 10, 8)
mock_time.side_effect = lambda *args, **kw: time(*args, **kw)
filename = os.path.basename(DEFAULT_AUDIO_FILE)
#Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename, dict())
#Copy it back to the original location
# Import the file once
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u".", filename, dict())
# Copy it back to the original location
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Import it again. It shouldn't overwrite the old file and instead create a new
# Import it again. It shouldn't overwrite the old file and instead create a new
first_dup_metadata = dict()
first_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename,
first_dup_metadata)
#Copy it back again!
first_dup_metadata = FileMoverAnalyzer.move(
DEFAULT_AUDIO_FILE, u".", filename, first_dup_metadata
)
# Copy it back again!
shutil.copy("./" + filename, DEFAULT_AUDIO_FILE)
#Reimport for the third time, which should have the same timestamp as the second one
#thanks to us mocking out time.localtime()
# Reimport for the third time, which should have the same timestamp as the second one
# thanks to us mocking out time.localtime()
second_dup_metadata = dict()
second_dup_metadata = FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, u'.', filename,
second_dup_metadata)
#Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
second_dup_metadata = FileMoverAnalyzer.move(
DEFAULT_AUDIO_FILE, u".", filename, second_dup_metadata
)
# Cleanup: move the file (eg. 44100Hz-16bit-mono.mp3) back
shutil.move("./" + filename, DEFAULT_AUDIO_FILE)
#Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
# Remove the renamed duplicate, eg. 44100Hz-16bit-mono_03-26-2014-11-58.mp3
os.remove(first_dup_metadata["full_path"])
os.remove(second_dup_metadata["full_path"])
assert os.path.exists(DEFAULT_AUDIO_FILE)
@raises(OSError)
def test_bad_permissions_destination_dir():
filename = os.path.basename(DEFAULT_AUDIO_FILE)
dest_dir = u'/sys/foobar' # /sys is using sysfs on Linux, which is unwritable
dest_dir = u"/sys/foobar" # /sys is using sysfs on Linux, which is unwritable
FileMoverAnalyzer.move(DEFAULT_AUDIO_FILE, dest_dir, filename, dict())
#Move the file back
# Move the file back
shutil.move(os.path.join(dest_dir, filename), DEFAULT_AUDIO_FILE)
assert os.path.exists(DEFAULT_AUDIO_FILE)
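
The time-freezing trick from test_double_duplicate_files, reduced to a sketch against the stdlib time module (the real test patches airtime_analyzer.filemover_analyzer.time instead):

import time
from unittest import mock

frozen = time.localtime(0)  # captured before patching
with mock.patch("time.localtime") as mock_localtime:
    mock_localtime.return_value = frozen
    assert time.localtime() == frozen  # every call now returns the frozen value
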

View File

@ -6,78 +6,101 @@ import mock
from nose.tools import *
from airtime_analyzer.metadata_analyzer import MetadataAnalyzer
def setup():
pass
def teardown():
pass
def check_default_metadata(metadata):
assert metadata['track_title'] == 'Test Title'
assert metadata['artist_name'] == 'Test Artist'
assert metadata['album_title'] == 'Test Album'
assert metadata['year'] == '1999'
assert metadata['genre'] == 'Test Genre'
assert metadata['track_number'] == '1'
assert metadata["length"] == str(datetime.timedelta(seconds=metadata["length_seconds"]))
assert metadata["track_title"] == "Test Title"
assert metadata["artist_name"] == "Test Artist"
assert metadata["album_title"] == "Test Album"
assert metadata["year"] == "1999"
assert metadata["genre"] == "Test Genre"
assert metadata["track_number"] == "1"
assert metadata["length"] == str(
datetime.timedelta(seconds=metadata["length_seconds"])
)
def test_mp3_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 1
assert metadata['bit_rate'] == 63998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't.
assert metadata['track_total'] == '10' # MP3s can have a track_total
#Mutagen doesn't extract comments from mp3s it seems
assert metadata["channels"] == 1
assert metadata["bit_rate"] == 63998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't.
assert metadata["track_total"] == "10" # MP3s can have a track_total
# Mutagen doesn't extract comments from mp3s it seems
def test_mp3_jointstereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 127998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_mp3_simplestereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 127998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_mp3_dualmono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 127998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 127998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_ogg_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.ogg', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-mono.ogg", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 1
assert metadata['bit_rate'] == 80000
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/vorbis'
assert metadata['comment'] == 'Test Comment'
assert metadata["channels"] == 1
assert metadata["bit_rate"] == 80000
assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata["mime"] == "audio/vorbis"
assert metadata["comment"] == "Test Comment"
def test_ogg_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 112000
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/vorbis'
assert metadata['comment'] == 'Test Comment'
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 112000
assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata["mime"] == "audio/vorbis"
assert metadata["comment"] == "Test Comment"
''' faac and avconv can't seem to create a proper mono AAC file... ugh
""" faac and avconv can't seem to create a proper mono AAC file... ugh
def test_aac_mono():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-mono.m4a')
print("Mono AAC metadata:")
@ -88,78 +111,93 @@ def test_aac_mono():
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/mp4'
assert metadata['comment'] == 'Test Comment'
'''
"""
def test_aac_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)
assert metadata['channels'] == 2
assert metadata['bit_rate'] == 102619
assert abs(metadata['length_seconds'] - 3.8) < 0.1
assert metadata['mime'] == 'audio/mp4'
assert metadata['comment'] == 'Test Comment'
assert metadata["channels"] == 2
assert metadata["bit_rate"] == 102619
assert abs(metadata["length_seconds"] - 3.8) < 0.1
assert metadata["mime"] == "audio/mp4"
assert metadata["comment"] == "Test Comment"
def test_mp3_utf8():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
# Using a bunch of different UTF-8 codepages here. Test data is from:
# http://winrus.com/utf8-jap.htm
assert metadata['track_title'] == 'アイウエオカキクケコサシスセソタチツテ'
assert metadata['artist_name'] == 'てすと'
assert metadata['album_title'] == 'Ä ä Ü ü ß'
assert metadata['year'] == '1999'
assert metadata['genre'] == 'Я Б Г Д Ж Й'
assert metadata['track_number'] == '1'
assert metadata['channels'] == 2
assert metadata['bit_rate'] < 130000
assert metadata['bit_rate'] > 127000
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3'
assert metadata['track_total'] == '10' # MP3s can have a track_total
assert metadata["track_title"] == "アイウエオカキクケコサシスセソタチツテ"
assert metadata["artist_name"] == "てすと"
assert metadata["album_title"] == "Ä ä Ü ü ß"
assert metadata["year"] == "1999"
assert metadata["genre"] == "Я Б Г Д Ж Й"
assert metadata["track_number"] == "1"
assert metadata["channels"] == 2
assert metadata["bit_rate"] < 130000
assert metadata["bit_rate"] > 127000
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3"
assert metadata["track_total"] == "10" # MP3s can have a track_total
def test_invalid_wma():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
assert metadata['mime'] == 'audio/x-ms-wma'
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
assert metadata["mime"] == "audio/x-ms-wma"
def test_wav_stereo():
metadata = MetadataAnalyzer.analyze('tests/test_data/44100Hz-16bit-stereo.wav', dict())
assert metadata['mime'] == 'audio/x-wav'
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['channels'] == 2
assert metadata['sample_rate'] == 44100
metadata = MetadataAnalyzer.analyze(
"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
assert metadata["mime"] == "audio/x-wav"
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["channels"] == 2
assert metadata["sample_rate"] == 44100
# Make sure the parameter checking works
@raises(FileNotFoundError)
def test_move_wrong_string_param1():
not_unicode = 'asdfasdf'
not_unicode = "asdfasdf"
MetadataAnalyzer.analyze(not_unicode, dict())
@raises(TypeError)
def test_move_wrong_metadata_dict():
not_a_dict = list()
MetadataAnalyzer.analyze('asdfasdf', not_a_dict)
MetadataAnalyzer.analyze("asdfasdf", not_a_dict)
# Test an mp3 file where the number of channels is invalid or missing:
def test_mp3_bad_channels():
filename = 'tests/test_data/44100Hz-16bit-mono.mp3'
'''
filename = "tests/test_data/44100Hz-16bit-mono.mp3"
"""
It'd be a pain in the ass to construct a real MP3 with an invalid number
of channels by hand because that value is stored in every MP3 frame in the file
'''
"""
audio_file = mutagen.File(filename, easy=True)
audio_file.info.mode = 1777
with mock.patch('airtime_analyzer.metadata_analyzer.mutagen') as mock_mutagen:
with mock.patch("airtime_analyzer.metadata_analyzer.mutagen") as mock_mutagen:
mock_mutagen.File.return_value = audio_file
#mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw)
# mock_mutagen.side_effect = lambda *args, **kw: audio_file #File(*args, **kw)
metadata = MetadataAnalyzer.analyze(filename, dict())
check_default_metadata(metadata)
assert metadata['channels'] == 1
assert metadata['bit_rate'] == 63998
assert abs(metadata['length_seconds'] - 3.9) < 0.1
assert metadata['mime'] == 'audio/mp3' # Not unicode because MIMEs aren't.
assert metadata['track_total'] == '10' # MP3s can have a track_total
#Mutagen doesn't extract comments from mp3s it seems
assert metadata["channels"] == 1
assert metadata["bit_rate"] == 63998
assert abs(metadata["length_seconds"] - 3.9) < 0.1
assert metadata["mime"] == "audio/mp3" # Not unicode because MIMEs aren't.
assert metadata["track_total"] == "10" # MP3s can have a track_total
# Mutagen doesn't extract comments from mp3s it seems
def test_unparsable_file():
MetadataAnalyzer.analyze('tests/test_data/unparsable.txt', dict())
MetadataAnalyzer.analyze("tests/test_data/unparsable.txt", dict())

View File

@ -2,61 +2,97 @@
from nose.tools import *
from airtime_analyzer.playability_analyzer import *
def check_default_metadata(metadata):
''' Stub function for now in case we need it later.'''
"""Stub function for now in case we need it later."""
pass
def test_missing_liquidsoap():
old_ls = PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = 'foosdaf'
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = "foosdaf"
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls # Need to put this back
@raises(UnplayableFileError)
def test_invalid_filepath():
metadata = PlayabilityAnalyzer.analyze(u'non-existent-file', dict())
metadata = PlayabilityAnalyzer.analyze(u"non-existent-file", dict())
def test_mp3_utf8():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_dualmono():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_jointstereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_simplestereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata)
def test_mp3_mono():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)
def test_ogg_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)
@raises(UnplayableFileError)
def test_invalid_wma():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
def test_m4a_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)
def test_wav_stereo():
metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
metadata = PlayabilityAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.wav", dict()
)
check_default_metadata(metadata)
@raises(UnplayableFileError)
def test_unknown():
metadata = PlayabilityAnalyzer.analyze(u'http://www.google.com', dict())
check_default_metadata(metadata)
metadata = PlayabilityAnalyzer.analyze(u"http://www.google.com", dict())
check_default_metadata(metadata)

View File

@ -5,80 +5,134 @@ from airtime_analyzer.replaygain_analyzer import ReplayGainAnalyzer
def check_default_metadata(metadata):
''' Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
"""Check that the values extracted by Silan/CuePointAnalyzer on our test audio files match what we expect.
:param metadata: a metadata dictionary
:return: Nothing
'''
'''
"""
"""
# We give python-rgain some leeway here by specifying a tolerance. It's not perfectly consistent across codecs...
assert abs(metadata['cuein']) < tolerance_seconds
assert abs(metadata['cueout'] - length_seconds) < tolerance_seconds
'''
"""
tolerance = 0.60
expected_replaygain = 5.2
print(metadata['replay_gain'])
assert abs(metadata['replay_gain'] - expected_replaygain) < tolerance
print(metadata["replay_gain"])
assert abs(metadata["replay_gain"] - expected_replaygain) < tolerance
def test_missing_replaygain():
old_rg = ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = 'foosdaf'
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = "foosdaf"
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back
def test_invalid_filepath():
metadata = ReplayGainAnalyzer.analyze(u'non-existent-file', dict())
metadata = ReplayGainAnalyzer.analyze(u"non-existent-file", dict())
def test_mp3_utf8():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-utf8.mp3", dict()
)
check_default_metadata(metadata)
test_mp3_utf8.rgain = True
def test_mp3_dualmono():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-dualmono.mp3", dict()
)
check_default_metadata(metadata)
test_mp3_dualmono.rgain = True
def test_mp3_jointstereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-jointstereo.mp3", dict()
)
check_default_metadata(metadata)
test_mp3_jointstereo.rgain = True
def test_mp3_simplestereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-simplestereo.mp3", dict()
)
check_default_metadata(metadata)
test_mp3_simplestereo.rgain = True
def test_mp3_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.mp3", dict()
)
check_default_metadata(metadata)
test_mp3_stereo.rgain = True
def test_mp3_mono():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mono.mp3", dict()
)
check_default_metadata(metadata)
test_mp3_mono.rgain = True
def test_ogg_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.ogg", dict()
)
check_default_metadata(metadata)
test_ogg_stereo = True
def test_invalid_wma():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo-invalid.wma", dict()
)
test_invalid_wma.rgain = True
def test_mp3_missing_id3_header():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3", dict()
)
test_mp3_missing_id3_header.rgain = True
def test_m4a_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
metadata = ReplayGainAnalyzer.analyze(
u"tests/test_data/44100Hz-16bit-stereo.m4a", dict()
)
check_default_metadata(metadata)
test_m4a_stereo.rgain = True
''' WAVE is not supported by python-rgain yet
""" WAVE is not supported by python-rgain yet
def test_wav_stereo():
metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
check_default_metadata(metadata)
test_wav_stereo.rgain = True
'''
"""

View File

@ -6,23 +6,28 @@ import socket
import requests
from requests.auth import AuthBase
def get_protocol(config):
positive_values = ['Yes', 'yes', 'True', 'true', True]
port = config['general'].get('base_port', 80)
force_ssl = config['general'].get('force_ssl', False)
positive_values = ["Yes", "yes", "True", "true", True]
port = config["general"].get("base_port", 80)
force_ssl = config["general"].get("force_ssl", False)
if force_ssl in positive_values:
protocol = 'https'
protocol = "https"
else:
protocol = config['general'].get('protocol')
protocol = config["general"].get("protocol")
if not protocol:
protocol = str(("http", "https")[int(port) == 443])
return protocol
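
A quick check of the branch logic above on canned config dicts (condensed here to an early return; the behaviour is the same):

def get_protocol(config):
    positive_values = ["Yes", "yes", "True", "true", True]
    port = config["general"].get("base_port", 80)
    force_ssl = config["general"].get("force_ssl", False)
    if force_ssl in positive_values:
        return "https"
    protocol = config["general"].get("protocol")
    if not protocol:
        protocol = str(("http", "https")[int(port) == 443])
    return protocol

assert get_protocol({"general": {"force_ssl": "yes"}}) == "https"
assert get_protocol({"general": {"base_port": 443}}) == "https"
assert get_protocol({"general": {"protocol": "https"}}) == "https"
assert get_protocol({"general": {}}) == "http"
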
class UrlParamDict(dict):
def __missing__(self, key):
return '{' + key + '}'
return "{" + key + "}"
class UrlException(Exception): pass
class UrlException(Exception):
pass
class IncompleteUrl(UrlException):
def __init__(self, url):
@ -31,6 +36,7 @@ class IncompleteUrl(UrlException):
def __str__(self):
return "Incomplete url: '{}'".format(self.url)
class UrlBadParam(UrlException):
def __init__(self, url, param):
self.url = url
@ -39,17 +45,20 @@ class UrlBadParam(UrlException):
def __str__(self):
return "Bad param '{}' passed into url: '{}'".format(self.param, self.url)
class KeyAuth(AuthBase):
def __init__(self, key):
self.key = key
def __call__(self, r):
r.headers['Authorization'] = "Api-Key {}".format(self.key)
r.headers["Authorization"] = "Api-Key {}".format(self.key)
return r
class ApcUrl:
""" A safe abstraction and testable for filling in parameters in
"""A safe abstraction and testable for filling in parameters in
api_client.cfg"""
def __init__(self, base_url):
self.base_url = base_url
@ -63,17 +72,18 @@ class ApcUrl:
return ApcUrl(temp_url)
def url(self):
if '{' in self.base_url:
if "{" in self.base_url:
raise IncompleteUrl(self.base_url)
else:
return self.base_url
class ApiRequest:
API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
def __init__(self, name, url, logger=None, api_key=None):
self.name = name
self.url = url
self.url = url
self.__req = None
if logger is None:
self.logger = logging
@ -86,36 +96,45 @@ class ApiRequest:
self.logger.debug(final_url)
try:
if _post_data:
response = requests.post(final_url,
data=_post_data, auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT)
response = requests.post(
final_url,
data=_post_data,
auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT,
)
else:
response = requests.get(final_url, params=params, auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT)
if 'application/json' in response.headers['content-type']:
response = requests.get(
final_url,
params=params,
auth=self.auth,
timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT,
)
if "application/json" in response.headers["content-type"]:
return response.json()
return response
except requests.exceptions.Timeout:
self.logger.error('HTTP request to %s timed out', final_url)
self.logger.error("HTTP request to %s timed out", final_url)
raise
def req(self, *args, **kwargs):
self.__req = lambda : self(*args, **kwargs)
self.__req = lambda: self(*args, **kwargs)
return self
def retry(self, n, delay=5):
"""Try to send request n times. If after n times it fails then
we finally raise exception"""
for i in range(0,n-1):
for i in range(0, n - 1):
try:
return self.__req()
except Exception:
time.sleep(delay)
return self.__req()
class RequestProvider:
""" Creates the available ApiRequest instance that can be read from
a config file """
"""Creates the available ApiRequest instance that can be read from
a config file"""
def __init__(self, cfg, endpoints):
self.config = cfg
self.requests = {}
@ -123,27 +142,29 @@ class RequestProvider:
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
protocol = get_protocol(self.config)
base_port = self.config['general']['base_port']
base_url = self.config['general']['base_url']
base_dir = self.config['general']['base_dir']
api_base = self.config['api_base']
base_port = self.config["general"]["base_port"]
base_url = self.config["general"]["base_url"]
base_dir = self.config["general"]["base_dir"]
api_base = self.config["api_base"]
api_url = "{protocol}://{base_url}:{base_port}/{base_dir}{api_base}/{action}".format_map(
UrlParamDict(protocol=protocol,
base_url=base_url,
base_port=base_port,
base_dir=base_dir,
api_base=api_base
))
UrlParamDict(
protocol=protocol,
base_url=base_url,
base_port=base_port,
base_dir=base_dir,
api_base=api_base,
)
)
self.url = ApcUrl(api_url)
# Now we must discover the possible actions
for action_name, action_value in endpoints.items():
new_url = self.url.params(action=action_value)
if '{api_key}' in action_value:
new_url = new_url.params(api_key=self.config["general"]['api_key'])
self.requests[action_name] = ApiRequest(action_name,
new_url,
api_key=self.config['general']['api_key'])
if "{api_key}" in action_value:
new_url = new_url.params(api_key=self.config["general"]["api_key"])
self.requests[action_name] = ApiRequest(
action_name, new_url, api_key=self.config["general"]["api_key"]
)
def available_requests(self):
return list(self.requests.keys())
@ -157,15 +178,20 @@ class RequestProvider:
else:
return super(RequestProvider, self).__getattribute__(attr)
def time_in_seconds(time):
return time.hour * 60 * 60 + \
time.minute * 60 + \
time.second + \
time.microsecond / 1000000.0
return (
time.hour * 60 * 60
+ time.minute * 60
+ time.second
+ time.microsecond / 1000000.0
)
def time_in_milliseconds(time):
return time_in_seconds(time) * 1000
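
A worked example of the conversion, using a datetime.time value:

import datetime

def time_in_seconds(t):
    return t.hour * 60 * 60 + t.minute * 60 + t.second + t.microsecond / 1000000.0

t = datetime.time(hour=1, minute=2, second=3, microsecond=500000)
assert time_in_seconds(t) == 3723.5            # 3600 + 120 + 3 + 0.5
assert time_in_seconds(t) * 1000 == 3723500.0  # i.e. time_in_milliseconds(t)
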
def fromisoformat(time_string):
"""
This is required for Python 3.6 support. datetime.time.fromisoformat was

View File

@ -26,58 +26,112 @@ api_config = {}
api_endpoints = {}
# URL to get the version number of the server API
api_endpoints['version_url'] = 'version/api_key/{api_key}'
#URL to register a components IP Address with the central web server
api_endpoints['register_component'] = 'register-component/format/json/api_key/{api_key}/component/{component}'
api_endpoints["version_url"] = "version/api_key/{api_key}"
# URL to register a components IP Address with the central web server
api_endpoints[
"register_component"
] = "register-component/format/json/api_key/{api_key}/component/{component}"
#media-monitor
api_endpoints['media_setup_url'] = 'media-monitor-setup/format/json/api_key/{api_key}'
api_endpoints['upload_recorded'] = 'upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}'
api_endpoints['update_media_url'] = 'reload-metadata/format/json/api_key/{api_key}/mode/{mode}'
api_endpoints['list_all_db_files'] = 'list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}'
api_endpoints['list_all_watched_dirs'] = 'list-all-watched-dirs/format/json/api_key/{api_key}'
api_endpoints['add_watched_dir'] = 'add-watched-dir/format/json/api_key/{api_key}/path/{path}'
api_endpoints['remove_watched_dir'] = 'remove-watched-dir/format/json/api_key/{api_key}/path/{path}'
api_endpoints['set_storage_dir'] = 'set-storage-dir/format/json/api_key/{api_key}/path/{path}'
api_endpoints['update_fs_mount'] = 'update-file-system-mount/format/json/api_key/{api_key}'
api_endpoints['reload_metadata_group'] = 'reload-metadata-group/format/json/api_key/{api_key}'
api_endpoints['handle_watched_dir_missing'] = 'handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}'
#show-recorder
api_endpoints['show_schedule_url'] = 'recorded-shows/format/json/api_key/{api_key}'
api_endpoints['upload_file_url'] = 'rest/media'
api_endpoints['upload_retries'] = '3'
api_endpoints['upload_wait'] = '60'
#pypo
api_endpoints['export_url'] = 'schedule/api_key/{api_key}'
api_endpoints['get_media_url'] = 'get-media/file/{file}/api_key/{api_key}'
api_endpoints['update_item_url'] = 'notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}'
api_endpoints['update_start_playing_url'] = 'notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/'
api_endpoints['get_stream_setting'] = 'get-stream-setting/format/json/api_key/{api_key}/'
api_endpoints['update_liquidsoap_status'] = 'update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}'
api_endpoints['update_source_status'] = 'update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}'
api_endpoints['check_live_stream_auth'] = 'check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}'
api_endpoints['get_bootstrap_info'] = 'get-bootstrap-info/format/json/api_key/{api_key}'
api_endpoints['get_files_without_replay_gain'] = 'get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}'
api_endpoints['update_replay_gain_value'] = 'update-replay-gain-value/format/json/api_key/{api_key}'
api_endpoints['notify_webstream_data'] = 'notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json'
api_endpoints['notify_liquidsoap_started'] = 'rabbitmq-do-push/api_key/{api_key}/format/json'
api_endpoints['get_stream_parameters'] = 'get-stream-parameters/api_key/{api_key}/format/json'
api_endpoints['push_stream_stats'] = 'push-stream-stats/api_key/{api_key}/format/json'
api_endpoints['update_stream_setting_table'] = 'update-stream-setting-table/api_key/{api_key}/format/json'
api_endpoints['get_files_without_silan_value'] = 'get-files-without-silan-value/api_key/{api_key}'
api_endpoints['update_cue_values_by_silan'] = 'update-cue-values-by-silan/api_key/{api_key}'
api_endpoints['update_metadata_on_tunein'] = 'update-metadata-on-tunein/api_key/{api_key}'
api_config['api_base'] = 'api'
api_config['bin_dir'] = '/usr/lib/airtime/api_clients/'
# media-monitor
api_endpoints["media_setup_url"] = "media-monitor-setup/format/json/api_key/{api_key}"
api_endpoints[
"upload_recorded"
] = "upload-recorded/format/json/api_key/{api_key}/fileid/{fileid}/showinstanceid/{showinstanceid}"
api_endpoints[
"update_media_url"
] = "reload-metadata/format/json/api_key/{api_key}/mode/{mode}"
api_endpoints[
"list_all_db_files"
] = "list-all-files/format/json/api_key/{api_key}/dir_id/{dir_id}/all/{all}"
api_endpoints[
"list_all_watched_dirs"
] = "list-all-watched-dirs/format/json/api_key/{api_key}"
api_endpoints[
"add_watched_dir"
] = "add-watched-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints[
"remove_watched_dir"
] = "remove-watched-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints[
"set_storage_dir"
] = "set-storage-dir/format/json/api_key/{api_key}/path/{path}"
api_endpoints[
"update_fs_mount"
] = "update-file-system-mount/format/json/api_key/{api_key}"
api_endpoints[
"reload_metadata_group"
] = "reload-metadata-group/format/json/api_key/{api_key}"
api_endpoints[
"handle_watched_dir_missing"
] = "handle-watched-dir-missing/format/json/api_key/{api_key}/dir/{dir}"
# show-recorder
api_endpoints["show_schedule_url"] = "recorded-shows/format/json/api_key/{api_key}"
api_endpoints["upload_file_url"] = "rest/media"
api_endpoints["upload_retries"] = "3"
api_endpoints["upload_wait"] = "60"
# pypo
api_endpoints["export_url"] = "schedule/api_key/{api_key}"
api_endpoints["get_media_url"] = "get-media/file/{file}/api_key/{api_key}"
api_endpoints[
"update_item_url"
] = "notify-schedule-group-play/api_key/{api_key}/schedule_id/{schedule_id}"
api_endpoints[
"update_start_playing_url"
] = "notify-media-item-start-play/api_key/{api_key}/media_id/{media_id}/"
api_endpoints[
"get_stream_setting"
] = "get-stream-setting/format/json/api_key/{api_key}/"
api_endpoints[
"update_liquidsoap_status"
] = "update-liquidsoap-status/format/json/api_key/{api_key}/msg/{msg}/stream_id/{stream_id}/boot_time/{boot_time}"
api_endpoints[
"update_source_status"
] = "update-source-status/format/json/api_key/{api_key}/sourcename/{sourcename}/status/{status}"
api_endpoints[
"check_live_stream_auth"
] = "check-live-stream-auth/format/json/api_key/{api_key}/username/{username}/password/{password}/djtype/{djtype}"
api_endpoints["get_bootstrap_info"] = "get-bootstrap-info/format/json/api_key/{api_key}"
api_endpoints[
"get_files_without_replay_gain"
] = "get-files-without-replay-gain/api_key/{api_key}/dir_id/{dir_id}"
api_endpoints[
"update_replay_gain_value"
] = "update-replay-gain-value/format/json/api_key/{api_key}"
api_endpoints[
"notify_webstream_data"
] = "notify-webstream-data/api_key/{api_key}/media_id/{media_id}/format/json"
api_endpoints[
"notify_liquidsoap_started"
] = "rabbitmq-do-push/api_key/{api_key}/format/json"
api_endpoints[
"get_stream_parameters"
] = "get-stream-parameters/api_key/{api_key}/format/json"
api_endpoints["push_stream_stats"] = "push-stream-stats/api_key/{api_key}/format/json"
api_endpoints[
"update_stream_setting_table"
] = "update-stream-setting-table/api_key/{api_key}/format/json"
api_endpoints[
"get_files_without_silan_value"
] = "get-files-without-silan-value/api_key/{api_key}"
api_endpoints[
"update_cue_values_by_silan"
] = "update-cue-values-by-silan/api_key/{api_key}"
api_endpoints[
"update_metadata_on_tunein"
] = "update-metadata-on-tunein/api_key/{api_key}"
api_config["api_base"] = "api"
api_config["bin_dir"] = "/usr/lib/airtime/api_clients/"
################################################################################
# Airtime API Version 1 Client
################################################################################
class AirtimeApiClient(object):
def __init__(self, logger=None,config_path='/etc/airtime/airtime.conf'):
if logger is None: self.logger = logging
else: self.logger = logger
def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"):
if logger is None:
self.logger = logging
else:
self.logger = logger
# loading config file
try:
@ -85,16 +139,18 @@ class AirtimeApiClient(object):
self.config.update(api_config)
self.services = RequestProvider(self.config, api_endpoints)
except Exception as e:
self.logger.exception('Error loading config file: %s', config_path)
self.logger.exception("Error loading config file: %s", config_path)
sys.exit(1)
def __get_airtime_version(self):
try: return self.services.version_url()['airtime_version']
except Exception: return -1
try:
return self.services.version_url()["airtime_version"]
except Exception:
return -1
def __get_api_version(self):
try:
return self.services.version_url()['api_version']
return self.services.version_url()["api_version"]
except Exception as e:
self.logger.exception(e)
return -1
@ -105,25 +161,30 @@ class AirtimeApiClient(object):
# logger.info('Airtime version found: ' + str(version))
if api_version == -1:
if verbose:
logger.info('Unable to get Airtime API version number.\n')
logger.info("Unable to get Airtime API version number.\n")
return False
elif api_version[0:3] != AIRTIME_API_VERSION[0:3]:
if verbose:
logger.info('Airtime API version found: ' + str(api_version))
logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION)
logger.info("Airtime API version found: " + str(api_version))
logger.info(
"pypo is only compatible with API version: " + AIRTIME_API_VERSION
)
return False
else:
if verbose:
logger.info('Airtime API version found: ' + str(api_version))
logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION)
logger.info("Airtime API version found: " + str(api_version))
logger.info(
"pypo is only compatible with API version: " + AIRTIME_API_VERSION
)
return True
def get_schedule(self):
# TODO : properly refactor this routine
# For now the return type is a little awkward, for compatibility reasons
try: return (True, self.services.export_url())
except: return (False, None)
try:
return (True, self.services.export_url())
except:
return (False, None)
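Given the (success, payload) tuple that get_schedule returns for compatibility, callers unpack it defensively. A usage sketch, assuming a client constructed from an existing /etc/airtime/airtime.conf:

# Sketch only: consuming the tuple returned above.
client = AirtimeApiClient()
ok, schedule = client.get_schedule()
if ok:
    print("got %d schedule entries" % len(schedule))
else:
    print("schedule fetch failed")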
def notify_liquidsoap_started(self):
try:
@ -132,9 +193,9 @@ class AirtimeApiClient(object):
self.logger.exception(e)
def notify_media_item_start_playing(self, media_id):
""" This is a callback from liquidsoap, we use this to notify
"""This is a callback from liquidsoap, we use this to notify
about the currently playing *song*. We get passed a JSON string
which we handed to liquidsoap in get_liquidsoap_data(). """
which we handed to liquidsoap in get_liquidsoap_data()."""
try:
return self.services.update_start_playing_url(media_id=media_id)
except Exception as e:
@ -150,7 +211,7 @@ class AirtimeApiClient(object):
def upload_recorded_show(self, files, show_id):
logger = self.logger
response = ''
response = ""
retries = int(self.config["upload_retries"])
retries_wait = int(self.config["upload_wait"])
@ -165,7 +226,9 @@ class AirtimeApiClient(object):
logger.debug(ApiRequest.API_HTTP_REQUEST_TIMEOUT)
try:
request = requests.post(url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT))
request = requests.post(
url, files=files, timeout=float(ApiRequest.API_HTTP_REQUEST_TIMEOUT)
)
response = request.json()
logger.debug(response)
@ -199,7 +262,7 @@ class AirtimeApiClient(object):
except Exception as e:
self.logger.exception(e)
#wait some time before next retry
# wait some time before next retry
time.sleep(retries_wait)
return response
@ -207,42 +270,49 @@ class AirtimeApiClient(object):
def check_live_stream_auth(self, username, password, dj_type):
try:
return self.services.check_live_stream_auth(
username=username, password=password, djtype=dj_type)
username=username, password=password, djtype=dj_type
)
except Exception as e:
self.logger.exception(e)
return {}
def construct_url(self,config_action_key):
def construct_url(self, config_action_key):
"""Constructs the base url for every request"""
# TODO : Make other methods in this class use this method.
if self.config["general"]["base_dir"].startswith("/"):
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
protocol = get_protocol(self.config)
url = "%s://%s:%s/%s%s/%s" % \
(protocol,
self.config["general"]["base_url"], str(self.config["general"]["base_port"]),
self.config["general"]["base_dir"], self.config["api_base"],
self.config[config_action_key])
url = "%s://%s:%s/%s%s/%s" % (
protocol,
self.config["general"]["base_url"],
str(self.config["general"]["base_port"]),
self.config["general"]["base_dir"],
self.config["api_base"],
self.config[config_action_key],
)
url = url.replace("%%api_key%%", self.config["general"]["api_key"])
return url
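To make the reformatted format string concrete, a worked example with assumed config values (not taken from a real install): protocol http, base_url localhost, base_port 80, base_dir airtime/ (leading slash already stripped), api_base api, and an action of version/:

# Worked example with assumed values; mirrors the "%s://%s:%s/%s%s/%s" template.
url = "%s://%s:%s/%s%s/%s" % ("http", "localhost", 80, "airtime/", "api", "version/")
assert url == "http://localhost:80/airtime/api/version/"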
def construct_rest_url(self,config_action_key):
def construct_rest_url(self, config_action_key):
"""Constructs the base url for RESTful requests"""
if self.config["general"]["base_dir"].startswith("/"):
self.config["general"]["base_dir"] = self.config["general"]["base_dir"][1:]
protocol = get_protocol(self.config)
url = "%s://%s:@%s:%s/%s/%s" % \
(protocol, self.config["general"]["api_key"],
self.config["general"]["base_url"], str(self.config["general"]["base_port"]),
self.config["general"]["base_dir"],
self.config[config_action_key])
url = "%s://%s:@%s:%s/%s/%s" % (
protocol,
self.config["general"]["api_key"],
self.config["general"]["base_url"],
str(self.config["general"]["base_port"]),
self.config["general"]["base_dir"],
self.config[config_action_key],
)
return url
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def setup_media_monitor(self):
return self.services.media_setup_url()
@ -264,49 +334,55 @@ class AirtimeApiClient(object):
# filter but here we prefer a little more verbosity to help
# debugging
for action in action_list:
if not 'mode' in action:
self.logger.debug("Warning: Trying to send a request element without a 'mode'")
self.logger.debug("Here is the the request: '%s'" % str(action) )
if not "mode" in action:
self.logger.debug(
"Warning: Trying to send a request element without a 'mode'"
)
self.logger.debug("Here is the the request: '%s'" % str(action))
else:
# Default is_record to 0 when the action does not specify it,
# so downstream code can rely on the key being present
if 'is_record' not in action:
action['is_record'] = 0
if "is_record" not in action:
action["is_record"] = 0
valid_actions.append(action)
# Note that we must prefix every key with: mdX where x is a number
# Is there a way to format the next line a little better? The
# parentheses make the code almost unreadable (a dict-comprehension sketch follows this method)
md_list = dict((("md%d" % i), json.dumps(md)) \
for i,md in enumerate(valid_actions))
md_list = dict(
(("md%d" % i), json.dumps(md)) for i, md in enumerate(valid_actions)
)
# For testing we add the following "dry" parameter to tell the
# controller not to actually do any changes
if dry: md_list['dry'] = 1
if dry:
md_list["dry"] = 1
self.logger.info("Pumping out %d requests..." % len(valid_actions))
return self.services.reload_metadata_group(_post_data=md_list)
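On the readability question raised in the comment above: the same mapping can be written as a dict comprehension, which reads more directly than the dict() call. A behaviorally equivalent sketch, with a made-up sample input:

import json

valid_actions = [{"mode": "create"}, {"mode": "delete"}]  # assumed sample input
md_list = {"md%d" % i: json.dumps(md) for i, md in enumerate(valid_actions)}
# {'md0': '{"mode": "create"}', 'md1': '{"mode": "delete"}'}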
#returns a list of all db files for a given directory in JSON format:
#{"files":["path/to/file1", "path/to/file2"]}
#Note that these are relative paths to the given directory. The full
#path is not returned.
# returns a list of all db files for a given directory in JSON format:
# {"files":["path/to/file1", "path/to/file2"]}
# Note that these are relative paths to the given directory. The full
# path is not returned.
def list_all_db_files(self, dir_id, all_files=True):
logger = self.logger
try:
all_files = "1" if all_files else "0"
response = self.services.list_all_db_files(dir_id=dir_id,
all=all_files)
response = self.services.list_all_db_files(dir_id=dir_id, all=all_files)
except Exception as e:
response = {}
logger.error("Exception: %s", e)
try:
return response["files"]
except KeyError:
self.logger.error("Could not find index 'files' in dictionary: %s",
str(response))
self.logger.error(
"Could not find index 'files' in dictionary: %s", str(response)
)
return []
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def list_all_watched_dirs(self):
return self.services.list_all_watched_dirs()
@ -314,6 +390,7 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def add_watched_dir(self, path):
return self.services.add_watched_dir(path=base64.b64encode(path))
@ -321,6 +398,7 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def remove_watched_dir(self, path):
return self.services.remove_watched_dir(path=base64.b64encode(path))
@ -328,6 +406,7 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def set_storage_dir(self, path):
return self.services.set_storage_dir(path=base64.b64encode(path))
@ -335,15 +414,16 @@ class AirtimeApiClient(object):
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def get_stream_setting(self):
return self.services.get_stream_setting()
def register_component(self, component):
""" Purpose of this method is to contact the server with a "Hey its
"""Purpose of this method is to contact the server with a "Hey its
me!" message. This will allow the server to register the component's
(component = media-monitor, pypo etc.) ip address, and later use it
to query monit via monit's http service, or download log files via an
http server. """
http server."""
return self.services.register_component(component=component)
def notify_liquidsoap_status(self, msg, stream_id, time):
@ -351,24 +431,24 @@ class AirtimeApiClient(object):
try:
post_data = {"msg_post": msg}
#encoded_msg is no longer used server_side!!
encoded_msg = urllib.parse.quote('dummy')
self.services.update_liquidsoap_status.req(post_data,
msg=encoded_msg,
stream_id=stream_id,
boot_time=time).retry(5)
# encoded_msg is no longer used server_side!!
encoded_msg = urllib.parse.quote("dummy")
self.services.update_liquidsoap_status.req(
post_data, msg=encoded_msg, stream_id=stream_id, boot_time=time
).retry(5)
except Exception as e:
self.logger.exception(e)
def notify_source_status(self, sourcename, status):
try:
return self.services.update_source_status.req(sourcename=sourcename,
status=status).retry(5)
return self.services.update_source_status.req(
sourcename=sourcename, status=status
).retry(5)
except Exception as e:
self.logger.exception(e)
def get_bootstrap_info(self):
""" Retrieve infomations needed on bootstrap time """
"""Retrieve infomations needed on bootstrap time"""
return self.services.get_bootstrap_info()
def get_files_without_replay_gain_value(self, dir_id):
@ -377,7 +457,7 @@ class AirtimeApiClient(object):
calculated. This list of files is downloaded into a file and the path
to this file is the return value.
"""
#http://localhost/api/get-files-without-replay-gain/dir_id/1
# http://localhost/api/get-files-without-replay-gain/dir_id/1
try:
return self.services.get_files_without_replay_gain(dir_id=dir_id)
except Exception as e:
@ -401,25 +481,31 @@ class AirtimeApiClient(object):
'pairs' is a list of pairs in (x, y), where x is the file's database
row id and y is the file's replay_gain value in dB
"""
self.logger.debug(self.services.update_replay_gain_value(
_post_data={'data': json.dumps(pairs)}))
self.logger.debug(
self.services.update_replay_gain_value(
_post_data={"data": json.dumps(pairs)}
)
)
def update_cue_values_by_silan(self, pairs):
"""
'pairs' is a list of pairs in (x, y), where x is the file's database
row id and y is the file's cue values in dB
"""
return self.services.update_cue_values_by_silan(_post_data={'data': json.dumps(pairs)})
return self.services.update_cue_values_by_silan(
_post_data={"data": json.dumps(pairs)}
)
def notify_webstream_data(self, data, media_id):
"""
Update the server with the latest metadata we've received from the
external webstream
"""
self.logger.info( self.services.notify_webstream_data.req(
_post_data={'data':data}, media_id=str(media_id)).retry(5))
self.logger.info(
self.services.notify_webstream_data.req(
_post_data={"data": data}, media_id=str(media_id)
).retry(5)
)
def get_stream_parameters(self):
response = self.services.get_stream_parameters()
@ -428,12 +514,16 @@ class AirtimeApiClient(object):
def push_stream_stats(self, data):
# TODO : users of this method should do their own error handling
response = self.services.push_stream_stats(_post_data={'data': json.dumps(data)})
response = self.services.push_stream_stats(
_post_data={"data": json.dumps(data)}
)
return response
def update_stream_setting_table(self, data):
try:
response = self.services.update_stream_setting_table(_post_data={'data': json.dumps(data)})
response = self.services.update_stream_setting_table(
_post_data={"data": json.dumps(data)}
)
return response
except Exception as e:
self.logger.exception(e)

View File

@ -18,17 +18,18 @@ LIBRETIME_API_VERSION = "2.0"
api_config = {}
api_endpoints = {}
api_endpoints['version_url'] = 'version/'
api_endpoints['schedule_url'] = 'schedule/'
api_endpoints['webstream_url'] = 'webstreams/{id}/'
api_endpoints['show_instance_url'] = 'show-instances/{id}/'
api_endpoints['show_url'] = 'shows/{id}/'
api_endpoints['file_url'] = 'files/{id}/'
api_endpoints['file_download_url'] = 'files/{id}/download/'
api_config['api_base'] = 'api/v2'
api_endpoints["version_url"] = "version/"
api_endpoints["schedule_url"] = "schedule/"
api_endpoints["webstream_url"] = "webstreams/{id}/"
api_endpoints["show_instance_url"] = "show-instances/{id}/"
api_endpoints["show_url"] = "shows/{id}/"
api_endpoints["file_url"] = "files/{id}/"
api_endpoints["file_download_url"] = "files/{id}/download/"
api_config["api_base"] = "api/v2"
class AirtimeApiClient:
def __init__(self, logger=None, config_path='/etc/airtime/airtime.conf'):
def __init__(self, logger=None, config_path="/etc/airtime/airtime.conf"):
if logger is None:
self.logger = logging
else:
@ -39,87 +40,89 @@ class AirtimeApiClient:
self.config.update(api_config)
self.services = RequestProvider(self.config, api_endpoints)
except Exception as e:
self.logger.exception('Error loading config file: %s', config_path)
self.logger.exception("Error loading config file: %s", config_path)
sys.exit(1)
def get_schedule(self):
current_time = datetime.datetime.utcnow()
end_time = current_time + datetime.timedelta(hours=1)
str_current = current_time.isoformat(timespec='seconds')
str_end = end_time.isoformat(timespec='seconds')
data = self.services.schedule_url(params={
'ends__range': ('{}Z,{}Z'.format(str_current, str_end)),
})
result = {'media': {} }
for item in data:
start = isoparse(item['starts'])
key = start.strftime('%YYYY-%mm-%dd-%HH-%MM-%SS')
end = isoparse(item['ends'])
show_instance = self.services.show_instance_url(id=item['instance_id'])
show = self.services.show_url(id=show_instance['show_id'])
result['media'][key] = {
'start': start.strftime('%Y-%m-%d-%H-%M-%S'),
'end': end.strftime('%Y-%m-%d-%H-%M-%S'),
'row_id': item['id']
str_current = current_time.isoformat(timespec="seconds")
str_end = end_time.isoformat(timespec="seconds")
data = self.services.schedule_url(
params={
"ends__range": ("{}Z,{}Z".format(str_current, str_end)),
}
current = result['media'][key]
if item['file']:
current['independent_event'] = False
current['type'] = 'file'
current['id'] = item['file_id']
)
result = {"media": {}}
for item in data:
start = isoparse(item["starts"])
key = start.strftime("%YYYY-%mm-%dd-%HH-%MM-%SS")
end = isoparse(item["ends"])
fade_in = time_in_milliseconds(fromisoformat(item['fade_in']))
fade_out = time_in_milliseconds(fromisoformat(item['fade_out']))
show_instance = self.services.show_instance_url(id=item["instance_id"])
show = self.services.show_url(id=show_instance["show_id"])
cue_in = time_in_seconds(fromisoformat(item['cue_in']))
cue_out = time_in_seconds(fromisoformat(item['cue_out']))
result["media"][key] = {
"start": start.strftime("%Y-%m-%d-%H-%M-%S"),
"end": end.strftime("%Y-%m-%d-%H-%M-%S"),
"row_id": item["id"],
}
current = result["media"][key]
if item["file"]:
current["independent_event"] = False
current["type"] = "file"
current["id"] = item["file_id"]
current['fade_in'] = fade_in
current['fade_out'] = fade_out
current['cue_in'] = cue_in
current['cue_out'] = cue_out
fade_in = time_in_milliseconds(fromisoformat(item["fade_in"]))
fade_out = time_in_milliseconds(fromisoformat(item["fade_out"]))
info = self.services.file_url(id=item['file_id'])
current['metadata'] = info
current['uri'] = item['file']
current['filesize'] = info['filesize']
elif item['stream']:
current['independent_event'] = True
current['id'] = item['stream_id']
info = self.services.webstream_url(id=item['stream_id'])
current['uri'] = info['url']
current['type'] = 'stream_buffer_start'
cue_in = time_in_seconds(fromisoformat(item["cue_in"]))
cue_out = time_in_seconds(fromisoformat(item["cue_out"]))
current["fade_in"] = fade_in
current["fade_out"] = fade_out
current["cue_in"] = cue_in
current["cue_out"] = cue_out
info = self.services.file_url(id=item["file_id"])
current["metadata"] = info
current["uri"] = item["file"]
current["filesize"] = info["filesize"]
elif item["stream"]:
current["independent_event"] = True
current["id"] = item["stream_id"]
info = self.services.webstream_url(id=item["stream_id"])
current["uri"] = info["url"]
current["type"] = "stream_buffer_start"
# Stream events are instantaneous
current['end'] = current['start']
current["end"] = current["start"]
result['{}_0'.format(key)] = {
'id': current['id'],
'type': 'stream_output_start',
'start': current['start'],
'end': current['start'],
'uri': current['uri'],
'row_id': current['row_id'],
'independent_event': current['independent_event'],
result["{}_0".format(key)] = {
"id": current["id"],
"type": "stream_output_start",
"start": current["start"],
"end": current["start"],
"uri": current["uri"],
"row_id": current["row_id"],
"independent_event": current["independent_event"],
}
result[end.isoformat()] = {
'type': 'stream_buffer_end',
'start': current['end'],
'end': current['end'],
'uri': current['uri'],
'row_id': current['row_id'],
'independent_event': current['independent_event'],
"type": "stream_buffer_end",
"start": current["end"],
"end": current["end"],
"uri": current["uri"],
"row_id": current["row_id"],
"independent_event": current["independent_event"],
}
result['{}_0'.format(end.isoformat())] = {
'type': 'stream_output_end',
'start': current['end'],
'end': current['end'],
'uri': current['uri'],
'row_id': current['row_id'],
'independent_event': current['independent_event'],
result["{}_0".format(end.isoformat())] = {
"type": "stream_output_end",
"start": current["end"],
"end": current["end"],
"uri": current["uri"],
"row_id": current["row_id"],
"independent_event": current["independent_event"],
}
return result
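For orientation, a sketch of the structure this rewritten get_schedule returns for a single file item; all field values are assumed, not captured from a server. Note in passing that the key's format string "%YYYY-%mm-%dd-%HH-%MM-%SS" expands %Y, %m, ... and keeps the doubled letters as literals, so keys come out like "2021YYY-05m-27d-16H-23M-02S", while the start/end fields use the plain "%Y-%m-%d-%H-%M-%S" form.

# Assumed shape only (values illustrative):
result = {
    "media": {
        "2021YYY-05m-27d-16H-23M-02S": {
            "start": "2021-05-27-16-23-02",
            "end": "2021-05-27-16-53-02",
            "row_id": 1,
            "independent_event": False,
            "type": "file",
            "id": 42,
            "fade_in": 500.0,      # milliseconds
            "fade_out": 500.0,
            "cue_in": 0.0,         # seconds
            "cue_out": 1800.0,
            "metadata": {"filesize": 12345},  # full file record in practice
            "uri": "imported/42.mp3",
            "filesize": 12345,
        }
    }
}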

View File

@ -9,17 +9,19 @@ script_path = os.path.dirname(os.path.realpath(__file__))
print(script_path)
os.chdir(script_path)
setup(name='api_clients',
version='2.0.0',
description='LibreTime API Client',
url='http://github.com/LibreTime/Libretime',
author='LibreTime Contributors',
license='AGPLv3',
packages=['api_clients'],
scripts=[],
install_requires=[
'configobj',
'python-dateutil',
],
zip_safe=False,
data_files=[])
setup(
name="api_clients",
version="2.0.0",
description="LibreTime API Client",
url="http://github.com/LibreTime/Libretime",
author="LibreTime Contributors",
license="AGPLv3",
packages=["api_clients"],
scripts=[],
install_requires=[
"configobj",
"python-dateutil",
],
zip_safe=False,
data_files=[],
)

View File

@ -2,6 +2,7 @@
import unittest
from api_clients.utils import ApcUrl, UrlBadParam, IncompleteUrl
class TestApcUrl(unittest.TestCase):
def test_init(self):
url = "/testing"
@ -10,22 +11,23 @@ class TestApcUrl(unittest.TestCase):
def test_params_1(self):
u = ApcUrl("/testing/{key}")
self.assertEqual(u.params(key='val').url(), '/testing/val')
self.assertEqual(u.params(key="val").url(), "/testing/val")
def test_params_2(self):
u = ApcUrl('/testing/{key}/{api}/more_testing')
full_url = u.params(key="AAA",api="BBB").url()
self.assertEqual(full_url, '/testing/AAA/BBB/more_testing')
u = ApcUrl("/testing/{key}/{api}/more_testing")
full_url = u.params(key="AAA", api="BBB").url()
self.assertEqual(full_url, "/testing/AAA/BBB/more_testing")
def test_params_ex(self):
u = ApcUrl("/testing/{key}")
with self.assertRaises(UrlBadParam):
u.params(bad_key='testing')
u.params(bad_key="testing")
def test_url(self):
u = "one/two/three"
self.assertEqual( ApcUrl(u).url(), u )
self.assertEqual(ApcUrl(u).url(), u)
def test_url_ex(self):
u = ApcUrl('/{one}/{two}/three').params(two='testing')
with self.assertRaises(IncompleteUrl): u.url()
u = ApcUrl("/{one}/{two}/three").params(two="testing")
with self.assertRaises(IncompleteUrl):
u.url()

View File

@ -4,39 +4,43 @@ import json
from mock import MagicMock, patch
from api_clients.utils import ApcUrl, ApiRequest
class ResponseInfo:
@property
def headers(self):
return {'content-type': 'application/json'}
return {"content-type": "application/json"}
def json(self):
return {'ok', 'ok'}
return {"ok", "ok"}
class TestApiRequest(unittest.TestCase):
def test_init(self):
u = ApiRequest('request_name', ApcUrl('/test/ing'))
u = ApiRequest("request_name", ApcUrl("/test/ing"))
self.assertEqual(u.name, "request_name")
def test_call_json(self):
ret = {'ok':'ok'}
ret = {"ok": "ok"}
read = MagicMock()
read.headers = {'content-type': 'application/json'}
read.headers = {"content-type": "application/json"}
read.json = MagicMock(return_value=ret)
u = 'http://localhost/testing'
with patch('requests.get') as mock_method:
u = "http://localhost/testing"
with patch("requests.get") as mock_method:
mock_method.return_value = read
request = ApiRequest('mm', ApcUrl(u))()
request = ApiRequest("mm", ApcUrl(u))()
self.assertEqual(request, ret)
def test_call_html(self):
ret = '<html><head></head><body></body></html>'
ret = "<html><head></head><body></body></html>"
read = MagicMock()
read.headers = {'content-type': 'application/html'}
read.headers = {"content-type": "application/html"}
read.text = MagicMock(return_value=ret)
u = 'http://localhost/testing'
with patch('requests.get') as mock_method:
u = "http://localhost/testing"
with patch("requests.get") as mock_method:
mock_method.return_value = read
request = ApiRequest('mm', ApcUrl(u))()
request = ApiRequest("mm", ApcUrl(u))()
self.assertEqual(request.text(), ret)
if __name__ == '__main__': unittest.main()
if __name__ == "__main__":
unittest.main()

View File

@ -6,18 +6,19 @@ from configobj import ConfigObj
from api_clients.version1 import api_config
from api_clients.utils import RequestProvider
class TestRequestProvider(unittest.TestCase):
def setUp(self):
self.cfg = api_config
self.cfg['general'] = {}
self.cfg['general']['base_dir'] = '/test'
self.cfg['general']['base_port'] = 80
self.cfg['general']['base_url'] = 'localhost'
self.cfg['general']['api_key'] = 'TEST_KEY'
self.cfg['api_base'] = 'api'
self.cfg["general"] = {}
self.cfg["general"]["base_dir"] = "/test"
self.cfg["general"]["base_port"] = 80
self.cfg["general"]["base_url"] = "localhost"
self.cfg["general"]["api_key"] = "TEST_KEY"
self.cfg["api_base"] = "api"
def test_test(self):
self.assertTrue('general' in self.cfg)
self.assertTrue("general" in self.cfg)
def test_init(self):
rp = RequestProvider(self.cfg, {})
@ -25,12 +26,14 @@ class TestRequestProvider(unittest.TestCase):
def test_contains(self):
methods = {
'upload_recorded': '/1/',
'update_media_url': '/2/',
'list_all_db_files': '/3/',
"upload_recorded": "/1/",
"update_media_url": "/2/",
"list_all_db_files": "/3/",
}
rp = RequestProvider(self.cfg, methods)
for meth in methods:
self.assertTrue(meth in rp.requests)
if __name__ == '__main__': unittest.main()
if __name__ == "__main__":
unittest.main()

View File

@ -4,13 +4,14 @@ import configparser
import unittest
from api_clients import utils
def get_force_ssl(value, useConfigParser):
config = {}
if useConfigParser:
config = configparser.ConfigParser()
config['general'] = {
'base_port': 80,
'force_ssl': value,
config["general"] = {
"base_port": 80,
"force_ssl": value,
}
return utils.get_protocol(config)
@ -27,65 +28,65 @@ class TestTime(unittest.TestCase):
class TestGetProtocol(unittest.TestCase):
def test_dict_config_empty_http(self):
config = {'general': {}}
config = {"general": {}}
protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http')
self.assertEqual(protocol, "http")
def test_dict_config_http(self):
config = {
'general': {
'base_port': 80,
"general": {
"base_port": 80,
},
}
protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http')
self.assertEqual(protocol, "http")
def test_dict_config_https(self):
config = {
'general': {
'base_port': 443,
"general": {
"base_port": 443,
},
}
protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'https')
self.assertEqual(protocol, "https")
def test_dict_config_force_https(self):
postive_values = ['yes', 'Yes', 'True', 'true', True]
negative_values = ['no', 'No', 'False', 'false', False]
postive_values = ["yes", "Yes", "True", "true", True]
negative_values = ["no", "No", "False", "false", False]
for value in postive_values:
self.assertEqual(get_force_ssl(value, False), 'https')
self.assertEqual(get_force_ssl(value, False), "https")
for value in negative_values:
self.assertEqual(get_force_ssl(value, False), 'http')
self.assertEqual(get_force_ssl(value, False), "http")
def test_configparser_config_empty_http(self):
config = configparser.ConfigParser()
config['general'] = {}
config["general"] = {}
protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http')
self.assertEqual(protocol, "http")
def test_configparser_config_http(self):
config = configparser.ConfigParser()
config['general'] = {
'base_port': 80,
config["general"] = {
"base_port": 80,
}
protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'http')
self.assertEqual(protocol, "http")
def test_configparser_config_https(self):
config = configparser.ConfigParser()
config['general'] = {
'base_port': 443,
config["general"] = {
"base_port": 443,
}
protocol = utils.get_protocol(config)
self.assertEqual(protocol, 'https')
self.assertEqual(protocol, "https")
def test_configparser_config_force_https(self):
postive_values = ['yes', 'Yes', 'True', 'true', True]
negative_values = ['no', 'No', 'False', 'false', False]
postive_values = ["yes", "Yes", "True", "true", True]
negative_values = ["no", "No", "False", "false", False]
for value in postive_values:
self.assertEqual(get_force_ssl(value, True), 'https')
self.assertEqual(get_force_ssl(value, True), "https")
for value in negative_values:
self.assertEqual(get_force_ssl(value, True), 'http')
self.assertEqual(get_force_ssl(value, True), "http")
def test_fromisoformat(self):
time = {
@ -96,4 +97,6 @@ class TestGetProtocol(unittest.TestCase):
result = utils.fromisoformat(time_string)
self.assertEqual(result, expected)
if __name__ == '__main__': unittest.main()
if __name__ == "__main__":
unittest.main()

View File

@ -9,14 +9,18 @@ if os.geteuid() != 0:
print("Please run this as root.")
sys.exit(1)
def get_current_script_dir():
current_script_dir = os.path.realpath(__file__)
index = current_script_dir.rindex('/')
return current_script_dir[0:index]
current_script_dir = os.path.realpath(__file__)
index = current_script_dir.rindex("/")
return current_script_dir[0:index]
try:
current_script_dir = get_current_script_dir()
shutil.copy(current_script_dir+"/../airtime-icecast-status.xsl", "/usr/share/icecast2/web")
shutil.copy(
current_script_dir + "/../airtime-icecast-status.xsl", "/usr/share/icecast2/web"
)
except Exception as e:
print("exception: {}".format(e))

View File

@ -2,5 +2,5 @@
# -*- coding: utf-8 -*-
import runpy
# Run the liquidsoap python module
runpy.run_module('liquidsoap')
# Run the liquidsoap python module
runpy.run_module("liquidsoap")

View File

@ -3,4 +3,3 @@
import runpy
runpy.run_module("pypo", run_name="__main__")

View File

@ -27,27 +27,75 @@ import json
from configobj import ConfigObj
# custom imports
#from util import *
# from util import *
from api_clients import version1 as api_client
LOG_LEVEL = logging.INFO
LOG_PATH = '/var/log/airtime/pypo/notify.log'
LOG_PATH = "/var/log/airtime/pypo/notify.log"
# help screen / info
usage = "%prog [options]" + " - notification gateway"
parser = OptionParser(usage=usage)
# Options
parser.add_option("-d", "--data", help="Pass JSON data from Liquidsoap into this script.", metavar="data")
parser.add_option("-m", "--media-id", help="ID of the file that is currently playing.", metavar="media_id")
parser.add_option("-e", "--error", action="store", dest="error", type="string", help="Liquidsoap error msg.", metavar="error_msg")
parser.add_option(
"-d",
"--data",
help="Pass JSON data from Liquidsoap into this script.",
metavar="data",
)
parser.add_option(
"-m",
"--media-id",
help="ID of the file that is currently playing.",
metavar="media_id",
)
parser.add_option(
"-e",
"--error",
action="store",
dest="error",
type="string",
help="Liquidsoap error msg.",
metavar="error_msg",
)
parser.add_option("-s", "--stream-id", help="ID stream", metavar="stream_id")
parser.add_option("-c", "--connect", help="Liquidsoap connected", action="store_true", metavar="connect")
parser.add_option("-t", "--time", help="Liquidsoap boot up time", action="store", dest="time", metavar="time", type="string")
parser.add_option("-x", "--source-name", help="source connection name", metavar="source_name")
parser.add_option("-y", "--source-status", help="source connection status", metavar="source_status")
parser.add_option("-w", "--webstream", help="JSON metadata associated with webstream", metavar="json_data")
parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started", metavar="json_data", action="store_true", default=False)
parser.add_option(
"-c",
"--connect",
help="Liquidsoap connected",
action="store_true",
metavar="connect",
)
parser.add_option(
"-t",
"--time",
help="Liquidsoap boot up time",
action="store",
dest="time",
metavar="time",
type="string",
)
parser.add_option(
"-x", "--source-name", help="source connection name", metavar="source_name"
)
parser.add_option(
"-y", "--source-status", help="source connection status", metavar="source_status"
)
parser.add_option(
"-w",
"--webstream",
help="JSON metadata associated with webstream",
metavar="json_data",
)
parser.add_option(
"-n",
"--liquidsoap-started",
help="notify liquidsoap started",
metavar="json_data",
action="store_true",
default=False,
)
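Taken together, the definitions above make up the notify gateway's CLI. A quick sanity sketch of how optparse maps the long options to attribute names (dashes become underscores); the sample values are made up:

# Illustrative parse; Liquidsoap normally supplies these arguments.
(options, args) = parser.parse_args(["--media-id", "1234"])
assert options.media_id == "1234"

(options, args) = parser.parse_args(["--source-name", "master_dj", "--source-status", "true"])
assert options.source_name == "master_dj" and options.source_status == "true"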
# parse options
@ -55,12 +103,15 @@ parser.add_option("-n", "--liquidsoap-started", help="notify liquidsoap started"
# Set up logging
logging.captureWarnings(True)
logFormatter = logging.Formatter("%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s")
logFormatter = logging.Formatter(
"%(asctime)s [%(module)s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger()
rootLogger.setLevel(LOG_LEVEL)
fileHandler = logging.handlers.RotatingFileHandler(filename=LOG_PATH, maxBytes=1024*1024*30,
backupCount=8)
fileHandler = logging.handlers.RotatingFileHandler(
filename=LOG_PATH, maxBytes=1024 * 1024 * 30, backupCount=8
)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
@ -69,15 +120,15 @@ consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
logger = rootLogger
#need to wait for Python 2.7 for this..
#logging.captureWarnings(True)
# need to wait for Python 2.7 for this..
# logging.captureWarnings(True)
# loading config file
try:
config = ConfigObj('/etc/airtime/airtime.conf')
config = ConfigObj("/etc/airtime/airtime.conf")
except Exception as e:
logger.error('Error loading config file: %s', e)
logger.error("Error loading config file: %s", e)
sys.exit()
@ -90,39 +141,41 @@ class Notify:
self.api_client.notify_liquidsoap_started()
def notify_media_start_playing(self, media_id):
logger.debug('#################################################')
logger.debug('# Calling server to update about what\'s playing #')
logger.debug('#################################################')
logger.debug("#################################################")
logger.debug("# Calling server to update about what's playing #")
logger.debug("#################################################")
response = self.api_client.notify_media_item_start_playing(media_id)
logger.debug("Response: " + json.dumps(response))
# @param time: time that LS started
def notify_liquidsoap_status(self, msg, stream_id, time):
logger.info('#################################################')
logger.info('# Calling server to update liquidsoap status #')
logger.info('#################################################')
logger.info('msg = ' + str(msg))
logger.info("#################################################")
logger.info("# Calling server to update liquidsoap status #")
logger.info("#################################################")
logger.info("msg = " + str(msg))
response = self.api_client.notify_liquidsoap_status(msg, stream_id, time)
logger.info("Response: " + json.dumps(response))
def notify_source_status(self, source_name, status):
logger.debug('#################################################')
logger.debug('# Calling server to update source status #')
logger.debug('#################################################')
logger.debug('msg = ' + str(source_name) + ' : ' + str(status))
logger.debug("#################################################")
logger.debug("# Calling server to update source status #")
logger.debug("#################################################")
logger.debug("msg = " + str(source_name) + " : " + str(status))
response = self.api_client.notify_source_status(source_name, status)
logger.debug("Response: " + json.dumps(response))
def notify_webstream_data(self, data, media_id):
logger.debug('#################################################')
logger.debug('# Calling server to update webstream data #')
logger.debug('#################################################')
logger.debug("#################################################")
logger.debug("# Calling server to update webstream data #")
logger.debug("#################################################")
response = self.api_client.notify_webstream_data(data, media_id)
logger.debug("Response: " + json.dumps(response))
def run_with_options(self, options):
if options.error and options.stream_id:
self.notify_liquidsoap_status(options.error, options.stream_id, options.time)
self.notify_liquidsoap_status(
options.error, options.stream_id, options.time
)
elif options.connect and options.stream_id:
self.notify_liquidsoap_status("OK", options.stream_id, options.time)
elif options.source_name and options.source_status:
@ -134,15 +187,17 @@ class Notify:
elif options.liquidsoap_started:
self.notify_liquidsoap_started()
else:
logger.debug("Unrecognized option in options({}). Doing nothing".format(options))
logger.debug(
"Unrecognized option in options({}). Doing nothing".format(options)
)
if __name__ == '__main__':
if __name__ == "__main__":
print()
print('#########################################')
print('# *** pypo *** #')
print('# pypo notification gateway #')
print('#########################################')
print("#########################################")
print("# *** pypo *** #")
print("# pypo notification gateway #")
print("#########################################")
# initialize
try:
@ -150,4 +205,3 @@ if __name__ == '__main__':
n.run_with_options(options)
except Exception as e:
print(traceback.format_exc())

View File

@ -7,9 +7,10 @@ import time
import traceback
from api_clients.version1 import AirtimeApiClient
def generate_liquidsoap_config(ss):
data = ss['msg']
fh = open('/etc/airtime/liquidsoap.cfg', 'w')
data = ss["msg"]
fh = open("/etc/airtime/liquidsoap.cfg", "w")
fh.write("################################################\n")
fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n")
fh.write("################################################\n")
@ -17,17 +18,17 @@ def generate_liquidsoap_config(ss):
for key, value in data.items():
try:
if not "port" in key and not "bitrate" in key: # Stupid hack
if not "port" in key and not "bitrate" in key: # Stupid hack
raise ValueError()
str_buffer = "%s = %s\n" % (key, int(value))
except ValueError:
try: # Is it a boolean?
if value=="true" or value=="false":
try: # Is it a boolean?
if value == "true" or value == "false":
str_buffer = "%s = %s\n" % (key, value.lower())
else:
raise ValueError() # Just drop into the except below
except: #Everything else is a string
str_buffer = "%s = \"%s\"\n" % (key, value)
raise ValueError() # Just drop into the except below
except: # Everything else is a string
str_buffer = '%s = "%s"\n' % (key, value)
fh.write(str_buffer)
# ignore squashes unused variable errors from Liquidsoap
@ -38,8 +39,9 @@ def generate_liquidsoap_config(ss):
fh.write('auth_path = "%s/liquidsoap_auth.py"\n' % auth_path)
fh.close()
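The try/except cascade above implements a coercion order: integers are only accepted for keys containing "port" or "bitrate", then the literals "true"/"false" pass through as booleans, and everything else is emitted as a quoted string. A standalone restatement of that decision (hypothetical helper, same behavior under the same inputs):

def coerce_setting(key, value):
    # Hypothetical restatement of the cascade above, not part of this commit.
    if "port" in key or "bitrate" in key:
        try:
            return "%s = %s\n" % (key, int(value))
        except ValueError:
            pass
    if value == "true" or value == "false":
        return "%s = %s\n" % (key, value.lower())
    return '%s = "%s"\n' % (key, value)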
def run():
logging.basicConfig(format='%(message)s')
logging.basicConfig(format="%(message)s")
attempts = 0
max_attempts = 10
successful = False

View File

@ -9,16 +9,16 @@ dj_type = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
source_type = ''
if dj_type == '--master':
source_type = 'master'
elif dj_type == '--dj':
source_type = 'dj'
source_type = ""
if dj_type == "--master":
source_type = "master"
elif dj_type == "--dj":
source_type = "dj"
response = api_clients.check_live_stream_auth(username, password, source_type)
if 'msg' in response and response['msg'] == True:
print(response['msg'])
if "msg" in response and response["msg"] == True:
print(response["msg"])
sys.exit(0)
else:
print(False)
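As the sys.argv reads above imply, the script takes exactly three positional arguments; an assumed invocation from Liquidsoap's harbor auth hook looks like this (paths illustrative):

# Illustrative only (argument order from the sys.argv reads above):
#   liquidsoap_auth.py --master <username> <password>
#   liquidsoap_auth.py --dj <username> <password>
# On success the script prints the server's msg value (True) and exits 0;
# otherwise it prints False.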

View File

@ -4,17 +4,16 @@ import telnetlib
import sys
try:
config = ConfigObj('/etc/airtime/airtime.conf')
LS_HOST = config['pypo']['ls_host']
LS_PORT = config['pypo']['ls_port']
config = ConfigObj("/etc/airtime/airtime.conf")
LS_HOST = config["pypo"]["ls_host"]
LS_PORT = config["pypo"]["ls_port"]
tn = telnetlib.Telnet(LS_HOST, LS_PORT)
tn.write("master_harbor.stop\n")
tn.write("live_dj_harbor.stop\n")
tn.write('exit\n')
tn.write("exit\n")
tn.read_all()
except Exception as e:
print("Error loading config file: {}".format(e))
sys.exit()

View File

@ -18,6 +18,7 @@ from configobj import ConfigObj
from datetime import datetime
from optparse import OptionParser
import importlib
try:
from queue import Queue
except ImportError: # Python 2.7.5 (CentOS 7)

View File

@ -10,9 +10,10 @@ import time
from api_clients import version1 as api_client
class ListenerStat(Thread):
HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
def __init__(self, config, logger=None):
Thread.__init__(self)
@ -28,50 +29,49 @@ class ListenerStat(Thread):
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
return "".join(rc)
def get_stream_parameters(self):
#[{"user":"", "password":"", "url":"", "port":""},{},{}]
# [{"user":"", "password":"", "url":"", "port":""},{},{}]
return self.api_client.get_stream_parameters()
def get_stream_server_xml(self, ip, url, is_shoutcast=False):
auth_string = "%(admin_user)s:%(admin_pass)s" % ip
encoded = base64.b64encode(auth_string.encode('utf-8'))
encoded = base64.b64encode(auth_string.encode("utf-8"))
header = {"Authorization":"Basic %s" % encoded.decode('ascii')}
header = {"Authorization": "Basic %s" % encoded.decode("ascii")}
if is_shoutcast:
#user agent is required for shoutcast auth, otherwise it returns 404.
# user agent is required for shoutcast auth, otherwise it returns 404.
user_agent = "Mozilla/5.0 (Linux; rv:22.0) Gecko/20130405 Firefox/22.0"
header["User-Agent"] = user_agent
req = urllib.request.Request(
#assuming that the icecast stats path is /admin/stats.xml
#need to fix this
# assuming that the icecast stats path is /admin/stats.xml
# need to fix this
url=url,
headers=header)
headers=header,
)
f = urllib.request.urlopen(req, timeout=ListenerStat.HTTP_REQUEST_TIMEOUT)
document = f.read()
return document
def get_icecast_stats(self, ip):
document = None
if "airtime.pro" in ip["host"].lower():
url = 'http://%(host)s:%(port)s/stats.xsl' % ip
url = "http://%(host)s:%(port)s/stats.xsl" % ip
document = self.get_stream_server_xml(ip, url)
else:
url = 'http://%(host)s:%(port)s/admin/stats.xml' % ip
url = "http://%(host)s:%(port)s/admin/stats.xml" % ip
document = self.get_stream_server_xml(ip, url)
dom = defusedxml.minidom.parseString(document)
sources = dom.getElementsByTagName("source")
mount_stats = None
for s in sources:
#drop the leading '/' character
# drop the leading '/' character
mount_name = s.getAttribute("mount")[1:]
if mount_name == ip["mount"]:
timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
@ -80,14 +80,16 @@ class ListenerStat(Thread):
if len(listeners):
num_listeners = self.get_node_text(listeners[0].childNodes)
mount_stats = {"timestamp":timestamp, \
"num_listeners": num_listeners, \
"mount_name": mount_name}
mount_stats = {
"timestamp": timestamp,
"num_listeners": num_listeners,
"mount_name": mount_name,
}
return mount_stats
def get_shoutcast_stats(self, ip):
url = 'http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml' % ip
url = "http://%(host)s:%(port)s/admin.cgi?sid=1&mode=viewxml" % ip
document = self.get_stream_server_xml(ip, url, is_shoutcast=True)
dom = defusedxml.minidom.parseString(document)
current_listeners = dom.getElementsByTagName("CURRENTLISTENERS")
@ -97,34 +99,37 @@ class ListenerStat(Thread):
if len(current_listeners):
num_listeners = self.get_node_text(current_listeners[0].childNodes)
mount_stats = {"timestamp":timestamp, \
"num_listeners": num_listeners, \
"mount_name": "shoutcast"}
mount_stats = {
"timestamp": timestamp,
"num_listeners": num_listeners,
"mount_name": "shoutcast",
}
return mount_stats
def get_stream_stats(self, stream_parameters):
stats = []
#iterate over stream_parameters which is a list of dicts. Each dict
#represents one Airtime stream (currently this limit is 3).
#Note that there can be optimizations done, since if all three
#streams are the same server, we will still initiate 3 separate
#connections
# iterate over stream_parameters which is a list of dicts. Each dict
# represents one Airtime stream (currently this limit is 3).
# Note that there can be optimizations done, since if all three
# streams are the same server, we will still initiate 3 separate
# connections
for k, v in stream_parameters.items():
if v["enable"] == 'true':
if v["enable"] == "true":
try:
if v["output"] == "icecast":
mount_stats = self.get_icecast_stats(v)
if mount_stats: stats.append(mount_stats)
if mount_stats:
stats.append(mount_stats)
else:
stats.append(self.get_shoutcast_stats(v))
self.update_listener_stat_error(v["mount"], 'OK')
self.update_listener_stat_error(v["mount"], "OK")
except Exception as e:
try:
self.update_listener_stat_error(v["mount"], str(e))
except Exception as e:
self.logger.error('Exception: %s', e)
self.logger.error("Exception: %s", e)
return stats
@ -132,15 +137,15 @@ class ListenerStat(Thread):
self.api_client.push_stream_stats(stats)
def update_listener_stat_error(self, stream_id, error):
keyname = '%s_listener_stat_error' % stream_id
keyname = "%s_listener_stat_error" % stream_id
data = {keyname: error}
self.api_client.update_stream_setting_table(data)
def run(self):
#Wake up every 120 seconds and gather icecast statistics. Note that we
#are currently querying the server every 2 minutes for list of
#mountpoints as well. We could remove this query if we hooked into
#rabbitmq events, and listened for these changes instead.
# Wake up every 120 seconds and gather icecast statistics. Note that we
# are currently querying the server every 2 minutes for list of
# mountpoints as well. We could remove this query if we hooked into
# rabbitmq events, and listened for these changes instead.
while True:
try:
stream_parameters = self.get_stream_parameters()
@ -149,25 +154,27 @@ class ListenerStat(Thread):
if stats:
self.push_stream_stats(stats)
except Exception as e:
self.logger.error('Exception: %s', e)
self.logger.error("Exception: %s", e)
time.sleep(120)
self.logger.info('ListenerStat thread exiting')
self.logger.info("ListenerStat thread exiting")
if __name__ == "__main__":
# create logger
logger = logging.getLogger('std_out')
logger = logging.getLogger("std_out")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
#ch = logging.StreamHandler()
#ch.setLevel(logging.DEBUG)
# ch = logging.StreamHandler()
# ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s')
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)s - %(levelname)s - %(message)s"
)
# add formatter to ch
#ch.setFormatter(formatter)
# ch.setFormatter(formatter)
# add ch to logger
#logger.addHandler(ch)
# logger.addHandler(ch)
#ls = ListenerStat(logger=logger)
#ls.run()
# ls = ListenerStat(logger=logger)
# ls.run()

View File

@ -2,6 +2,7 @@
import re
from packaging.version import Version, parse
def version_cmp(version1, version2):
version1 = parse(version1)
version2 = parse(version2)
@ -11,12 +12,14 @@ def version_cmp(version1, version2):
return 0
return -1
def date_interval_to_seconds(interval):
"""
Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0.
"""
seconds = (interval.microseconds + \
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
seconds = (
interval.microseconds + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
return seconds
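Two observations on this helper: the reflowed expression is exactly timedelta.total_seconds() (available since Python 2.7), and, despite the docstring, the expression itself does not clamp negative intervals to 0. A quick equivalence check, runnable inside this module:

from datetime import timedelta

interval = timedelta(minutes=2, seconds=3, microseconds=500000)
assert date_interval_to_seconds(interval) == interval.total_seconds() == 123.5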

View File

@ -23,20 +23,24 @@ from .timeout import ls_timeout
def keyboardInterruptHandler(signum, frame):
logger = logging.getLogger()
logger.info('\nKeyboard Interrupt\n')
logger.info("\nKeyboard Interrupt\n")
sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler)
logging.captureWarnings(True)
POLL_INTERVAL = 400
class PypoFetch(Thread):
def __init__(self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config):
class PypoFetch(Thread):
def __init__(
self, pypoFetch_q, pypoPush_q, media_q, telnet_lock, pypo_liquidsoap, config
):
Thread.__init__(self)
#Hacky...
# Hacky...
PypoFetch.ref = self
self.v1_api_client = v1_api_client.AirtimeApiClient()
@ -76,6 +80,7 @@ class PypoFetch(Thread):
Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this.
"""
def handle_message(self, message):
try:
self.logger.info("Received event from Pypo Message Handler: %s" % message)
@ -85,50 +90,52 @@ class PypoFetch(Thread):
except (UnicodeDecodeError, AttributeError):
pass
m = json.loads(message)
command = m['event_type']
command = m["event_type"]
self.logger.info("Handling command: " + command)
if command == 'update_schedule':
self.schedule_data = m['schedule']
if command == "update_schedule":
self.schedule_data = m["schedule"]
self.process_schedule(self.schedule_data)
elif command == 'reset_liquidsoap_bootstrap':
elif command == "reset_liquidsoap_bootstrap":
self.set_bootstrap_variables()
elif command == 'update_stream_setting':
elif command == "update_stream_setting":
self.logger.info("Updating stream setting...")
self.regenerate_liquidsoap_conf(m['setting'])
elif command == 'update_stream_format':
self.regenerate_liquidsoap_conf(m["setting"])
elif command == "update_stream_format":
self.logger.info("Updating stream format...")
self.update_liquidsoap_stream_format(m['stream_format'])
elif command == 'update_station_name':
self.update_liquidsoap_stream_format(m["stream_format"])
elif command == "update_station_name":
self.logger.info("Updating station name...")
self.update_liquidsoap_station_name(m['station_name'])
elif command == 'update_transition_fade':
self.update_liquidsoap_station_name(m["station_name"])
elif command == "update_transition_fade":
self.logger.info("Updating transition_fade...")
self.update_liquidsoap_transition_fade(m['transition_fade'])
elif command == 'switch_source':
self.update_liquidsoap_transition_fade(m["transition_fade"])
elif command == "switch_source":
self.logger.info("switch_on_source show command received...")
self.pypo_liquidsoap.\
get_telnet_dispatcher().\
switch_source(m['sourcename'], m['status'])
elif command == 'disconnect_source':
self.pypo_liquidsoap.get_telnet_dispatcher().switch_source(
m["sourcename"], m["status"]
)
elif command == "disconnect_source":
self.logger.info("disconnect_on_source show command received...")
self.pypo_liquidsoap.get_telnet_dispatcher().\
disconnect_source(m['sourcename'])
self.pypo_liquidsoap.get_telnet_dispatcher().disconnect_source(
m["sourcename"]
)
else:
self.logger.info("Unknown command: %s" % command)
# update timeout value
if command == 'update_schedule':
if command == "update_schedule":
self.listener_timeout = POLL_INTERVAL
else:
self.listener_timeout = self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL
self.listener_timeout = (
self.last_update_schedule_timestamp - time.time() + POLL_INTERVAL
)
if self.listener_timeout < 0:
self.listener_timeout = 0
self.logger.info("New timeout: %s" % self.listener_timeout)
except Exception as e:
self.logger.exception("Exception in handling Message Handler message")
def switch_source_temp(self, sourcename, status):
self.logger.debug('Switching source: %s to "%s" status', sourcename, status)
command = "streams."
@ -149,25 +156,28 @@ class PypoFetch(Thread):
"""
Initialize Liquidsoap environment
"""
def set_bootstrap_variables(self):
self.logger.debug('Getting information needed on bootstrap from Airtime')
self.logger.debug("Getting information needed on bootstrap from Airtime")
try:
info = self.v1_api_client.get_bootstrap_info()
except Exception as e:
self.logger.exception('Unable to get bootstrap info... Exiting pypo...')
self.logger.exception("Unable to get bootstrap info... Exiting pypo...")
self.logger.debug('info:%s', info)
self.logger.debug("info:%s", info)
commands = []
for k, v in info['switch_status'].items():
for k, v in info["switch_status"].items():
commands.append(self.switch_source_temp(k, v))
stream_format = info['stream_label']
station_name = info['station_name']
fade = info['transition_fade']
stream_format = info["stream_label"]
station_name = info["station_name"]
fade = info["transition_fade"]
commands.append(('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8'))
commands.append(('vars.station_name %s\n' % station_name).encode('utf-8'))
commands.append(('vars.default_dj_fade %s\n' % fade).encode('utf-8'))
commands.append(
("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8")
)
commands.append(("vars.station_name %s\n" % station_name).encode("utf-8"))
commands.append(("vars.default_dj_fade %s\n" % fade).encode("utf-8"))
self.pypo_liquidsoap.get_telnet_dispatcher().telnet_send(commands)
self.pypo_liquidsoap.clear_all_queues()
@ -182,21 +192,24 @@ class PypoFetch(Thread):
will be thrown."""
self.telnet_lock.acquire(False)
self.logger.info("Restarting Liquidsoap")
subprocess.call('kill -9 `pidof airtime-liquidsoap`', shell=True, close_fds=True)
subprocess.call(
"kill -9 `pidof airtime-liquidsoap`", shell=True, close_fds=True
)
#Wait here and poll Liquidsoap until it has started up
# Wait here and poll Liquidsoap until it has started up
self.logger.info("Waiting for Liquidsoap to start")
while True:
try:
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
tn.write('exit\n'.encode('utf-8'))
tn = telnetlib.Telnet(
self.config["ls_host"], self.config["ls_port"]
)
tn.write("exit\n".encode("utf-8"))
tn.read_all()
self.logger.info("Liquidsoap is up and running")
break
except Exception as e:
#sleep 0.5 seconds and try again
# sleep 0.5 seconds and try again
time.sleep(0.5)
except Exception as e:
@ -208,11 +221,11 @@ class PypoFetch(Thread):
"""
NOTE: This function is quite short after it was refactored.
"""
def regenerate_liquidsoap_conf(self, setting):
self.restart_liquidsoap()
self.update_liquidsoap_connection_status()
@ls_timeout
def update_liquidsoap_connection_status(self):
"""
@ -222,20 +235,22 @@ class PypoFetch(Thread):
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
# update the boot up time of Liquidsoap. Since Liquidsoap is not restarting,
# we are manually adjusting the bootup time variable so the status msg will get
# updated.
current_time = time.time()
boot_up_time_command = ("vars.bootup_time " + str(current_time) + "\n").encode('utf-8')
boot_up_time_command = (
"vars.bootup_time " + str(current_time) + "\n"
).encode("utf-8")
self.logger.info(boot_up_time_command)
tn.write(boot_up_time_command)
connection_status = ("streams.connection_status\n").encode('utf-8')
connection_status = ("streams.connection_status\n").encode("utf-8")
self.logger.info(connection_status)
tn.write(connection_status)
tn.write('exit\n'.encode('utf-8'))
tn.write("exit\n".encode("utf-8"))
output = tn.read_all()
except Exception as e:
@ -253,12 +268,13 @@ class PypoFetch(Thread):
fake_time = current_time + 1
for s in streams:
info = s.split(':')
info = s.split(":")
stream_id = info[0]
status = info[1]
if(status == "true"):
self.v1_api_client.notify_liquidsoap_status("OK", stream_id, str(fake_time))
if status == "true":
self.v1_api_client.notify_liquidsoap_status(
"OK", stream_id, str(fake_time)
)
@ls_timeout
def update_liquidsoap_stream_format(self, stream_format):
@ -266,11 +282,11 @@ class PypoFetch(Thread):
# TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
command = ('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8')
tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
command = ("vars.stream_metadata_type %s\n" % stream_format).encode("utf-8")
self.logger.info(command)
tn.write(command)
tn.write('exit\n'.encode('utf-8'))
tn.write("exit\n".encode("utf-8"))
tn.read_all()
except Exception as e:
self.logger.exception(e)
@ -283,11 +299,11 @@ class PypoFetch(Thread):
# TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
command = ('vars.default_dj_fade %s\n' % fade).encode('utf-8')
tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
command = ("vars.default_dj_fade %s\n" % fade).encode("utf-8")
self.logger.info(command)
tn.write(command)
tn.write('exit\n'.encode('utf-8'))
tn.write("exit\n".encode("utf-8"))
tn.read_all()
except Exception as e:
self.logger.exception(e)
@ -301,11 +317,11 @@ class PypoFetch(Thread):
try:
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['ls_host'], self.config['ls_port'])
command = ('vars.station_name %s\n' % station_name).encode('utf-8')
tn = telnetlib.Telnet(self.config["ls_host"], self.config["ls_port"])
command = ("vars.station_name %s\n" % station_name).encode("utf-8")
self.logger.info(command)
tn.write(command)
tn.write('exit\n'.encode('utf-8'))
tn.write("exit\n".encode("utf-8"))
tn.read_all()
except Exception as e:
self.logger.exception(e)
@ -322,6 +338,7 @@ class PypoFetch(Thread):
to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
- runs the cleanup routine, to get rid of unused cached files
"""
def process_schedule(self, schedule_data):
self.last_update_schedule_timestamp = time.time()
self.logger.debug(schedule_data)
@ -343,20 +360,21 @@ class PypoFetch(Thread):
media_copy = {}
for key in media:
media_item = media[key]
if (media_item['type'] == 'file'):
if media_item["type"] == "file":
fileExt = self.sanity_check_media_item(media_item)
dst = os.path.join(download_dir, f'{media_item["id"]}{fileExt}')
media_item['dst'] = dst
media_item['file_ready'] = False
media_item["dst"] = dst
media_item["file_ready"] = False
media_filtered[key] = media_item
media_item['start'] = datetime.strptime(media_item['start'],
"%Y-%m-%d-%H-%M-%S")
media_item['end'] = datetime.strptime(media_item['end'],
"%Y-%m-%d-%H-%M-%S")
media_item["start"] = datetime.strptime(
media_item["start"], "%Y-%m-%d-%H-%M-%S"
)
media_item["end"] = datetime.strptime(
media_item["end"], "%Y-%m-%d-%H-%M-%S"
)
media_copy[key] = media_item
self.media_prepare_queue.put(copy.copy(media_filtered))
except Exception as e:
self.logger.exception(e)
@ -365,37 +383,36 @@ class PypoFetch(Thread):
self.logger.debug("Pushing to pypo-push")
self.push_queue.put(media_copy)
# cleanup
try:
self.cache_cleanup(media)
except Exception as e:
self.logger.exception(e)
#do basic validation of file parameters. Useful for debugging
#purposes
# do basic validation of file parameters. Useful for debugging
# purposes
def sanity_check_media_item(self, media_item):
start = datetime.strptime(media_item['start'], "%Y-%m-%d-%H-%M-%S")
end = datetime.strptime(media_item['end'], "%Y-%m-%d-%H-%M-%S")
start = datetime.strptime(media_item["start"], "%Y-%m-%d-%H-%M-%S")
end = datetime.strptime(media_item["end"], "%Y-%m-%d-%H-%M-%S")
mime = media_item['metadata']['mime']
mime = media_item["metadata"]["mime"]
mimetypes.init(["%s/mime.types" % os.path.dirname(os.path.realpath(__file__))])
mime_ext = mimetypes.guess_extension(mime, strict=False)
length1 = pure.date_interval_to_seconds(end - start)
length2 = media_item['cue_out'] - media_item['cue_in']
length2 = media_item["cue_out"] - media_item["cue_in"]
if abs(length2 - length1) > 1:
self.logger.error("end - start length: %s", length1)
self.logger.error("cue_out - cue_in length: %s", length2)
self.logger.error("Two lengths are not equal!!!")
media_item['file_ext'] = mime_ext
media_item["file_ext"] = mime_ext
return mime_ext
def is_file_opened(self, path):
#Capture stderr to avoid polluting py-interpreter.log
# Capture stderr to avoid polluting py-interpreter.log
proc = Popen(["lsof", path], stdout=PIPE, stderr=PIPE)
out = proc.communicate()[0].strip()
return bool(out)
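lsof prints one line per process currently holding the file open and nothing otherwise, so a non-empty stdout answers "is this file still in use?"; a minimal sketch (path and output are illustrative):

    from subprocess import PIPE, Popen

    proc = Popen(["lsof", "/tmp/example.mp3"], stdout=PIPE, stderr=PIPE)
    out = proc.communicate()[0].strip()
    # out looks like b"liquidso 1234 ... /tmp/example.mp3" while the file is
    # being played, and is empty once every process has closed it
    print(bool(out))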
@ -411,10 +428,14 @@ class PypoFetch(Thread):
for mkey in media:
media_item = media[mkey]
if media_item['type'] == 'file':
if media_item["type"] == "file":
if "file_ext" not in media_item.keys():
media_item["file_ext"] = mimetypes.guess_extension(media_item['metadata']['mime'], strict=False)
scheduled_file_set.add("{}{}".format(media_item["id"], media_item["file_ext"]))
media_item["file_ext"] = mimetypes.guess_extension(
media_item["metadata"]["mime"], strict=False
)
scheduled_file_set.add(
"{}{}".format(media_item["id"], media_item["file_ext"])
)
expired_files = cached_file_set - scheduled_file_set
@ -424,9 +445,9 @@ class PypoFetch(Thread):
path = os.path.join(self.cache_dir, f)
self.logger.debug("Removing %s" % path)
#check if this file is opened (sometimes Liquidsoap is still
#playing the file due to our knowledge of the track length
#being incorrect!)
# check if this file is opened (sometimes Liquidsoap is still
# playing the file due to our knowledge of the track length
# being incorrect!)
if not self.is_file_opened(path):
os.remove(path)
self.logger.info("File '%s' removed" % path)
@ -441,7 +462,7 @@ class PypoFetch(Thread):
self.process_schedule(self.schedule_data)
return True
except Exception as e:
self.logger.error('Unable to fetch schedule')
self.logger.error("Unable to fetch schedule")
self.logger.exception(e)
return False
@ -462,11 +483,11 @@ class PypoFetch(Thread):
Timer(120, self.update_metadata_on_tunein).start()
def main(self):
#Make sure all Liquidsoap queues are empty. This is important in the
#case where we've just restarted the pypo scheduler, but Liquidsoap still
#is playing tracks. In this case let's just restart everything from scratch
#so that we can repopulate our dictionary that keeps track of what
#Liquidsoap is playing much more easily.
# Make sure all Liquidsoap queues are empty. This is important in the
# case where we've just restarted the pypo scheduler, but Liquidsoap still
# is playing tracks. In this case let's just restart everything from scratch
# so that we can repopulate our dictionary that keeps track of what
# Liquidsoap is playing much more easily.
self.pypo_liquidsoap.clear_all_queues()
self.set_bootstrap_variables()
@ -500,7 +521,9 @@ class PypoFetch(Thread):
Currently we are checking every POLL_INTERVAL seconds
"""
message = self.fetch_queue.get(block=True, timeout=self.listener_timeout)
message = self.fetch_queue.get(
block=True, timeout=self.listener_timeout
)
manual_fetch_needed = False
self.handle_message(message)
except Empty as e:
@ -513,7 +536,7 @@ class PypoFetch(Thread):
if manual_fetch_needed:
self.persistent_manual_schedule_fetch(max_attempts=5)
except Exception as e:
self.logger.exception('Failed to manually fetch the schedule.')
self.logger.exception("Failed to manually fetch the schedule.")
loops += 1
@ -522,4 +545,4 @@ class PypoFetch(Thread):
Entry point of the thread
"""
self.main()
self.logger.info('PypoFetch thread exiting')
self.logger.info("PypoFetch thread exiting")

View File

@ -18,13 +18,12 @@ import hashlib
from requests.exceptions import ConnectionError, HTTPError, Timeout
from api_clients import version2 as api_client
CONFIG_PATH = '/etc/airtime/airtime.conf'
CONFIG_PATH = "/etc/airtime/airtime.conf"
logging.captureWarnings(True)
class PypoFile(Thread):
def __init__(self, schedule_queue, config):
Thread.__init__(self)
self.logger = logging.getLogger()
@ -38,10 +37,10 @@ class PypoFile(Thread):
"""
Copy media_item from local library directory to local cache directory.
"""
src = media_item['uri']
dst = media_item['dst']
src = media_item["uri"]
dst = media_item["dst"]
src_size = media_item['filesize']
src_size = media_item["filesize"]
dst_exists = True
try:
@ -59,34 +58,44 @@ class PypoFile(Thread):
# become an issue here... This needs proper cache management.
# https://github.com/LibreTime/libretime/issues/756#issuecomment-477853018
# https://github.com/LibreTime/libretime/pull/845
self.logger.debug("file %s already exists in local cache as %s, skipping copying..." % (src, dst))
self.logger.debug(
"file %s already exists in local cache as %s, skipping copying..."
% (src, dst)
)
else:
do_copy = True
media_item['file_ready'] = not do_copy
media_item["file_ready"] = not do_copy
if do_copy:
self.logger.info("copying from %s to local cache %s" % (src, dst))
try:
with open(dst, "wb") as handle:
self.logger.info(media_item)
response = self.api_client.services.file_download_url(id=media_item['id'])
response = self.api_client.services.file_download_url(
id=media_item["id"]
)
if not response.ok:
self.logger.error(response)
raise Exception("%s - Error occurred downloading file" % response.status_code)
raise Exception(
"%s - Error occurred downloading file"
% response.status_code
)
for chunk in response.iter_content(chunk_size=1024):
handle.write(chunk)
#make file world readable and owner writable
# make file world readable and owner writable
os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
if media_item['filesize'] == 0:
file_size = self.report_file_size_and_md5_to_airtime(dst, media_item["id"], host, username)
if media_item["filesize"] == 0:
file_size = self.report_file_size_and_md5_to_airtime(
dst, media_item["id"], host, username
)
media_item["filesize"] = file_size
media_item['file_ready'] = True
media_item["file_ready"] = True
except Exception as e:
self.logger.error("Could not copy from %s to %s" % (src, dst))
self.logger.error(e)
@ -95,7 +104,7 @@ class PypoFile(Thread):
try:
file_size = os.path.getsize(file_path)
with open(file_path, 'rb') as fh:
with open(file_path, "rb") as fh:
m = hashlib.md5()
while True:
data = fh.read(8192)
@ -105,15 +114,21 @@ class PypoFile(Thread):
md5_hash = m.hexdigest()
except (OSError, IOError) as e:
file_size = 0
self.logger.error("Error getting file size and md5 hash for file id %s" % file_id)
self.logger.error(
"Error getting file size and md5 hash for file id %s" % file_id
)
self.logger.error(e)
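The 8192-byte read loop above produces the same digest as hashing the file in one pass; it only trades memory for the loop, as this sketch shows:

    import hashlib

    with open(file_path, "rb") as fh:  # file_path as above
        whole = hashlib.md5(fh.read()).hexdigest()  # reads the entire file at once
    assert whole == md5_hash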
# Make PUT request to Airtime to update the file size and hash
error_msg = "Could not update media file %s with file size and md5 hash" % file_id
error_msg = (
"Could not update media file %s with file size and md5 hash" % file_id
)
try:
put_url = "%s://%s:%s/rest/media/%s" % (host[0], host[1], host[2], file_id)
payload = json.dumps({'filesize': file_size, 'md5': md5_hash})
response = requests.put(put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, ''))
payload = json.dumps({"filesize": file_size, "md5": md5_hash})
response = requests.put(
put_url, data=payload, auth=requests.auth.HTTPBasicAuth(api_key, "")
)
if not response.ok:
self.logger.error(error_msg)
except (ConnectionError, Timeout):
@ -160,7 +175,9 @@ class PypoFile(Thread):
try:
config.readfp(open(config_path))
except IOError as e:
logging.debug("Failed to open config file at %s: %s" % (config_path, e.strerror))
logging.debug(
"Failed to open config file at %s: %s" % (config_path, e.strerror)
)
sys.exit()
except Exception as e:
logging.debug(e.strerror)
@ -189,12 +206,12 @@ class PypoFile(Thread):
except Empty as e:
pass
media_item = self.get_highest_priority_media_item(self.media)
if media_item is not None:
self.copy_file(media_item)
except Exception as e:
import traceback
top = traceback.format_exc()
self.logger.error(str(e))
self.logger.error(top)
@ -204,9 +221,10 @@ class PypoFile(Thread):
"""
Entry point of the thread
"""
try: self.main()
try:
self.main()
except Exception as e:
top = traceback.format_exc()
self.logger.error('PypoFile Exception: %s', top)
self.logger.error("PypoFile Exception: %s", top)
time.sleep(5)
self.logger.info('PypoFile thread exiting')
self.logger.info("PypoFile thread exiting")

View File

@ -11,12 +11,17 @@ import time
from queue import Empty
import signal
def keyboardInterruptHandler(signum, frame):
logger = logging.getLogger()
logger.info('\nKeyboard Interrupt\n')
logger.info("\nKeyboard Interrupt\n")
sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler)
class PypoLiqQueue(Thread):
def __init__(self, q, pypo_liquidsoap, logger):
Thread.__init__(self)
@ -35,18 +40,20 @@ class PypoLiqQueue(Thread):
self.logger.info("waiting indefinitely for schedule")
media_schedule = self.queue.get(block=True)
else:
self.logger.info("waiting %ss until next scheduled item" % \
time_until_next_play)
media_schedule = self.queue.get(block=True, \
timeout=time_until_next_play)
self.logger.info(
"waiting %ss until next scheduled item" % time_until_next_play
)
media_schedule = self.queue.get(
block=True, timeout=time_until_next_play
)
except Empty as e:
#Time to push a scheduled item.
# Time to push a scheduled item.
media_item = schedule_deque.popleft()
self.pypo_liquidsoap.play(media_item)
if len(schedule_deque):
time_until_next_play = \
self.date_interval_to_seconds(
schedule_deque[0]['start'] - datetime.utcnow())
time_until_next_play = self.date_interval_to_seconds(
schedule_deque[0]["start"] - datetime.utcnow()
)
if time_until_next_play < 0:
time_until_next_play = 0
else:
@ -54,7 +61,7 @@ class PypoLiqQueue(Thread):
else:
self.logger.info("New schedule received: %s", media_schedule)
#new schedule received. Replace old one with this.
# new schedule received. Replace old one with this.
schedule_deque.clear()
keys = sorted(media_schedule.keys())
@ -63,28 +70,28 @@ class PypoLiqQueue(Thread):
if len(keys):
time_until_next_play = self.date_interval_to_seconds(
media_schedule[keys[0]]['start'] -
datetime.utcnow())
media_schedule[keys[0]]["start"] - datetime.utcnow()
)
else:
time_until_next_play = None
def date_interval_to_seconds(self, interval):
"""
Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0.
"""
seconds = (interval.microseconds + \
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
if seconds < 0: seconds = 0
seconds = (
interval.microseconds
+ (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
if seconds < 0:
seconds = 0
return seconds
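On Python 2.7 and later this is equivalent to clamping timedelta.total_seconds() at zero; a quick worked example:

    from datetime import timedelta

    interval = timedelta(days=1, seconds=30, microseconds=500000)
    seconds = (
        interval.microseconds + (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
    ) / float(10 ** 6)
    assert seconds == 86430.5 == interval.total_seconds()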
def run(self):
try: self.main()
try:
self.main()
except Exception as e:
self.logger.error('PypoLiqQueue Exception: %s', traceback.format_exc())
self.logger.error("PypoLiqQueue Exception: %s", traceback.format_exc())

View File

@ -8,27 +8,25 @@ from datetime import timedelta
from . import eventtypes
import time
class PypoLiquidsoap():
class PypoLiquidsoap:
def __init__(self, logger, telnet_lock, host, port):
self.logger = logger
self.liq_queue_tracker = {
"s0": None,
"s1": None,
"s2": None,
"s3": None,
"s4": None,
}
"s0": None,
"s1": None,
"s2": None,
"s3": None,
"s4": None,
}
self.telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, \
logger,\
host,\
port,\
list(self.liq_queue_tracker.keys()))
self.telnet_liquidsoap = TelnetLiquidsoap(
telnet_lock, logger, host, port, list(self.liq_queue_tracker.keys())
)
def get_telnet_dispatcher(self):
return self.telnet_liquidsoap
def play(self, media_item):
if media_item["type"] == eventtypes.FILE:
self.handle_file_type(media_item)
@ -37,28 +35,32 @@ class PypoLiquidsoap():
elif media_item["type"] == eventtypes.STREAM_BUFFER_START:
self.telnet_liquidsoap.start_web_stream_buffer(media_item)
elif media_item["type"] == eventtypes.STREAM_OUTPUT_START:
if media_item['row_id'] != self.telnet_liquidsoap.current_prebuffering_stream_id:
#this is called if the stream wasn't scheduled sufficiently ahead of time
#so that the prebuffering stage could take effect. Let's do the prebuffering now.
if (
media_item["row_id"]
!= self.telnet_liquidsoap.current_prebuffering_stream_id
):
# this is called if the stream wasn't scheduled sufficiently ahead of time
# so that the prebuffering stage could take effect. Let's do the prebuffering now.
self.telnet_liquidsoap.start_web_stream_buffer(media_item)
self.telnet_liquidsoap.start_web_stream(media_item)
elif media_item['type'] == eventtypes.STREAM_BUFFER_END:
elif media_item["type"] == eventtypes.STREAM_BUFFER_END:
self.telnet_liquidsoap.stop_web_stream_buffer()
elif media_item['type'] == eventtypes.STREAM_OUTPUT_END:
elif media_item["type"] == eventtypes.STREAM_OUTPUT_END:
self.telnet_liquidsoap.stop_web_stream_output()
else: raise UnknownMediaItemType(str(media_item))
else:
raise UnknownMediaItemType(str(media_item))
def handle_file_type(self, media_item):
"""
Wait 200 seconds (2000 iterations) for file to become ready,
Wait 200 seconds (2000 iterations) for file to become ready,
otherwise give up on it.
"""
iter_num = 0
while not media_item['file_ready'] and iter_num < 2000:
while not media_item["file_ready"] and iter_num < 2000:
time.sleep(0.1)
iter_num += 1
if media_item['file_ready']:
if media_item["file_ready"]:
available_queue = self.find_available_queue()
try:
@ -68,27 +70,29 @@ class PypoLiquidsoap():
self.logger.error(e)
raise
else:
self.logger.warn("File %s did not become ready in less than 5 seconds. Skipping...", media_item['dst'])
self.logger.warn(
"File %s did not become ready in less than 5 seconds. Skipping...",
media_item["dst"],
)
def handle_event_type(self, media_item):
if media_item['event_type'] == "kick_out":
if media_item["event_type"] == "kick_out":
self.telnet_liquidsoap.disconnect_source("live_dj")
elif media_item['event_type'] == "switch_off":
elif media_item["event_type"] == "switch_off":
self.telnet_liquidsoap.switch_source("live_dj", "off")
def is_media_item_finished(self, media_item):
if media_item is None:
return True
else:
return datetime.utcnow() > media_item['end']
return datetime.utcnow() > media_item["end"]
def find_available_queue(self):
available_queue = None
for i in self.liq_queue_tracker:
mi = self.liq_queue_tracker[i]
if mi == None or self.is_media_item_finished(mi):
#queue "i" is available. Push to this queue
# queue "i" is available. Push to this queue
available_queue = i
if available_queue == None:
@ -96,7 +100,6 @@ class PypoLiquidsoap():
return available_queue
def verify_correct_present_media(self, scheduled_now):
"""
verify whether Liquidsoap is currently playing the correct files.
@ -122,11 +125,13 @@ class PypoLiquidsoap():
"""
try:
scheduled_now_files = \
[x for x in scheduled_now if x["type"] == eventtypes.FILE]
scheduled_now_files = [
x for x in scheduled_now if x["type"] == eventtypes.FILE
]
scheduled_now_webstream = \
[x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START]
scheduled_now_webstream = [
x for x in scheduled_now if x["type"] == eventtypes.STREAM_OUTPUT_START
]
schedule_ids = set([x["row_id"] for x in scheduled_now_files])
@ -141,19 +146,21 @@ class PypoLiquidsoap():
to_be_removed = set()
to_be_added = set()
#Iterate over the new files, and compare them to currently scheduled
#tracks. If already in liquidsoap queue still need to make sure they don't
#have different attributes
#if replay gain changes, it shouldn't change the amplification of the currently playing song
# Iterate over the new files, and compare them to currently scheduled
# tracks. If already in liquidsoap queue still need to make sure they don't
# have different attributes
# if replay gain changes, it shouldn't change the amplification of the currently playing song
for i in scheduled_now_files:
if i["row_id"] in row_id_map:
mi = row_id_map[i["row_id"]]
correct = mi['start'] == i['start'] and \
mi['end'] == i['end'] and \
mi['row_id'] == i['row_id']
correct = (
mi["start"] == i["start"]
and mi["end"] == i["end"]
and mi["row_id"] == i["row_id"]
)
if not correct:
#need to re-add
# need to re-add
self.logger.info("Track %s found to have new attr." % i)
to_be_removed.add(i["row_id"])
to_be_added.add(i["row_id"])
@ -162,37 +169,38 @@ class PypoLiquidsoap():
to_be_added.update(schedule_ids - liq_queue_ids)
if to_be_removed:
self.logger.info("Need to remove items from Liquidsoap: %s" % \
to_be_removed)
self.logger.info(
"Need to remove items from Liquidsoap: %s" % to_be_removed
)
#remove files from Liquidsoap's queue
# remove files from Liquidsoap's queue
for i in self.liq_queue_tracker:
mi = self.liq_queue_tracker[i]
if mi is not None and mi["row_id"] in to_be_removed:
self.stop(i)
if to_be_added:
self.logger.info("Need to add items to Liquidsoap *now*: %s" % \
to_be_added)
self.logger.info(
"Need to add items to Liquidsoap *now*: %s" % to_be_added
)
for i in scheduled_now_files:
if i["row_id"] in to_be_added:
self.modify_cue_point(i)
self.play(i)
#handle webstreams
# handle webstreams
current_stream_id = self.telnet_liquidsoap.get_current_stream_id()
if scheduled_now_webstream:
if int(current_stream_id) != int(scheduled_now_webstream[0]["row_id"]):
self.play(scheduled_now_webstream[0])
elif current_stream_id != "-1":
#something is playing and it shouldn't be.
# something is playing and it shouldn't be.
self.telnet_liquidsoap.stop_web_stream_buffer()
self.telnet_liquidsoap.stop_web_stream_output()
except KeyError as e:
self.logger.error("Error: Malformed event in schedule. " + str(e))
def stop(self, queue):
self.telnet_liquidsoap.queue_remove(queue)
self.liq_queue_tracker[queue] = None
@ -209,24 +217,32 @@ class PypoLiquidsoap():
tnow = datetime.utcnow()
link_start = link['start']
link_start = link["start"]
diff_td = tnow - link_start
diff_sec = self.date_interval_to_seconds(diff_td)
if diff_sec > 0:
self.logger.debug("media item was supposed to start %s ago. Preparing to start..", diff_sec)
original_cue_in_td = timedelta(seconds=float(link['cue_in']))
link['cue_in'] = self.date_interval_to_seconds(original_cue_in_td) + diff_sec
self.logger.debug(
"media item was supposed to start %s ago. Preparing to start..",
diff_sec,
)
original_cue_in_td = timedelta(seconds=float(link["cue_in"]))
link["cue_in"] = (
self.date_interval_to_seconds(original_cue_in_td) + diff_sec
)
def date_interval_to_seconds(self, interval):
"""
Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0.
"""
seconds = (interval.microseconds + \
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
if seconds < 0: seconds = 0
seconds = (
interval.microseconds
+ (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
if seconds < 0:
seconds = 0
return seconds
@ -237,5 +253,6 @@ class PypoLiquidsoap():
class UnknownMediaItemType(Exception):
pass
class NoQueueAvailableException(Exception):
pass

View File

@ -6,6 +6,7 @@ import os
import sys
from threading import Thread
import time
# For RabbitMQ
from kombu.connection import Connection
from kombu.messaging import Exchange, Queue
@ -26,17 +27,18 @@ class RabbitConsumer(ConsumerMixin):
def get_consumers(self, Consumer, channel):
return [
Consumer(self.queues, callbacks=[self.on_message], accept=['text/plain']),
Consumer(self.queues, callbacks=[self.on_message], accept=["text/plain"]),
]
def on_message(self, body, message):
self.handler.handle_message(message.payload)
message.ack()
class PypoMessageHandler(Thread):
def __init__(self, pq, rq, config):
Thread.__init__(self)
self.logger = logging.getLogger('message_h')
self.logger = logging.getLogger("message_h")
self.pypo_queue = pq
self.recorder_queue = rq
self.config = config
@ -44,13 +46,17 @@ class PypoMessageHandler(Thread):
def init_rabbit_mq(self):
self.logger.info("Initializing RabbitMQ stuff")
try:
schedule_exchange = Exchange("airtime-pypo", "direct", durable=True, auto_delete=True)
schedule_exchange = Exchange(
"airtime-pypo", "direct", durable=True, auto_delete=True
)
schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange, key="foo")
with Connection(self.config["host"], \
self.config["user"], \
self.config["password"], \
self.config["vhost"], \
heartbeat = 5) as connection:
with Connection(
self.config["host"],
self.config["user"],
self.config["password"],
self.config["vhost"],
heartbeat=5,
) as connection:
rabbit = RabbitConsumer(connection, [schedule_queue], self)
rabbit.run()
except Exception as e:
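The consumer above binds the queue "pypo-fetch" to the direct exchange "airtime-pypo", so a matching test publisher looks roughly like this (a sketch mirroring the declaration above; the broker URL is a placeholder):

    import json
    from kombu import Connection, Exchange, Queue

    exchange = Exchange("airtime-pypo", "direct", durable=True, auto_delete=True)
    queue = Queue("pypo-fetch", exchange=exchange, key="foo")

    with Connection("amqp://guest:guest@localhost//") as conn:
        # a plain str body is sent as text/plain, which the consumer accepts
        conn.Producer().publish(
            json.dumps({"event_type": "update_station_name"}),
            exchange=exchange,
            routing_key="foo",
            declare=[queue],
        )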
@ -60,6 +66,7 @@ class PypoMessageHandler(Thread):
Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this.
"""
def handle_message(self, message):
try:
self.logger.info("Received event from RabbitMQ: %s" % message)
@ -69,36 +76,36 @@ class PypoMessageHandler(Thread):
except (UnicodeDecodeError, AttributeError):
pass
m = json.loads(message)
command = m['event_type']
command = m["event_type"]
self.logger.info("Handling command: " + command)
if command == 'update_schedule':
if command == "update_schedule":
self.logger.info("Updating schedule...")
self.pypo_queue.put(message)
elif command == 'reset_liquidsoap_bootstrap':
elif command == "reset_liquidsoap_bootstrap":
self.logger.info("Resetting bootstrap vars...")
self.pypo_queue.put(message)
elif command == 'update_stream_setting':
elif command == "update_stream_setting":
self.logger.info("Updating stream setting...")
self.pypo_queue.put(message)
elif command == 'update_stream_format':
elif command == "update_stream_format":
self.logger.info("Updating stream format...")
self.pypo_queue.put(message)
elif command == 'update_station_name':
elif command == "update_station_name":
self.logger.info("Updating station name...")
self.pypo_queue.put(message)
elif command == 'switch_source':
elif command == "switch_source":
self.logger.info("switch_source command received...")
self.pypo_queue.put(message)
elif command == 'update_transition_fade':
elif command == "update_transition_fade":
self.logger.info("Updating trasition fade...")
self.pypo_queue.put(message)
elif command == 'disconnect_source':
elif command == "disconnect_source":
self.logger.info("disconnect_source command received...")
self.pypo_queue.put(message)
elif command == 'update_recorder_schedule':
elif command == "update_recorder_schedule":
self.recorder_queue.put(message)
elif command == 'cancel_recording':
elif command == "cancel_recording":
self.recorder_queue.put(message)
else:
self.logger.info("Unknown command: %s" % command)
@ -109,9 +116,11 @@ class PypoMessageHandler(Thread):
try:
self.init_rabbit_mq()
except Exception as e:
self.logger.error('Exception: %s', e)
self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", traceback.format_exc())
self.logger.error("Error connecting to RabbitMQ Server. Trying again in few seconds")
self.logger.error(
"Error connecting to RabbitMQ Server. Trying again in few seconds"
)
time.sleep(5)
"""
@ -119,7 +128,7 @@ class PypoMessageHandler(Thread):
Wait for schedule updates from RabbitMQ, but in case there aren't any,
poll the server to get the upcoming schedule.
"""
def run(self):
while True:
self.main()

View File

@ -29,10 +29,12 @@ PUSH_INTERVAL = 2
def is_stream(media_item):
return media_item['type'] == 'stream_output_start'
return media_item["type"] == "stream_output_start"
def is_file(media_item):
return media_item['type'] == 'file'
return media_item["type"] == "file"
class PypoPush(Thread):
def __init__(self, q, telnet_lock, pypo_liquidsoap, config):
@ -44,20 +46,19 @@ class PypoPush(Thread):
self.config = config
self.pushed_objects = {}
self.logger = logging.getLogger('push')
self.logger = logging.getLogger("push")
self.current_prebuffering_stream_id = None
self.queue_id = 0
self.future_scheduled_queue = Queue()
self.pypo_liquidsoap = pypo_liquidsoap
self.plq = PypoLiqQueue(self.future_scheduled_queue, \
self.pypo_liquidsoap, \
self.logger)
self.plq = PypoLiqQueue(
self.future_scheduled_queue, self.pypo_liquidsoap, self.logger
)
self.plq.daemon = True
self.plq.start()
def main(self):
loops = 0
heartbeat_period = math.floor(30 / PUSH_INTERVAL)
@ -72,10 +73,11 @@ class PypoPush(Thread):
raise
else:
self.logger.debug(media_schedule)
#separate media_schedule list into currently_playing and
#scheduled_for_future lists
currently_playing, scheduled_for_future = \
self.separate_present_future(media_schedule)
# separate media_schedule list into currently_playing and
# scheduled_for_future lists
currently_playing, scheduled_for_future = self.separate_present_future(
media_schedule
)
self.pypo_liquidsoap.verify_correct_present_media(currently_playing)
self.future_scheduled_queue.put(scheduled_for_future)
@ -85,7 +87,6 @@ class PypoPush(Thread):
loops = 0
loops += 1
def separate_present_future(self, media_schedule):
tnow = datetime.utcnow()
@ -96,7 +97,7 @@ class PypoPush(Thread):
for mkey in sorted_keys:
media_item = media_schedule[mkey]
diff_td = tnow - media_item['start']
diff_td = tnow - media_item["start"]
diff_sec = self.date_interval_to_seconds(diff_td)
if diff_sec >= 0:
@ -111,8 +112,10 @@ class PypoPush(Thread):
Convert timedelta object into int representing the number of seconds. If
number of seconds is less than 0, then return 0.
"""
seconds = (interval.microseconds + \
(interval.seconds + interval.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
seconds = (
interval.microseconds
+ (interval.seconds + interval.days * 24 * 3600) * 10 ** 6
) / float(10 ** 6)
return seconds
@ -120,18 +123,18 @@ class PypoPush(Thread):
def stop_web_stream_all(self):
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.config['LS_HOST'], self.config['LS_PORT'])
tn = telnetlib.Telnet(self.config["LS_HOST"], self.config["LS_PORT"])
#msg = 'dynamic_source.read_stop_all xxx\n'
msg = 'http.stop\n'
# msg = 'dynamic_source.read_stop_all xxx\n'
msg = "http.stop\n"
self.logger.debug(msg)
tn.write(msg)
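# NB: telnetlib expects bytes on Python 3; these plain-str writes would
# need msg.encode("utf-8") like the telnet helpers elsewhere in pypo.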
msg = 'dynamic_source.output_stop\n'
msg = "dynamic_source.output_stop\n"
self.logger.debug(msg)
tn.write(msg)
msg = 'dynamic_source.id -1\n'
msg = "dynamic_source.id -1\n"
self.logger.debug(msg)
tn.write(msg)
@ -145,10 +148,10 @@ class PypoPush(Thread):
def run(self):
while True:
try: self.main()
try:
self.main()
except Exception as e:
top = traceback.format_exc()
self.logger.error('Pypo Push Exception: %s', top)
self.logger.error("Pypo Push Exception: %s", top)
time.sleep(5)
self.logger.info('PypoPush thread exiting')
self.logger.info("PypoPush thread exiting")

View File

@ -24,6 +24,7 @@ import mutagen
from api_clients import version1 as v1_api_client
from api_clients import version2 as api_client
def api_client(logger):
"""
api_client returns the correct instance of AirtimeApiClient. Although there is only one
@ -31,15 +32,17 @@ def api_client(logger):
"""
return v1_api_client.AirtimeApiClient(logger)
# loading config file
try:
config = ConfigObj('/etc/airtime/airtime.conf')
config = ConfigObj("/etc/airtime/airtime.conf")
except Exception as e:
print("Error loading config file: {}".format(e))
sys.exit()
# TODO : add docstrings everywhere in this module
def getDateTimeObj(time):
# TODO : clean up for this function later.
# - use tuples to parse result from split (instead of indices)
@ -49,17 +52,20 @@ def getDateTimeObj(time):
# shadowed
# - add docstring to document all behaviour of this function
timeinfo = time.split(" ")
date = [ int(x) for x in timeinfo[0].split("-") ]
my_time = [ int(x) for x in timeinfo[1].split(":") ]
return datetime.datetime(date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None)
date = [int(x) for x in timeinfo[0].split("-")]
my_time = [int(x) for x in timeinfo[1].split(":")]
return datetime.datetime(
date[0], date[1], date[2], my_time[0], my_time[1], my_time[2], 0, None
)
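So, for example, getDateTimeObj("2021-05-27 16:23:02") returns datetime.datetime(2021, 5, 27, 16, 23, 2); the whole function is a hand-rolled equivalent of:

    import datetime

    def get_datetime(time_str):
        # same result as getDateTimeObj, without the manual splitting
        return datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")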
PUSH_INTERVAL = 2
class ShowRecorder(Thread):
def __init__ (self, show_instance, show_name, filelength, start_time):
class ShowRecorder(Thread):
def __init__(self, show_instance, show_name, filelength, start_time):
Thread.__init__(self)
self.logger = logging.getLogger('recorder')
self.logger = logging.getLogger("recorder")
self.api_client = api_client(self.logger)
self.filelength = filelength
self.start_time = start_time
@ -75,35 +81,41 @@ class ShowRecorder(Thread):
if config["pypo"]["record_file_type"] in ["mp3", "ogg"]:
filetype = config["pypo"]["record_file_type"]
else:
filetype = "ogg";
filetype = "ogg"
joined_path = os.path.join(config["pypo"]["base_recorded_files"], filename)
filepath = "%s.%s" % (joined_path, filetype)
br = config["pypo"]["record_bitrate"]
sr = config["pypo"]["record_samplerate"]
c = config["pypo"]["record_channels"]
c = config["pypo"]["record_channels"]
ss = config["pypo"]["record_sample_size"]
#-f:16,2,44100
#-b:256
command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % \
(ss, c, sr, filepath, br, length)
# -f:16,2,44100
# -b:256
command = "ecasound -f:%s,%s,%s -i alsa -o %s,%s000 -t:%s" % (
ss,
c,
sr,
filepath,
br,
length,
)
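With, say, ss=16, c=2, sr=44100, br=256, length=3600 and filepath="/tmp/show.ogg" (all values illustrative), the template expands to:

    ecasound -f:16,2,44100 -i alsa -o /tmp/show.ogg,256000 -t:3600

i.e. record from ALSA as 16-bit stereo at 44.1 kHz into /tmp/show.ogg at 256 kbps for 3600 seconds.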
args = command.split(" ")
self.logger.info("starting record")
self.logger.info("command " + command)
self.p = Popen(args,stdout=PIPE,stderr=PIPE)
self.p = Popen(args, stdout=PIPE, stderr=PIPE)
#blocks at the following line until the child process
#quits
# blocks at the following line until the child process
# quits
self.p.wait()
outmsgs = self.p.stdout.readlines()
for msg in outmsgs:
m = re.search('^ERROR',msg)
m = re.search("^ERROR", msg)
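# NB: stdout was opened in binary mode, so msg is bytes here; on Python 3
# this search needs a bytes pattern (e.g. rb"^ERROR") to avoid a TypeError.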
if not m == None:
self.logger.info('Recording error is found: %s', outmsgs)
self.logger.info("Recording error is found: %s", outmsgs)
self.logger.info("finishing record, return code %s", self.p.returncode)
code = self.p.returncode
@ -112,21 +124,25 @@ class ShowRecorder(Thread):
return code, filepath
def cancel_recording(self):
#send signal interrupt (2)
# send signal interrupt (2)
self.logger.info("Show manually cancelled!")
if (self.p is not None):
if self.p is not None:
self.p.send_signal(signal.SIGINT)
#if self.p is defined, then the child process ecasound is recording
# if self.p is defined, then the child process ecasound is recording
def is_recording(self):
return (self.p is not None)
return self.p is not None
def upload_file(self, filepath):
filename = os.path.split(filepath)[1]
# files is what requests actually expects
files = {'file': open(filepath, "rb"), 'name': filename, 'show_instance': self.show_instance}
files = {
"file": open(filepath, "rb"),
"name": filename,
"show_instance": self.show_instance,
}
self.api_client.upload_recorded_show(files, self.show_instance)
@ -136,27 +152,25 @@ class ShowRecorder(Thread):
self.start_time, self.show_name, self.show_instance
"""
try:
full_date, full_time = self.start_time.split(" ",1)
full_date, full_time = self.start_time.split(" ", 1)
# No idea why we translated - to : before
#full_time = full_time.replace(":","-")
# full_time = full_time.replace(":","-")
self.logger.info("time: %s" % full_time)
artist = "Airtime Show Recorder"
#set some metadata for our file daemon
recorded_file = mutagen.File(filepath, easy = True)
recorded_file['artist'] = artist
recorded_file['date'] = full_date
recorded_file['title'] = "%s-%s-%s" % (self.show_name,
full_date, full_time)
#You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string
recorded_file['tracknumber'] = self.show_instance
# set some metadata for our file daemon
recorded_file = mutagen.File(filepath, easy=True)
recorded_file["artist"] = artist
recorded_file["date"] = full_date
recorded_file["title"] = "%s-%s-%s" % (self.show_name, full_date, full_time)
# You cannot pass ints into the metadata of a file. Even tracknumber needs to be a string
recorded_file["tracknumber"] = self.show_instance
recorded_file.save()
except Exception as e:
top = traceback.format_exc()
self.logger.error('Exception: %s', e)
self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", top)
def run(self):
code, filepath = self.record_show()
@ -174,14 +188,15 @@ class ShowRecorder(Thread):
self.logger.info("problem recording show")
os.remove(filepath)
class Recorder(Thread):
def __init__(self, q):
Thread.__init__(self)
self.logger = logging.getLogger('recorder')
self.logger = logging.getLogger("recorder")
self.api_client = api_client(self.logger)
self.sr = None
self.shows_to_record = {}
self.server_timezone = ''
self.server_timezone = ""
self.queue = q
self.loops = 0
self.logger.info("RecorderFetch: init complete")
@ -189,7 +204,7 @@ class Recorder(Thread):
success = False
while not success:
try:
self.api_client.register_component('show-recorder')
self.api_client.register_component("show-recorder")
success = True
except Exception as e:
self.logger.error(str(e))
@ -205,7 +220,7 @@ class Recorder(Thread):
msg = json.loads(message)
command = msg["event_type"]
self.logger.info("Received msg from Pypo Message Handler: %s", msg)
if command == 'cancel_recording':
if command == "cancel_recording":
if self.currently_recording():
self.cancel_recording()
else:
@ -218,14 +233,18 @@ class Recorder(Thread):
def process_recorder_schedule(self, m):
self.logger.info("Parsing recording show schedules...")
temp_shows_to_record = {}
shows = m['shows']
shows = m["shows"]
for show in shows:
show_starts = getDateTimeObj(show['starts'])
show_end = getDateTimeObj(show['ends'])
show_starts = getDateTimeObj(show["starts"])
show_end = getDateTimeObj(show["ends"])
time_delta = show_end - show_starts
temp_shows_to_record[show['starts']] = [time_delta,
show['instance_id'], show['name'], m['server_timezone']]
temp_shows_to_record[show["starts"]] = [
time_delta,
show["instance_id"],
show["name"],
m["server_timezone"],
]
self.shows_to_record = temp_shows_to_record
def get_time_till_next_show(self):
@ -237,7 +256,7 @@ class Recorder(Thread):
next_show = getDateTimeObj(start_time)
delta = next_show - tnow
s = '%s.%s' % (delta.seconds, delta.microseconds)
s = "%s.%s" % (delta.seconds, delta.microseconds)
out = float(s)
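# NB: delta.microseconds is not zero-padded to six digits, so the fractional
# part is only exact by coincidence (e.g. 5 s + 500 us becomes "5.500",
# i.e. 5.5 s instead of 5.0005 s).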
if out < 5:
@ -257,7 +276,8 @@ class Recorder(Thread):
return False
def start_record(self):
if len(self.shows_to_record) == 0: return None
if len(self.shows_to_record) == 0:
return None
try:
delta = self.get_time_till_next_show()
if delta < 5:
@ -273,16 +293,25 @@ class Recorder(Thread):
T = pytz.timezone(server_timezone)
start_time_on_UTC = getDateTimeObj(start_time)
start_time_on_server = start_time_on_UTC.replace(tzinfo=pytz.utc).astimezone(T)
start_time_formatted = '%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d' % \
{'year': start_time_on_server.year, 'month': start_time_on_server.month, 'day': start_time_on_server.day, \
'hour': start_time_on_server.hour, 'min': start_time_on_server.minute, 'sec': start_time_on_server.second}
start_time_on_server = start_time_on_UTC.replace(
tzinfo=pytz.utc
).astimezone(T)
start_time_formatted = (
"%(year)d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(sec)02d"
% {
"year": start_time_on_server.year,
"month": start_time_on_server.month,
"day": start_time_on_server.day,
"hour": start_time_on_server.hour,
"min": start_time_on_server.minute,
"sec": start_time_on_server.second,
}
)
seconds_waiting = 0
#avoiding CC-5299
while(True):
# avoiding CC-5299
while True:
if self.currently_recording():
self.logger.info("Previous record not finished, sleeping 100ms")
seconds_waiting = seconds_waiting + 0.1
@ -290,16 +319,21 @@ class Recorder(Thread):
else:
show_length_seconds = show_length.seconds - seconds_waiting
self.sr = ShowRecorder(show_instance, show_name, show_length_seconds, start_time_formatted)
self.sr = ShowRecorder(
show_instance,
show_name,
show_length_seconds,
start_time_formatted,
)
self.sr.start()
break
#remove show from shows to record.
# remove show from shows to record.
del self.shows_to_record[start_time]
#self.time_till_next_show = self.get_time_till_next_show()
except Exception as e :
# self.time_till_next_show = self.get_time_till_next_show()
except Exception as e:
top = traceback.format_exc()
self.logger.error('Exception: %s', e)
self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", top)
def run(self):
@ -318,7 +352,7 @@ class Recorder(Thread):
self.process_recorder_schedule(temp)
self.logger.info("Bootstrap recorder schedule received: %s", temp)
except Exception as e:
self.logger.error( traceback.format_exc() )
self.logger.error(traceback.format_exc())
self.logger.error(e)
self.logger.info("Bootstrap complete: got initial copy of the schedule")
@ -338,16 +372,16 @@ class Recorder(Thread):
self.process_recorder_schedule(temp)
self.logger.info("updated recorder schedule received: %s", temp)
except Exception as e:
self.logger.error( traceback.format_exc() )
self.logger.error(traceback.format_exc())
self.logger.error(e)
try: self.handle_message()
try:
self.handle_message()
except Exception as e:
self.logger.error( traceback.format_exc() )
self.logger.error('Pypo Recorder Exception: %s', e)
self.logger.error(traceback.format_exc())
self.logger.error("Pypo Recorder Exception: %s", e)
time.sleep(PUSH_INTERVAL)
self.loops += 1
except Exception as e :
except Exception as e:
top = traceback.format_exc()
self.logger.error('Exception: %s', e)
self.logger.error("Exception: %s", e)
self.logger.error("traceback: %s", top)

View File

@ -4,32 +4,36 @@ import telnetlib
from .timeout import ls_timeout
import traceback
def create_liquidsoap_annotation(media):
# We need the liq_start_next value in the annotation; it controls the overlap duration of the crossfade.
filename = media['dst']
annotation = ('annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",' + \
'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",' + \
'schedule_table_id="%s",replay_gain="%s dB"') % \
(media['id'],
float(media['fade_in']) / 1000,
float(media['fade_out']) / 1000,
float(media['cue_in']),
float(media['cue_out']),
media['row_id'],
media['replay_gain'])
filename = media["dst"]
annotation = (
'annotate:media_id="%s",liq_start_next="0",liq_fade_in="%s",'
+ 'liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",'
+ 'schedule_table_id="%s",replay_gain="%s dB"'
) % (
media["id"],
float(media["fade_in"]) / 1000,
float(media["fade_out"]) / 1000,
float(media["cue_in"]),
float(media["cue_out"]),
media["row_id"],
media["replay_gain"],
)
# Override the artist/title that Liquidsoap extracts from a file's metadata
# with the metadata we get from Airtime. (You can modify metadata in Airtime's library,
# which doesn't get saved back to the file.)
if 'metadata' in media:
if "metadata" in media:
if 'artist_name' in media['metadata']:
artist_name = media['metadata']['artist_name']
if "artist_name" in media["metadata"]:
artist_name = media["metadata"]["artist_name"]
if isinstance(artist_name, str):
annotation += ',artist="%s"' % (artist_name.replace('"', '\\"'))
if 'track_title' in media['metadata']:
track_title = media['metadata']['track_title']
if "track_title" in media["metadata"]:
track_title = media["metadata"]["track_title"]
if isinstance(track_title, str):
annotation += ',title="%s"' % (track_title.replace('"', '\\"'))
@ -37,8 +41,8 @@ def create_liquidsoap_annotation(media):
return annotation
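Fully expanded, the annotation reads like the following single line (values illustrative, for an item with half-second fades and a 300 s cue-out):

    annotate:media_id="5",liq_start_next="0",liq_fade_in="0.5",liq_fade_out="0.5",liq_cue_in="0.0",liq_cue_out="300.0",schedule_table_id="9",replay_gain="0 dB",artist="Hot Chocolate",title="You Sexy Thing"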
class TelnetLiquidsoap:
class TelnetLiquidsoap:
def __init__(self, telnet_lock, logger, ls_host, ls_port, queues):
self.telnet_lock = telnet_lock
self.ls_host = ls_host
@ -53,9 +57,9 @@ class TelnetLiquidsoap:
def __is_empty(self, queue_id):
return True
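# NB: the early return above short-circuits this method, so the telnet
# queue inspection below is dead code.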
tn = self.__connect()
msg = '%s.queue\nexit\n' % queue_id
tn.write(msg.encode('utf-8'))
output = tn.read_all().decode('utf-8').splitlines()
msg = "%s.queue\nexit\n" % queue_id
tn.write(msg.encode("utf-8"))
output = tn.read_all().decode("utf-8").splitlines()
if len(output) == 3:
return len(output[0]) == 0
else:
@ -68,12 +72,12 @@ class TelnetLiquidsoap:
tn = self.__connect()
for i in self.queues:
msg = 'queues.%s_skip\n' % i
msg = "queues.%s_skip\n" % i
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
except Exception:
raise
finally:
@ -85,18 +89,17 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire()
tn = self.__connect()
msg = 'queues.%s_skip\n' % queue_id
msg = "queues.%s_skip\n" % queue_id
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
except Exception:
raise
finally:
self.telnet_lock.release()
@ls_timeout
def queue_push(self, queue_id, media_item):
try:
@ -107,40 +110,39 @@ class TelnetLiquidsoap:
tn = self.__connect()
annotation = create_liquidsoap_annotation(media_item)
msg = '%s.push %s\n' % (queue_id, annotation)
msg = "%s.push %s\n" % (queue_id, annotation)
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
show_name = media_item['show_name']
msg = 'vars.show_name %s\n' % show_name
tn.write(msg.encode('utf-8'))
show_name = media_item["show_name"]
msg = "vars.show_name %s\n" % show_name
tn.write(msg.encode("utf-8"))
self.logger.debug(msg)
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
except Exception:
raise
finally:
self.telnet_lock.release()
@ls_timeout
def stop_web_stream_buffer(self):
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port)
#dynamic_source.stop http://87.230.101.24:80/top100station.mp3
# dynamic_source.stop http://87.230.101.24:80/top100station.mp3
msg = 'http.stop\n'
msg = "http.stop\n"
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
msg = 'dynamic_source.id -1\n'
msg = "dynamic_source.id -1\n"
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
except Exception as e:
self.logger.error(str(e))
@ -153,14 +155,14 @@ class TelnetLiquidsoap:
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port)
#dynamic_source.stop http://87.230.101.24:80/top100station.mp3
# dynamic_source.stop http://87.230.101.24:80/top100station.mp3
msg = 'dynamic_source.output_stop\n'
msg = "dynamic_source.output_stop\n"
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
except Exception as e:
self.logger.error(str(e))
@ -174,16 +176,16 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port)
#TODO: DO we need this?
msg = 'streams.scheduled_play_start\n'
tn.write(msg.encode('utf-8'))
# TODO: DO we need this?
msg = "streams.scheduled_play_start\n"
tn.write(msg.encode("utf-8"))
msg = 'dynamic_source.output_start\n'
msg = "dynamic_source.output_start\n"
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
self.current_prebuffering_stream_id = None
except Exception as e:
@ -198,18 +200,18 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port)
msg = 'dynamic_source.id %s\n' % media_item['row_id']
msg = "dynamic_source.id %s\n" % media_item["row_id"]
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
msg = 'http.restart %s\n' % media_item['uri']
msg = "http.restart %s\n" % media_item["uri"]
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
self.logger.debug(tn.read_all().decode('utf-8'))
tn.write("exit\n".encode("utf-8"))
self.logger.debug(tn.read_all().decode("utf-8"))
self.current_prebuffering_stream_id = media_item['row_id']
self.current_prebuffering_stream_id = media_item["row_id"]
except Exception as e:
self.logger.error(str(e))
self.logger.error(traceback.format_exc())
@ -222,12 +224,12 @@ class TelnetLiquidsoap:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port)
msg = 'dynamic_source.get_id\n'
msg = "dynamic_source.get_id\n"
self.logger.debug(msg)
tn.write(msg.encode('utf-8'))
tn.write(msg.encode("utf-8"))
tn.write("exit\n".encode('utf-8'))
stream_id = tn.read_all().decode('utf-8').splitlines()[0]
tn.write("exit\n".encode("utf-8"))
stream_id = tn.read_all().decode("utf-8").splitlines()[0]
self.logger.debug("stream_id: %s" % stream_id)
return stream_id
@ -239,20 +241,20 @@ class TelnetLiquidsoap:
@ls_timeout
def disconnect_source(self, sourcename):
self.logger.debug('Disconnecting source: %s', sourcename)
self.logger.debug("Disconnecting source: %s", sourcename)
command = ""
if(sourcename == "master_dj"):
if sourcename == "master_dj":
command += "master_harbor.stop\n"
elif(sourcename == "live_dj"):
elif sourcename == "live_dj":
command += "live_dj_harbor.stop\n"
try:
self.telnet_lock.acquire()
tn = telnetlib.Telnet(self.ls_host, self.ls_port)
self.logger.info(command)
tn.write(command.encode('utf-8'))
tn.write('exit\n'.encode('utf-8'))
tn.read_all().decode('utf-8')
tn.write(command.encode("utf-8"))
tn.write("exit\n".encode("utf-8"))
tn.read_all().decode("utf-8")
except Exception as e:
self.logger.error(traceback.format_exc())
finally:
@ -267,18 +269,17 @@ class TelnetLiquidsoap:
for i in commands:
self.logger.info(i)
if type(i) is str:
i = i.encode('utf-8')
i = i.encode("utf-8")
tn.write(i)
tn.write('exit\n'.encode('utf-8'))
tn.read_all().decode('utf-8')
tn.write("exit\n".encode("utf-8"))
tn.read_all().decode("utf-8")
except Exception as e:
self.logger.error(str(e))
self.logger.error(traceback.format_exc())
finally:
self.telnet_lock.release()
def switch_source(self, sourcename, status):
self.logger.debug('Switching source: %s to "%s" status', sourcename, status)
command = "streams."
@ -296,15 +297,15 @@ class TelnetLiquidsoap:
self.telnet_send([command])
class DummyTelnetLiquidsoap:
class DummyTelnetLiquidsoap:
def __init__(self, telnet_lock, logger):
self.telnet_lock = telnet_lock
self.liquidsoap_mock_queues = {}
self.logger = logger
for i in range(4):
self.liquidsoap_mock_queues["s"+str(i)] = []
self.liquidsoap_mock_queues["s" + str(i)] = []
@ls_timeout
def queue_push(self, queue_id, media_item):
@ -313,6 +314,7 @@ class DummyTelnetLiquidsoap:
self.logger.info("Pushing %s to queue %s" % (media_item, queue_id))
from datetime import datetime
print("Time now: {:s}".format(datetime.utcnow()))
annotation = create_liquidsoap_annotation(media_item)
@ -329,6 +331,7 @@ class DummyTelnetLiquidsoap:
self.logger.info("Purging queue %s" % queue_id)
from datetime import datetime
print("Time now: {:s}".format(datetime.utcnow()))
except Exception:
@ -336,5 +339,6 @@ class DummyTelnetLiquidsoap:
finally:
self.telnet_lock.release()
class QueueNotEmptyException(Exception):
pass

View File

@ -13,14 +13,17 @@ import logging
from datetime import datetime
from datetime import timedelta
def keyboardInterruptHandler(signum, frame):
logger = logging.getLogger()
logger.info('\nKeyboard Interrupt\n')
logger.info("\nKeyboard Interrupt\n")
sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler)
# configure logging
format = '%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s'
format = "%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=format)
logging.captureWarnings(True)
@ -30,19 +33,18 @@ pypoPush_q = Queue()
pypoLiq_q = Queue()
liq_queue_tracker = {
"s0": None,
"s1": None,
"s2": None,
"s3": None,
}
"s0": None,
"s1": None,
"s2": None,
"s3": None,
}
#dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging)
dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, \
"localhost", \
1234)
# dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging)
dummy_telnet_liquidsoap = TelnetLiquidsoap(telnet_lock, logging, "localhost", 1234)
plq = PypoLiqQueue(pypoLiq_q, telnet_lock, logging, liq_queue_tracker, \
dummy_telnet_liquidsoap)
plq = PypoLiqQueue(
pypoLiq_q, telnet_lock, logging, liq_queue_tracker, dummy_telnet_liquidsoap
)
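# NB: these constructor calls are stale relative to the classes above:
# TelnetLiquidsoap also expects a queues list as a fifth argument, and
# PypoLiqQueue takes (queue, pypo_liquidsoap, logger), so this harness
# needs updating before it will run.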
plq.daemon = True
plq.start()
@ -54,47 +56,43 @@ media_schedule = {}
start_dt = datetime.utcnow() + timedelta(seconds=1)
end_dt = datetime.utcnow() + timedelta(seconds=6)
media_schedule[start_dt] = {"id": 5, \
"type":"file", \
"row_id":9, \
"uri":"", \
"dst":"/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3", \
"fade_in":0, \
"fade_out":0, \
"cue_in":0, \
"cue_out":300, \
"start": start_dt, \
"end": end_dt, \
"show_name":"Untitled", \
"replay_gain": 0, \
"independent_event": True \
}
media_schedule[start_dt] = {
"id": 5,
"type": "file",
"row_id": 9,
"uri": "",
"dst": "/home/martin/Music/ipod/Hot Chocolate - You Sexy Thing.mp3",
"fade_in": 0,
"fade_out": 0,
"cue_in": 0,
"cue_out": 300,
"start": start_dt,
"end": end_dt,
"show_name": "Untitled",
"replay_gain": 0,
"independent_event": True,
}
start_dt = datetime.utcnow() + timedelta(seconds=2)
end_dt = datetime.utcnow() + timedelta(seconds=6)
media_schedule[start_dt] = {"id": 5, \
"type":"file", \
"row_id":9, \
"uri":"", \
"dst":"/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3", \
"fade_in":0, \
"fade_out":0, \
"cue_in":0, \
"cue_out":300, \
"start": start_dt, \
"end": end_dt, \
"show_name":"Untitled", \
"replay_gain": 0, \
"independent_event": True \
}
media_schedule[start_dt] = {
"id": 5,
"type": "file",
"row_id": 9,
"uri": "",
"dst": "/home/martin/Music/ipod/Good Charlotte - bloody valentine.mp3",
"fade_in": 0,
"fade_out": 0,
"cue_in": 0,
"cue_out": 300,
"start": start_dt,
"end": end_dt,
"show_name": "Untitled",
"replay_gain": 0,
"independent_event": True,
}
pypoLiq_q.put(media_schedule)
plq.join()

View File

@ -2,12 +2,13 @@
import threading
from . import pypofetch
def __timeout(func, timeout_duration, default, args, kwargs):
def __timeout(func, timeout_duration, default, args, kwargs):
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
self.result = func(*args, **kwargs)
@ -21,10 +22,10 @@ def __timeout(func, timeout_duration, default, args, kwargs):
it.join(timeout_duration)
if it.isAlive():
"""Restart Liquidsoap and try the command one more time. If it
"""Restart Liquidsoap and try the command one more time. If it
fails again then there is something critically wrong..."""
if first_attempt:
#restart liquidsoap
# restart liquidsoap
pypofetch.PypoFetch.ref.restart_liquidsoap()
else:
raise Exception("Thread did not terminate")
@ -33,7 +34,9 @@ def __timeout(func, timeout_duration, default, args, kwargs):
first_attempt = False
def ls_timeout(f, timeout=15, default=None):
def new_f(*args, **kwargs):
return __timeout(f, timeout, default, args, kwargs)
return new_f
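In use, the decorator wraps each telnet-facing method in this package; a minimal sketch:

    @ls_timeout
    def fetch_status():
        ...  # any call that can hang on an unresponsive Liquidsoap

    # runs fetch_status in a side thread; if it is still alive after the
    # 15 s default, Liquidsoap is restarted and the call is retried once
    fetch_status()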

View File

@ -10,64 +10,63 @@ print(script_path)
os.chdir(script_path)
# Allows us to avoid installing the upstart init script when deploying on Airtime Pro:
if '--no-init-script' in sys.argv:
if "--no-init-script" in sys.argv:
data_files = []
sys.argv.remove('--no-init-script') # super hax
sys.argv.remove("--no-init-script") # super hax
else:
pypo_files = []
for root, dirnames, filenames in os.walk('pypo'):
for root, dirnames, filenames in os.walk("pypo"):
for filename in filenames:
pypo_files.append(os.path.join(root, filename))
data_files = [
('/etc/init', ['install/upstart/airtime-playout.conf.template']),
('/etc/init', ['install/upstart/airtime-liquidsoap.conf.template']),
('/etc/init.d', ['install/sysvinit/airtime-playout']),
('/etc/init.d', ['install/sysvinit/airtime-liquidsoap']),
('/var/log/airtime/pypo', []),
('/var/log/airtime/pypo-liquidsoap', []),
('/var/tmp/airtime/pypo', []),
('/var/tmp/airtime/pypo/cache', []),
('/var/tmp/airtime/pypo/files', []),
('/var/tmp/airtime/pypo/tmp', []),
]
("/etc/init", ["install/upstart/airtime-playout.conf.template"]),
("/etc/init", ["install/upstart/airtime-liquidsoap.conf.template"]),
("/etc/init.d", ["install/sysvinit/airtime-playout"]),
("/etc/init.d", ["install/sysvinit/airtime-liquidsoap"]),
("/var/log/airtime/pypo", []),
("/var/log/airtime/pypo-liquidsoap", []),
("/var/tmp/airtime/pypo", []),
("/var/tmp/airtime/pypo/cache", []),
("/var/tmp/airtime/pypo/files", []),
("/var/tmp/airtime/pypo/tmp", []),
]
print(data_files)
setup(name='airtime-playout',
version='1.0',
description='Airtime Playout Engine',
url='http://github.com/sourcefabric/Airtime',
author='sourcefabric',
license='AGPLv3',
packages=['pypo', 'pypo.media', 'pypo.media.update',
'liquidsoap'],
package_data={'': ['**/*.liq', '*.cfg', '*.types']},
scripts=[
'bin/airtime-playout',
'bin/airtime-liquidsoap',
'bin/pyponotify'
],
install_requires=[
'amqplib',
'anyjson',
'argparse',
'configobj',
'docopt',
'future',
'kombu',
'mutagen',
'PyDispatcher',
'pyinotify',
'pytz',
'requests',
'defusedxml',
'packaging',
],
zip_safe=False,
data_files=data_files)
setup(
name="airtime-playout",
version="1.0",
description="Airtime Playout Engine",
url="http://github.com/sourcefabric/Airtime",
author="sourcefabric",
license="AGPLv3",
packages=["pypo", "pypo.media", "pypo.media.update", "liquidsoap"],
package_data={"": ["**/*.liq", "*.cfg", "*.types"]},
scripts=["bin/airtime-playout", "bin/airtime-liquidsoap", "bin/pyponotify"],
install_requires=[
"amqplib",
"anyjson",
"argparse",
"configobj",
"docopt",
"future",
"kombu",
"mutagen",
"PyDispatcher",
"pyinotify",
"pytz",
"requests",
"defusedxml",
"packaging",
],
zip_safe=False,
data_files=data_files,
)
# Reload the initctl config so that playout services works
if data_files:
print("Reloading initctl configuration")
#call(['initctl', 'reload-configuration'])
print("Run \"sudo service airtime-playout start\" and \"sudo service airtime-liquidsoap start\"")
# call(['initctl', 'reload-configuration'])
print(
'Run "sudo service airtime-playout start" and "sudo service airtime-liquidsoap start"'
)
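
The setup() rewrite above shows black's treatment of long call sites: once a call no longer fits on one line, every argument goes on its own line with a trailing comma. To preview what black would emit for a snippet, its Python API can be called directly; a sketch, assuming a black of this era (21.x) is installed:

import black

src = 'setup(name="airtime-playout",\n      version="1.0",\n      license="AGPLv3")\n'
# The collapsed call fits within the default 88-column limit,
# so black joins it back onto a single line.
print(black.format_str(src, mode=black.Mode()), end="")
# setup(name="airtime-playout", version="1.0", license="AGPLv3")
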

View File

@ -9,7 +9,7 @@ import json
import shutil
import commands
#sys.path.append('/usr/lib/airtime/media-monitor/mm2/')
# sys.path.append('/usr/lib/airtime/media-monitor/mm2/')
from mm2.media.monitor.pure import is_file_supported
# create logger
@ -22,86 +22,97 @@ logging.disable(50)
# add ch to logger
logger.addHandler(ch)
if (os.geteuid() != 0):
print 'Must be a root user.'
if os.geteuid() != 0:
print "Must be a root user."
sys.exit()
# loading config file
try:
config = ConfigObj('/etc/airtime/airtime.conf')
config = ConfigObj("/etc/airtime/airtime.conf")
except Exception, e:
print('Error loading config file: %s', e)
print ("Error loading config file: %s", e)
sys.exit()
api_client = apc.AirtimeApiClient(config)
#helper functions
# helper functions
# copy or move files
# flag should be 'copy' or 'move'
def copy_or_move_files_to(paths, dest, flag):
try:
for path in paths:
if (path[0] == "/" or path[0] == "~"):
if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path)
else:
path = currentDir+path
path = apc.encode_to(path, 'utf-8')
dest = apc.encode_to(dest, 'utf-8')
if(os.path.exists(path)):
if(os.path.isdir(path)):
path = currentDir + path
path = apc.encode_to(path, "utf-8")
dest = apc.encode_to(dest, "utf-8")
if os.path.exists(path):
if os.path.isdir(path):
path = format_dir_string(path)
#construct full path
# construct full path
sub_path = []
for temp in os.listdir(path):
sub_path.append(path+temp)
sub_path.append(path + temp)
copy_or_move_files_to(sub_path, dest, flag)
elif(os.path.isfile(path)):
#copy file to dest
if(is_file_supported(path)):
destfile = dest+os.path.basename(path)
if(flag == 'copy'):
print "Copying %(src)s to %(dest)s..." % {'src':path, 'dest':destfile}
elif os.path.isfile(path):
# copy file to dest
if is_file_supported(path):
destfile = dest + os.path.basename(path)
if flag == "copy":
print "Copying %(src)s to %(dest)s..." % {
"src": path,
"dest": destfile,
}
shutil.copyfile(path, destfile)
elif(flag == 'move'):
print "Moving %(src)s to %(dest)s..." % {'src':path, 'dest':destfile}
elif flag == "move":
print "Moving %(src)s to %(dest)s..." % {
"src": path,
"dest": destfile,
}
shutil.move(path, destfile)
else:
print "Cannot find file or path: %s" % path
except Exception as e:
print "Error: ", e
print "Error: ", e
def format_dir_string(path):
if(path[-1] != '/'):
path = path+'/'
if path[-1] != "/":
path = path + "/"
return path
def helper_get_stor_dir():
try:
res = api_client.list_all_watched_dirs()
except Exception, e:
return res
if(res['dirs']['1'][-1] != '/'):
out = res['dirs']['1']+'/'
if res["dirs"]["1"][-1] != "/":
out = res["dirs"]["1"] + "/"
return out
else:
return res['dirs']['1']
return res["dirs"]["1"]
def checkOtherOption(args):
for i in args:
if(i[0] == '-'):
if i[0] == "-":
return True
def errorIfMultipleOption(args, msg=''):
if(checkOtherOption(args)):
if(msg != ''):
def errorIfMultipleOption(args, msg=""):
if checkOtherOption(args):
if msg != "":
raise OptionValueError(msg)
else:
raise OptionValueError("This option cannot be combined with other options")
def printHelp():
storage_dir = helper_get_stor_dir()
if(storage_dir is None):
if storage_dir is None:
storage_dir = "Unknown"
else:
storage_dir += "imported/"
@ -129,58 +140,70 @@ There are two ways to import audio files into Airtime:
parser.print_help()
print ""
def CopyAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) == 0 ):
raise OptionValueError("No argument found. This option requires at least one argument.")
if len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires at least one argument."
)
stor = helper_get_stor_dir()
if(stor is None):
if stor is None:
print "Unable to connect to the Airtime server."
return
dest = stor+"organize/"
copy_or_move_files_to(parser.rargs, dest, 'copy')
dest = stor + "organize/"
copy_or_move_files_to(parser.rargs, dest, "copy")
def MoveAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) == 0 ):
raise OptionValueError("No argument found. This option requires at least one argument.")
if len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires at least one argument."
)
stor = helper_get_stor_dir()
if(stor is None):
if stor is None:
exit("Unable to connect to the Airtime server.")
dest = stor+"organize/"
copy_or_move_files_to(parser.rargs, dest, 'move')
dest = stor + "organize/"
copy_or_move_files_to(parser.rargs, dest, "move")
def WatchAddAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 1):
raise OptionValueError("Too many arguments. This option requires exactly one argument.")
elif(len(parser.rargs) == 0 ):
raise OptionValueError("No argument found. This option requires exactly one argument.")
if len(parser.rargs) > 1:
raise OptionValueError(
"Too many arguments. This option requires exactly one argument."
)
elif len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires exactly one argument."
)
path = parser.rargs[0]
if (path[0] == "/" or path[0] == "~"):
if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path)
else:
path = currentDir+path
path = apc.encode_to(path, 'utf-8')
if(os.path.isdir(path)):
#os.chmod(path, 0765)
path = currentDir + path
path = apc.encode_to(path, "utf-8")
if os.path.isdir(path):
# os.chmod(path, 0765)
try:
res = api_client.add_watched_dir(path)
except Exception, e:
exit("Unable to connect to the server.")
# success
if(res['msg']['code'] == 0):
if res["msg"]["code"] == 0:
print "%s added to watched folder list successfully" % path
else:
print "Adding a watched folder failed: %s" % res['msg']['error']
print "Adding a watched folder failed: %s" % res["msg"]["error"]
print "This error most likely caused by wrong permissions"
print "Try fixing this error by chmodding the parent directory(ies)"
else:
print "Given path is not a directory: %s" % path
def WatchListAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 0):
if len(parser.rargs) > 0:
raise OptionValueError("This option doesn't take any arguments.")
try:
res = api_client.list_all_watched_dirs()
@ -188,120 +211,184 @@ def WatchListAction(option, opt, value, parser):
exit("Unable to connect to the Airtime server.")
dirs = res["dirs"].items()
# there will be always 1 which is storage folder
if(len(dirs) == 1):
print "No watch folders found"
if len(dirs) == 1:
print "No watch folders found"
else:
for key, v in dirs:
if(key != '1'):
if key != "1":
print v
def WatchRemoveAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 1):
raise OptionValueError("Too many arguments. This option requires exactly one argument.")
elif(len(parser.rargs) == 0 ):
raise OptionValueError("No argument found. This option requires exactly one argument.")
if len(parser.rargs) > 1:
raise OptionValueError(
"Too many arguments. This option requires exactly one argument."
)
elif len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires exactly one argument."
)
path = parser.rargs[0]
if (path[0] == "/" or path[0] == "~"):
if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path)
else:
path = currentDir+path
path = apc.encode_to(path, 'utf-8')
if(os.path.isdir(path)):
path = currentDir + path
path = apc.encode_to(path, "utf-8")
if os.path.isdir(path):
try:
res = api_client.remove_watched_dir(path)
except Exception, e:
exit("Unable to connect to the Airtime server.")
# success
if(res['msg']['code'] == 0):
if res["msg"]["code"] == 0:
print "%s removed from watch folder list successfully." % path
else:
print "Removing the watch folder failed: %s" % res['msg']['error']
print "Removing the watch folder failed: %s" % res["msg"]["error"]
else:
print "The given path is not a directory: %s" % path
def StorageSetAction(option, opt, value, parser):
bypass = False
isF = '-f' in parser.rargs
isForce = '--force' in parser.rargs
if(isF or isForce ):
isF = "-f" in parser.rargs
isForce = "--force" in parser.rargs
if isF or isForce:
bypass = True
if(isF):
parser.rargs.remove('-f')
if(isForce):
parser.rargs.remove('--force')
if(not bypass):
errorIfMultipleOption(parser.rargs, "Only [-f] and [--force] option is allowed with this option.")
possibleInput = ['y','Y','n','N']
confirm = raw_input("Are you sure you want to change the storage directory? (y/N)")
confirm = confirm or 'N'
while(confirm not in possibleInput):
if isF:
parser.rargs.remove("-f")
if isForce:
parser.rargs.remove("--force")
if not bypass:
errorIfMultipleOption(
parser.rargs, "Only [-f] and [--force] option is allowed with this option."
)
possibleInput = ["y", "Y", "n", "N"]
confirm = raw_input(
"Are you sure you want to change the storage directory? (y/N)"
)
confirm = confirm or "N"
while confirm not in possibleInput:
print "Not an acceptable input: %s\n" % confirm
confirm = raw_input("Are you sure you want to change the storage directory? (y/N) ")
confirm = confirm or 'N'
if(confirm == 'n' or confirm =='N'):
confirm = raw_input(
"Are you sure you want to change the storage directory? (y/N) "
)
confirm = confirm or "N"
if confirm == "n" or confirm == "N":
sys.exit(1)
if(len(parser.rargs) > 1):
raise OptionValueError("Too many arguments. This option requires exactly one argument.")
elif(len(parser.rargs) == 0 ):
raise OptionValueError("No argument found. This option requires exactly one argument.")
if len(parser.rargs) > 1:
raise OptionValueError(
"Too many arguments. This option requires exactly one argument."
)
elif len(parser.rargs) == 0:
raise OptionValueError(
"No argument found. This option requires exactly one argument."
)
path = parser.rargs[0]
if (path[0] == "/" or path[0] == "~"):
if path[0] == "/" or path[0] == "~":
path = os.path.realpath(path)
else:
path = currentDir+path
path = apc.encode_to(path, 'utf-8')
if(os.path.isdir(path)):
path = currentDir + path
path = apc.encode_to(path, "utf-8")
if os.path.isdir(path):
try:
res = api_client.set_storage_dir(path)
except Exception, e:
exit("Unable to connect to the Airtime server.")
# success
if(res['msg']['code'] == 0):
if res["msg"]["code"] == 0:
print "Successfully set storage folder to %s" % path
else:
print "Setting storage folder failed: %s" % res['msg']['error']
print "Setting storage folder failed: %s" % res["msg"]["error"]
else:
print "The given path is not a directory: %s" % path
def StorageGetAction(option, opt, value, parser):
errorIfMultipleOption(parser.rargs)
if(len(parser.rargs) > 0):
if len(parser.rargs) > 0:
raise OptionValueError("This option does not take any arguments.")
print helper_get_stor_dir()
class OptionValueError(RuntimeError):
def __init__(self, msg):
self.msg = msg
usage = """[-c|--copy FILE/DIR [FILE/DIR...]] [-m|--move FILE/DIR [FILE/DIR...]]
[--watch-add DIR] [--watch-list] [--watch-remove DIR]
[--storage-dir-set DIR] [--storage-dir-get]"""
parser = OptionParser(usage=usage, add_help_option=False)
parser.add_option('-c','--copy', action='callback', callback=CopyAction, metavar='FILE', help='Copy FILE(s) into the storage directory.\nYou can specify multiple files or directories.')
parser.add_option('-m','--move', action='callback', callback=MoveAction, metavar='FILE', help='Move FILE(s) into the storage directory.\nYou can specify multiple files or directories.')
parser.add_option('--watch-add', action='callback', callback=WatchAddAction, help='Add DIR to the watched folders list.')
parser.add_option('--watch-list', action='callback', callback=WatchListAction, help='Show the list of folders that are watched.')
parser.add_option('--watch-remove', action='callback', callback=WatchRemoveAction, help='Remove DIR from the watched folders list.')
parser.add_option('--storage-dir-set', action='callback', callback=StorageSetAction, help='Set storage dir to DIR.')
parser.add_option('--storage-dir-get', action='callback', callback=StorageGetAction, help='Show the current storage dir.')
parser.add_option('-h', '--help', dest='help', action='store_true', help='show this help message and exit')
parser.add_option(
"-c",
"--copy",
action="callback",
callback=CopyAction,
metavar="FILE",
help="Copy FILE(s) into the storage directory.\nYou can specify multiple files or directories.",
)
parser.add_option(
"-m",
"--move",
action="callback",
callback=MoveAction,
metavar="FILE",
help="Move FILE(s) into the storage directory.\nYou can specify multiple files or directories.",
)
parser.add_option(
"--watch-add",
action="callback",
callback=WatchAddAction,
help="Add DIR to the watched folders list.",
)
parser.add_option(
"--watch-list",
action="callback",
callback=WatchListAction,
help="Show the list of folders that are watched.",
)
parser.add_option(
"--watch-remove",
action="callback",
callback=WatchRemoveAction,
help="Remove DIR from the watched folders list.",
)
parser.add_option(
"--storage-dir-set",
action="callback",
callback=StorageSetAction,
help="Set storage dir to DIR.",
)
parser.add_option(
"--storage-dir-get",
action="callback",
callback=StorageGetAction,
help="Show the current storage dir.",
)
parser.add_option(
"-h",
"--help",
dest="help",
action="store_true",
help="show this help message and exit",
)
# pop "--dir"
#sys.argv.pop(1)
# sys.argv.pop(1)
# pop "invoked pwd"
currentDir = os.getcwd() #sys.argv.pop(1)+'/'
currentDir = os.getcwd() # sys.argv.pop(1)+'/'
if('-l' in sys.argv or '--link' in sys.argv):
if "-l" in sys.argv or "--link" in sys.argv:
print "\nThe [-l][--link] option is deprecated. Please use the --watch-add option.\nTry 'airtime-import -h' for more detail.\n"
sys.exit()
if('-h' in sys.argv):
if "-h" in sys.argv:
printHelp()
sys.exit()
if(len(sys.argv) == 1 or '-' not in sys.argv[1]):
if len(sys.argv) == 1 or "-" not in sys.argv[1]:
printHelp()
sys.exit()
@ -309,10 +396,10 @@ try:
(option, args) = parser.parse_args()
except Exception, e:
printHelp()
if hasattr(e, 'msg'):
print "Error: "+e.msg
if hasattr(e, "msg"):
print "Error: " + e.msg
else:
print "Error: ",e
print "Error: ", e
sys.exit()
except SystemExit:
printHelp()
@ -321,7 +408,3 @@ except SystemExit:
if option.help:
printHelp()
sys.exit()
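
All of the options in this script are optparse callback actions that consume parser.rargs by hand, which is why each action repeats its own argument-count checks. A minimal runnable sketch of the pattern (Python 3, hypothetical option name):

from optparse import OptionParser, OptionValueError

def watch_add_action(option, opt, value, parser):
    # At callback time, parser.rargs holds the not-yet-parsed arguments.
    if len(parser.rargs) != 1:
        raise OptionValueError("%s requires exactly one argument." % opt)
    parser.values.watch_add = parser.rargs.pop(0)

parser = OptionParser()
parser.add_option("--watch-add", action="callback", callback=watch_add_action)
options, args = parser.parse_args(["--watch-add", "/srv/music"])
print(options.watch_add)  # /srv/music
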

View File

@ -21,14 +21,14 @@ logging.disable(50)
logger.addHandler(ch)
if os.geteuid() != 0:
print 'Must be a root user.'
print "Must be a root user."
sys.exit(1)
# loading config file
try:
config = ConfigObj('/etc/airtime/airtime.conf')
config = ConfigObj("/etc/airtime/airtime.conf")
except Exception, e:
print('Error loading config file: %s', e)
print ("Error loading config file: %s", e)
sys.exit(1)
api_client = apc.AirtimeApiClient(config)
@ -43,25 +43,29 @@ try:
# filepath
files = api_client.get_files_without_silan_value()
total_files = len(files)
if total_files == 0: break
if total_files == 0:
break
processed_data = []
total = 0
for f in files:
full_path = f['fp']
full_path = f["fp"]
# silence detect(set default queue in and out)
try:
command = ['silan', '-b' '-f', 'JSON', full_path]
command = ["silan", "-b" "-f", "JSON", full_path]
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
out = proc.communicate()[0].strip('\r\n')
out = proc.communicate()[0].strip("\r\n")
info = json.loads(out)
data = {}
data['cuein'] = str('{0:f}'.format(info['sound'][0][0]))
data['cueout'] = str('{0:f}'.format(info['sound'][-1][1]))
data['length'] = str('{0:f}'.format(info['file duration']))
processed_data.append((f['id'], data))
data["cuein"] = str("{0:f}".format(info["sound"][0][0]))
data["cueout"] = str("{0:f}".format(info["sound"][-1][1]))
data["length"] = str("{0:f}".format(info["file duration"]))
processed_data.append((f["id"], data))
total += 1
if total % 5 == 0:
print "Total %s / %s files has been processed.." % (total, total_files)
print "Total %s / %s files has been processed.." % (
total,
total_files,
)
except Exception, e:
print e
print traceback.format_exc()
@ -70,7 +74,7 @@ try:
try:
print api_client.update_cue_values_by_silan(processed_data)
except Exception ,e:
except Exception, e:
print e
print traceback.format_exc()
print "Total %d songs Processed" % subtotal

View File

@ -16,32 +16,35 @@ if os.geteuid() == 0:
print "Please run this program as non-root"
sys.exit(1)
def printUsage():
print "airtime-test-soundcard [-v] [-o alsa | ao | oss | portaudio | pulseaudio ] [-h]"
print " Where: "
print " -v verbose mode"
print " -o Linux Sound API (default: alsa)"
print " -h show help menu "
def find_liquidsoap_binary():
"""
Starting with Airtime 2.0, we don't know the exact location of the Liquidsoap
binary because it may have been installed through a debian package. Let's find
the location of this binary.
"""
rv = subprocess.call("which airtime-liquidsoap > /dev/null", shell=True)
if rv == 0:
return "airtime-liquidsoap"
return None
try:
optlist, args = getopt.getopt(sys.argv[1:], 'hvo:')
optlist, args = getopt.getopt(sys.argv[1:], "hvo:")
except getopt.GetoptError, g:
printUsage()
sys.exit(1)
sound_api_types = set(["alsa", "ao", "oss", "portaudio", "pulseaudio"])
verbose = False
@ -63,26 +66,25 @@ for o, a in optlist:
try:
print "Sound API: %s" % sound_api
print "Outputting to soundcard. You should be able to hear a monotonous tone. Press ctrl-c to quit."
liquidsoap_exe = find_liquidsoap_binary()
if liquidsoap_exe is None:
raise Exception("Liquidsoap not found!")
command = "%s 'output.%s(sine())'" % (liquidsoap_exe, sound_api)
if not verbose:
command += " > /dev/null"
#print command
# print command
rv = subprocess.call(command, shell=True)
#if we reach this point, it means that our subprocess exited without the user
#doing a keyboard interrupt. This means there was a problem outputting to the
#soundcard. Print appropriate message.
print "There was an error using the selected sound API. Please select a different API " + \
"and run this program again. Use the -h option for help"
# if we reach this point, it means that our subprocess exited without the user
# doing a keyboard interrupt. This means there was a problem outputting to the
# soundcard. Print appropriate message.
print "There was an error using the selected sound API. Please select a different API " + "and run this program again. Use the -h option for help"
except KeyboardInterrupt, ki:
print "\nExiting"
except Exception, e:
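
find_liquidsoap_binary() shells out to which because the binary may live wherever the distribution package put it; in Python 3 the same lookup is shutil.which. A sketch of the whole tone test under that assumption (sound API name assumed valid for the local Liquidsoap build):

import shutil
import subprocess

def play_test_tone(sound_api="alsa"):
    # shutil.which replaces the shelled-out `which airtime-liquidsoap`.
    exe = shutil.which("airtime-liquidsoap")
    if exe is None:
        raise RuntimeError("Liquidsoap not found!")
    # The same one-liner the script builds, without shell=True.
    return subprocess.call([exe, "output.%s(sine())" % sound_api])
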

View File

@ -16,6 +16,7 @@ if os.geteuid() == 0:
print "Please run this program as non-root"
sys.exit(1)
def printUsage():
print "airtime-test-stream [-v] [-o icecast | shoutcast ] [-H hostname] [-P port] [-u username] [-p password] [-m mount]"
print " Where: "
@ -42,7 +43,8 @@ def find_liquidsoap_binary():
return None
optlist, args = getopt.getopt(sys.argv[1:], 'hvo:H:P:u:p:m:')
optlist, args = getopt.getopt(sys.argv[1:], "hvo:H:P:u:p:m:")
stream_types = set(["shoutcast", "icecast"])
verbose = False
@ -89,31 +91,38 @@ try:
print "Mount: %s\n" % mount
url = "http://%s:%s/%s" % (host, port, mount)
print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." % (stream_type, url)
print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." % (
stream_type,
url,
)
liquidsoap_exe = find_liquidsoap_binary()
if liquidsoap_exe is None:
raise Exception("Liquidsoap not found!")
if stream_type == "icecast":
command = "%s 'output.icecast(%%vorbis, host = \"%s\", port = %s, user= \"%s\", password = \"%s\", mount=\"%s\", sine())'" % (liquidsoap_exe, host, port, user, password, mount)
command = (
'%s \'output.icecast(%%vorbis, host = "%s", port = %s, user= "%s", password = "%s", mount="%s", sine())\''
% (liquidsoap_exe, host, port, user, password, mount)
)
else:
command = "%s 'output.shoutcast(%%mp3, host=\"%s\", port = %s, user= \"%s\", password = \"%s\", sine())'" \
% (liquidsoap_exe, host, port, user, password)
command = (
'%s \'output.shoutcast(%%mp3, host="%s", port = %s, user= "%s", password = "%s", sine())\''
% (liquidsoap_exe, host, port, user, password)
)
if not verbose:
command += " 2>/dev/null | grep \"failed\""
command += ' 2>/dev/null | grep "failed"'
else:
print command
#print command
# print command
rv = subprocess.call(command, shell=True)
#if we reach this point, it means that our subprocess exited without the user
#doing a keyboard interrupt. This means there was a problem outputting to the
#stream server. Print appropriate message.
print "There was an error with your stream configuration. Please review your configuration " + \
"and run this program again. Use the -h option for help"
# if we reach this point, it means that our subprocess exited without the user
# doing a keyboard interrupt. This means there was a problem outputting to the
# stream server. Print appropriate message.
print "There was an error with your stream configuration. Please review your configuration " + "and run this program again. Use the -h option for help"
except KeyboardInterrupt, ki:
print "\nExiting"

View File

@ -7,39 +7,45 @@ import requests
from urlparse import urlparse
import sys
CONFIG_PATH='/etc/airtime/airtime.conf'
CONFIG_PATH = "/etc/airtime/airtime.conf"
GENERAL_CONFIG_SECTION = "general"
def read_config_file(config_path):
"""Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror
print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1)
except Exception:
print e.strerror
exit(-1)
return config
if __name__ == '__main__':
if __name__ == "__main__":
config = read_config_file(CONFIG_PATH)
api_key = config.get(GENERAL_CONFIG_SECTION, 'api_key')
base_url = config.get(GENERAL_CONFIG_SECTION, 'base_url')
base_dir = config.get(GENERAL_CONFIG_SECTION, 'base_dir')
base_port = config.get(GENERAL_CONFIG_SECTION, 'base_port', 80)
api_key = config.get(GENERAL_CONFIG_SECTION, "api_key")
base_url = config.get(GENERAL_CONFIG_SECTION, "base_url")
base_dir = config.get(GENERAL_CONFIG_SECTION, "base_dir")
base_port = config.get(GENERAL_CONFIG_SECTION, "base_port", 80)
action = "upgrade"
station_url = ""
default_url = "http://%s:%s%s" % (base_url, base_port, base_dir)
parser = argparse.ArgumentParser()
parser.add_argument('--downgrade', help='Downgrade the station', action="store_true")
parser.add_argument('station_url', help='station URL', nargs='?', default=default_url)
parser.add_argument(
"--downgrade", help="Downgrade the station", action="store_true"
)
parser.add_argument(
"station_url", help="station URL", nargs="?", default=default_url
)
args = parser.parse_args()
if args.downgrade:
action = "downgrade"
@ -47,12 +53,11 @@ if __name__ == '__main__':
station_url = args.station_url
# Add http:// if you were lazy and didn't pass a scheme to this script
url = urlparse(station_url)
if not url.scheme:
station_url = "http://%s" % station_url
print "Requesting %s..." % action
r = requests.get("%s/%s" % (station_url, action), auth=(api_key, ''))
r = requests.get("%s/%s" % (station_url, action), auth=(api_key, ""))
print r.text
r.raise_for_status()
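
End to end, the upgrade is just one authenticated GET against the station, with the API key sent as the username of an HTTP Basic pair. The same request as a Python 3 sketch (endpoint and auth scheme as above):

import requests

def run_station_action(station_url, api_key, action="upgrade"):
    # Add a scheme if the caller omitted one, mirroring the urlparse check above.
    if "://" not in station_url:
        station_url = "http://%s" % station_url
    r = requests.get("%s/%s" % (station_url, action), auth=(api_key, ""))
    r.raise_for_status()
    return r.text
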