Merge branch 'devel' into cc-1960-internationalize-airtime
Conflicts: airtime_mvc/public/js/airtime/playlist/smart_blockbuilder.js
commit 10b6dd202e

@@ -343,6 +343,7 @@ class LibraryController extends Zend_Controller_Action
     {
         $params = $this->getRequest()->getParams();
 
+        # terrible name for the method below. it does not only search files.
         $r = Application_Model_StoredFile::searchLibraryFiles($params);
 
         //TODO move this to the datatables row callback.

@@ -387,11 +387,12 @@ class PreferenceController extends Zend_Controller_Action
 
     public function rescanWatchDirectoryAction()
     {
-        $dir = Application_Model_MusicDir::getDirByPath(
-            $this->getRequest()->getParam("dir"));
+        $dir_path = $this->getRequest()->getParam('dir');
+        $dir = Application_Model_MusicDir::getDirByPath($dir_path);
         $data = array( 'directory' => $dir->getDirectory(),
                        'id' => $dir->getId());
         Application_Model_RabbitMq::SendMessageToMediaMonitor('rescan_watch', $data);
+        Logging::info("Unhiding all files belonging to:: $dir_path");
+        $dir->unhideFiles();
         die(); # Get rid of this ugliness later
     }
 

@@ -467,9 +467,9 @@ SQL;
             Logging::info("Adding to block");
             Logging::info("at position {$pos}");
         }
 
         foreach ($p_items as $ac) {
-            Logging::info("Adding audio file {$ac}");
+            Logging::info("Adding audio file {$ac[0]}");
             try {
                 if (is_array($ac) && $ac[1] == 'audioclip') {
                     $res = $this->insertBlockElement($this->buildEntry($ac[0], $pos));

@@ -450,8 +450,12 @@ SQL;
     public function unhideFiles()
     {
         $files = $this->_dir->getCcFiless();
+        $hid = 0;
         foreach ($files as $file) {
+            $hid++;
             $file->setDbHidden(false);
             $file->save();
         }
+        Logging::info("unhide '$hid' files");
     }
 }

@@ -206,7 +206,7 @@ class Application_Model_Preference
 
         $fade = number_format($fade, 2);
         //fades need 2 leading zeros for DateTime conversion
-        $fade = rtrim(str_pad($fade, 5, "0", STR_PAD_LEFT), "0");
+        $fade = str_pad($fade, 5, "0", STR_PAD_LEFT);
 
         return $fade;
     }

@@ -1116,7 +1116,6 @@ class Application_Model_Preference
         } else {
             /*For now we just have this hack for debugging. We should not
               rely on this crappy behaviour in case of failure*/
-            Logging::info("Pref: $pref_param");
             Logging::warn("Index $x does not exist preferences");
             Logging::warn("Defaulting to identity and printing preferences");
             Logging::warn($ds);

@@ -321,7 +321,7 @@ SQL;
             ws.description AS file_album_title,
             ws.length AS file_length,
             't'::BOOL AS file_exists,
-            NULL as file_mime
+            ws.mime as file_mime
 SQL;
         $streamJoin = <<<SQL
     cc_schedule AS sched

@@ -674,6 +674,12 @@ SQL;
         $start = self::AirtimeTimeToPypoTime($item["start"]);
         $end = self::AirtimeTimeToPypoTime($item["end"]);
 
+        list(,,,$start_hour,,) = explode("-", $start);
+        list(,,,$end_hour,,) = explode("-", $end);
+
+        $same_hour = $start_hour == $end_hour;
+        $independent_event = !$same_hour;
+
         $schedule_item = array(
             'id' => $media_id,
             'type' => 'file',

@@ -687,7 +693,7 @@ SQL;
             'end' => $end,
             'show_name' => $item["show_name"],
             'replay_gain' => is_null($item["replay_gain"]) ? "0": $item["replay_gain"],
-            'independent_event' => false
+            'independent_event' => $independent_event,
         );
         self::appendScheduleItem($data, $start, $schedule_item);
     }

@@ -859,6 +865,8 @@ SQL;
         $data = array();
         $data["media"] = array();
 
+        //Harbor kick times *MUST* be ahead of schedule events, so that pypo
+        //executes them first.
         self::createInputHarborKickTimes($data, $range_start, $range_end);
         self::createScheduledEvents($data, $range_start, $range_end);
 

@@ -360,7 +360,7 @@ SQL;
     }
 
 
-    // set hidden falg to true
+    // set hidden flag to true
     $this->_file->setDbHidden(true);
     $this->_file->save();
 

@@ -726,7 +726,7 @@ SQL;
 
         $plTable = "({$plSelect} FROM cc_playlist AS PL LEFT JOIN cc_subjs AS sub ON (sub.id = PL.creator_id))";
         $blTable = "({$blSelect} FROM cc_block AS BL LEFT JOIN cc_subjs AS sub ON (sub.id = BL.creator_id))";
-        $fileTable = "({$fileSelect} FROM cc_files AS FILES LEFT JOIN cc_subjs AS sub ON (sub.id = FILES.owner_id) WHERE file_exists = 'TRUE')";
+        $fileTable = "({$fileSelect} FROM cc_files AS FILES LEFT JOIN cc_subjs AS sub ON (sub.id = FILES.owner_id) WHERE file_exists = 'TRUE' AND hidden='FALSE')";
         //$fileTable = "({$fileSelect} FROM cc_files AS FILES WHERE file_exists = 'TRUE')";
         $streamTable = "({$streamSelect} FROM cc_webstream AS ws LEFT JOIN cc_subjs AS sub ON (sub.id = ws.creator_id))";
         $unionTable = "({$plTable} UNION {$blTable} UNION {$fileTable} UNION {$streamTable}) AS RESULTS";

@@ -655,6 +655,7 @@ var AIRTIME = (function(AIRTIME){
             $pl.find('.success').show();
         }
         disableLoadingIcon();
+        setTimeout(removeSuccessMsg, 5000);
     });
 })
 

@@ -400,7 +400,7 @@ function setupUI() {
 
     $(".repeat_tracks_help_icon").qtip({
         content: {
-            text: $.i18n._("If your criteria is too strict, Airtime may not be able to fill up the desired smart block length. Hence, if you check this option, tracks will be used more than once.")
+            text: $.i18n._("The desired block length will not be reached if Airtime cannot find enough unique tracks to match your criteria. Enable this option if you wish to allow tracks to be added multiple times to the smart block.")
         },
         hide: {
             delay: 500,

@@ -25,7 +25,8 @@ echo "----------------------------------------------------"
 dist=`lsb_release -is`
 code=`lsb_release -cs`
 
-if [ "$dist" = "Debian" ]; then
+#enable squeeze backports to get lame packages
+if [ "$dist" = "Debian" -a "$code" = "squeeze" ]; then
     set +e
     grep -E "deb http://backports.debian.org/debian-backports squeeze-backports main" /etc/apt/sources.list
     returncode=$?

@@ -28,7 +28,8 @@ echo "----------------------------------------------------"
 dist=`lsb_release -is`
 code=`lsb_release -cs`
 
-if [ "$dist" -eq "Debian" ]; then
+#enable squeeze backports to get lame packages
+if [ "$dist" = "Debian" -a "$code" = "squeeze" ]; then
     grep "deb http://backports.debian.org/debian-backports squeeze-backports main" /etc/apt/sources.list
     if [ "$?" -ne "0" ]; then
         echo "deb http://backports.debian.org/debian-backports squeeze-backports main" >> /etc/apt/sources.list

@@ -13,9 +13,8 @@ from media.monitor.log import Loggable
 from media.monitor.syncdb import AirtimeDB
 from media.monitor.exceptions import DirectoryIsNotListed
 from media.monitor.bootstrap import Bootstrapper
-from media.monitor.listeners import FileMediator
 
-from media.saas.thread import apc
+from media.saas.thread import apc, user
 
 class AirtimeNotifier(Loggable):
     """

@@ -192,7 +191,7 @@ class AirtimeMessageReceiver(Loggable):
             # request that we'd normally get form pyinotify. But right
             # now event contractor would take care of this sort of
             # thing anyway so this might not be necessary after all
-            FileMediator.ignore(msg['filepath'])
+            #user().file_mediator.ignore(msg['filepath'])
             os.unlink(msg['filepath'])
             # Verify deletion:
             if not os.path.exists(msg['filepath']):

@@ -3,18 +3,15 @@ import os
 import abc
 import re
 import media.monitor.pure as mmp
-import media.monitor.owners as owners
 from media.monitor.pure import LazyProperty
 from media.monitor.metadata import Metadata
 from media.monitor.log import Loggable
 from media.monitor.exceptions import BadSongFile
-from media.saas.thread import getsig
+from media.saas.thread import getsig, user
 
 class PathChannel(object):
-    """
-    Simple struct to hold a 'signal' string and a related 'path'. Basically
-    used as a named tuple
-    """
+    """ Simple struct to hold a 'signal' string and a related 'path'.
+    Basically used as a named tuple """
     def __init__(self, signal, path):
         self.signal = getsig(signal)
         self.path = path

@@ -22,34 +19,24 @@ class PathChannel(object):
 # TODO : Move this to it's file. Also possible unsingleton and use it as a
 # simple module just like m.m.owners
 class EventRegistry(object):
-    """
-    This class's main use is to keep track all events with a cookie attribute.
-    This is done mainly because some events must be 'morphed' into other events
-    because we later detect that they are move events instead of delete events.
-    """
-    registry = {}
-    @staticmethod
-    def register(evt): EventRegistry.registry[evt.cookie] = evt
-    @staticmethod
-    def unregister(evt): del EventRegistry.registry[evt.cookie]
-    @staticmethod
-    def registered(evt): return evt.cookie in EventRegistry.registry
-    @staticmethod
-    def matching(evt):
-        event = EventRegistry.registry[evt.cookie]
+    """ This class's main use is to keep track all events with a cookie
+    attribute. This is done mainly because some events must be 'morphed'
+    into other events because we later detect that they are move events
+    instead of delete events. """
+    def __init__(self):
+        self.registry = {}
+    def register(self,evt): self.registry[evt.cookie] = evt
+    def unregister(self,evt): del self.registry[evt.cookie]
+    def registered(self,evt): return evt.cookie in self.registry
+    def matching(self,evt):
+        event = self.registry[evt.cookie]
         # Want to disallow accessing the same event twice
-        EventRegistry.unregister(event)
+        self.unregister(event)
         return event
-    def __init__(self,*args,**kwargs):
-        raise Exception("You can instantiate this class. Must only use class \
-            methods")
 
 
 class EventProxy(Loggable):
-    """
-    A container object for instances of BaseEvent (or it's subclasses) used for
-    event contractor
-    """
+    """ A container object for instances of BaseEvent (or it's
+    subclasses) used for event contractor """
     def __init__(self, orig_evt):
         self.orig_evt = orig_evt
         self.evt = orig_evt

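The registry change above converts a class-level singleton into a per-instance object: every method gains a self parameter and the cookie table moves into __init__, so each Airtime instance can pair its own pyinotify moved-from/moved-to events. A minimal usage sketch, not part of the diff (FakeEvent is a made-up stand-in for a pyinotify event carrying a cookie):

    registry = EventRegistry()          # one registry per Airtime instance now
    class FakeEvent(object):
        def __init__(self, cookie): self.cookie = cookie

    moved_from = FakeEvent(42)          # IN_MOVED_FROM half of a rename
    registry.register(moved_from)
    moved_to = FakeEvent(42)            # IN_MOVED_TO arrives with the same cookie
    if registry.registered(moved_to):
        original = registry.matching(moved_to)  # fetches and unregisters in one step
        assert original is moved_from
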
@@ -82,12 +69,10 @@ class EventProxy(Loggable):
 
 
 class HasMetaData(object):
-    """
-    Any class that inherits from this class gains the metadata attribute that
-    loads metadata from the class's 'path' attribute. This is done lazily so
-    there is no performance penalty to inheriting from this and subsequent
-    calls to metadata are cached
-    """
+    """ Any class that inherits from this class gains the metadata
+    attribute that loads metadata from the class's 'path' attribute.
+    This is done lazily so there is no performance penalty to inheriting
+    from this and subsequent calls to metadata are cached """
     __metaclass__ = abc.ABCMeta
     @LazyProperty
     def metadata(self): return Metadata(self.path)

@@ -102,7 +87,7 @@ class BaseEvent(Loggable):
             self._raw_event = raw_event
             self.path = os.path.normpath(raw_event.pathname)
         else: self.path = raw_event
-        self.owner = owners.get_owner(self.path)
+        self.owner = user().owner.get_owner(self.path)
         owner_re = re.search('stor/imported/(?P<owner>\d+)/', self.path)
         if owner_re:
             self.logger.info("matched path: %s" % self.path)

@@ -114,11 +99,9 @@ class BaseEvent(Loggable):
 
     # TODO : delete this method later
     def reset_hook(self):
-        """
-        Resets the hook that is called after an event is packed. Before
-        resetting the hook we execute it to make sure that whatever cleanup
-        operations were queued are executed.
-        """
+        """ Resets the hook that is called after an event is packed.
+        Before resetting the hook we execute it to make sure that
+        whatever cleanup operations were queued are executed. """
         self._pack_hook()
         self._pack_hook = lambda: None
 

@@ -132,10 +115,8 @@ class BaseEvent(Loggable):
 
     # TODO : delete this method later
     def add_safe_pack_hook(self,k):
-        """
-        adds a callable object (function) that will be called after the event
-        has been "safe_packed"
-        """
+        """ adds a callable object (function) that will be called after
+        the event has been "safe_packed" """
         self._pack_hook = k
 
     def proxify(self):

@@ -143,17 +124,15 @@ class BaseEvent(Loggable):
 
     # As opposed to unsafe_pack...
     def safe_pack(self):
-        """
-        returns exceptions instead of throwing them to be consistent with
-        events that must catch their own BadSongFile exceptions since generate
-        a set of exceptions instead of a single one
-        """
+        """ returns exceptions instead of throwing them to be consistent
+        with events that must catch their own BadSongFile exceptions
+        since generate a set of exceptions instead of a single one """
         try:
             self._pack_hook()
             ret = self.pack()
             # Remove owner of this file only after packing. Otherwise packing
             # will not serialize the owner correctly into the airtime request
-            owners.remove_file_owner(self.path)
+            user().owner.remove_file_owner(self.path)
             return ret
         except BadSongFile as e: return [e]
         except Exception as e:

@@ -172,42 +151,33 @@ class BaseEvent(Loggable):
         return self
 
     def assign_owner(self,req):
-        """
-        Packs self.owner to req if the owner is valid. I.e. it's not -1. This
-        method is used by various events that would like to pass owner as a
-        parameter. NewFile for example.
-        """
+        """ Packs self.owner to req if the owner is valid. I.e. it's not
+        -1. This method is used by various events that would like to
+        pass owner as a parameter. NewFile for example. """
         if self.owner != -1: req['MDATA_KEY_OWNER_ID'] = self.owner
 
 class FakePyinotify(object):
-    """
-    sometimes we must create our own pyinotify like objects to
+    """ sometimes we must create our own pyinotify like objects to
     instantiate objects from the classes below whenever we want to turn
-    a single event into multiple events
-    """
+    a single event into multiple events """
     def __init__(self, path): self.pathname = path
 
 class OrganizeFile(BaseEvent, HasMetaData):
-    """
-    The only kind of event that does support the pack protocol. It's used
-    internally with mediamonitor to move files in the organize directory.
-    """
+    """ The only kind of event that does support the pack protocol. It's
+    used internally with mediamonitor to move files in the organize
+    directory. """
     def __init__(self, *args, **kwargs):
         super(OrganizeFile, self).__init__(*args, **kwargs)
     def pack(self):
         raise AttributeError("You can't send organize events to airtime!!!")
 
 class NewFile(BaseEvent, HasMetaData):
-    """
-    NewFile events are the only events that contain MDATA_KEY_OWNER_ID metadata
-    in them.
-    """
+    """ NewFile events are the only events that contain
+    MDATA_KEY_OWNER_ID metadata in them. """
     def __init__(self, *args, **kwargs):
         super(NewFile, self).__init__(*args, **kwargs)
     def pack(self):
-        """
-        packs turns an event into a media monitor request
-        """
+        """ packs turns an event into a media monitor request """
         req_dict = self.metadata.extract()
         req_dict['mode'] = u'create'
         req_dict['is_record'] = self.metadata.is_recorded()

@@ -216,11 +186,9 @@ class NewFile(BaseEvent, HasMetaData):
         return [req_dict]
 
 class DeleteFile(BaseEvent):
-    """
-    DeleteFile event only contains the path to be deleted. No other metadata
-    can be or is included. (This is because this event is fired after the
-    deletion occurs).
-    """
+    """ DeleteFile event only contains the path to be deleted. No other
+    metadata can be or is included. (This is because this event is fired
+    after the deletion occurs). """
     def __init__(self, *args, **kwargs):
         super(DeleteFile, self).__init__(*args, **kwargs)
     def pack(self):

@@ -230,9 +198,7 @@ class DeleteFile(BaseEvent):
         return [req_dict]
 
 class MoveFile(BaseEvent, HasMetaData):
-    """
-    Path argument should be the new path of the file that was moved
-    """
+    """ Path argument should be the new path of the file that was moved """
     def __init__(self, *args, **kwargs):
         super(MoveFile, self).__init__(*args, **kwargs)
     def old_path(self):

@@ -256,10 +222,8 @@ class ModifyFile(BaseEvent, HasMetaData):
         return [req_dict]
 
 def map_events(directory, constructor):
-    """
-    Walks 'directory' and creates an event using 'constructor'. Returns a list
-    of the constructed events.
-    """
+    """ Walks 'directory' and creates an event using 'constructor'.
+    Returns a list of the constructed events. """
     # -unknown-path should not appear in the path here but more testing
     # might be necessary
     for f in mmp.walk_supported(directory, clean_empties=False):

@@ -268,30 +232,25 @@ def map_events(directory, constructor):
         except BadSongFile as e: yield e
 
 class DeleteDir(BaseEvent):
-    """
-    A DeleteDir event unfolds itself into a list of DeleteFile events for every
-    file in the directory.
-    """
+    """ A DeleteDir event unfolds itself into a list of DeleteFile
+    events for every file in the directory. """
     def __init__(self, *args, **kwargs):
         super(DeleteDir, self).__init__(*args, **kwargs)
     def pack(self):
         return map_events( self.path, DeleteFile )
 
 class MoveDir(BaseEvent):
-    """
-    A MoveDir event unfolds itself into a list of MoveFile events for every
-    file in the directory.
-    """
+    """ A MoveDir event unfolds itself into a list of MoveFile events
+    for every file in the directory. """
     def __init__(self, *args, **kwargs):
         super(MoveDir, self).__init__(*args, **kwargs)
     def pack(self):
         return map_events( self.path, MoveFile )
 
 class DeleteDirWatch(BaseEvent):
-    """
-    Deleting a watched directory is different from deleting any other
-    directory. Hence we must have a separate event to handle this case
-    """
+    """ Deleting a watched directory is different from deleting any
+    other directory. Hence we must have a separate event to handle this
+    case """
     def __init__(self, *args, **kwargs):
         super(DeleteDirWatch, self).__init__(*args, **kwargs)
     def pack(self):

@@ -6,38 +6,33 @@ from functools import wraps
 import media.monitor.pure as mmp
 from media.monitor.pure import IncludeOnly
 from media.monitor.events import OrganizeFile, NewFile, MoveFile, DeleteFile, \
-                                 DeleteDir, EventRegistry, MoveDir,\
+                                 DeleteDir, MoveDir,\
                                  DeleteDirWatch
-from media.monitor.log import Loggable, get_logger
-from media.saas.thread import getsig
+from media.monitor.log import Loggable
+from media.saas.thread import getsig, user
 # Note: Because of the way classes that inherit from pyinotify.ProcessEvent
 # interact with constructors. you should only instantiate objects from them
 # using keyword arguments. For example:
 # OrganizeListener('watch_signal') <= wrong
 # OrganizeListener(signal='watch_signal') <= right
 
-class FileMediator(object):
-    """
-    FileMediator is used an intermediate mechanism that filters out certain
-    events.
-    """
-    ignored_set = set([]) # for paths only
-    logger = get_logger()
-
-    @staticmethod
-    def is_ignored(path): return path in FileMediator.ignored_set
-    @staticmethod
-    def ignore(path): FileMediator.ignored_set.add(path)
-    @staticmethod
-    def unignore(path): FileMediator.ignored_set.remove(path)
+class FileMediator(Loggable):
+    # TODO : this class is not actually used. remove all references to it
+    # everywhere (including tests).
+    """ FileMediator is used an intermediate mechanism that filters out
+    certain events. """
+    def __init__(self) : self.ignored_set = set([]) # for paths only
+    def is_ignored(self,path) : return path in self.ignored_set
+    def ignore(self, path) : self.ignored_set.add(path)
+    def unignore(self, path) : self.ignored_set.remove(path)
 
 def mediate_ignored(fn):
     @wraps(fn)
     def wrapped(self, event, *args,**kwargs):
         event.pathname = unicode(event.pathname, "utf-8")
-        if FileMediator.is_ignored(event.pathname):
-            FileMediator.logger.info("Ignoring: '%s' (once)" % event.pathname)
-            FileMediator.unignore(event.pathname)
+        if user().file_mediator.is_ignored(event.pathname):
+            user().file_mediator.logger.info("Ignoring: '%s' (once)" % event.pathname)
+            user().file_mediator.unignore(event.pathname)
         else: return fn(self, event, *args, **kwargs)
     return wrapped
 

@@ -80,11 +75,11 @@ class StoreWatchListener(BaseListener, Loggable, pyinotify.ProcessEvent):
     def process_IN_CLOSE_WRITE(self, event):
         self.process_create(event)
     def process_IN_MOVED_TO(self, event):
-        if EventRegistry.registered(event):
+        if user().event_registry.registered(event):
             # We need this trick because we don't how to "expand" dir events
             # into file events until we know for sure if we deleted or moved
             morph = MoveDir(event) if event.dir else MoveFile(event)
-            EventRegistry.matching(event).morph_into(morph)
+            user().event_registry.matching(event).morph_into(morph)
         else: self.process_create(event)
     def process_IN_MOVED_FROM(self, event):
         # Is either delete dir or delete file

@@ -92,7 +87,7 @@ class StoreWatchListener(BaseListener, Loggable, pyinotify.ProcessEvent):
         # evt can be none whenever event points that a file that would be
         # ignored by @IncludeOnly
         if hasattr(event,'cookie') and (evt != None):
-            EventRegistry.register(evt)
+            user().event_registry.register(evt)
     def process_IN_DELETE(self,event): self.process_delete(event)
     def process_IN_MOVE_SELF(self, event):
         if '-unknown-path' in event.pathname:

@@ -1,13 +1,12 @@
 # -*- coding: utf-8 -*-
 import media.monitor.pure as mmp
-import media.monitor.owners as owners
 from media.monitor.handler import ReportHandler
 from media.monitor.log import Loggable
 from media.monitor.exceptions import BadSongFile
 from media.monitor.events import OrganizeFile
 from pydispatch import dispatcher
 from os.path import dirname
-from media.saas.thread import getsig
+from media.saas.thread import getsig, user
 import os.path
 
 class Organizer(ReportHandler,Loggable):

@@ -75,7 +74,7 @@ class Organizer(ReportHandler,Loggable):
         # backwards way is bewcause we are unable to encode the owner id
         # into the file itself so that the StoreWatchListener listener can
         # detect it from the file
-        owners.add_file_owner(new_path, owner_id )
+        user().owner.add_file_owner(new_path, owner_id )
 
         self.logger.info('Organized: "%s" into "%s"' %
                 (event.path, new_path))

@@ -1,44 +1,40 @@
 # -*- coding: utf-8 -*-
-from media.monitor.log import get_logger
-log = get_logger()
-# hash: 'filepath' => owner_id
-owners = {}
-
-def reset_owners():
-    """ Wipes out all file => owner associations """
-    global owners
-    owners = {}
-
-
-def get_owner(f):
-    """ Get the owner id of the file 'f' """
-    o = owners[f] if f in owners else -1
-    log.info("Received owner for %s. Owner: %s" % (f, o))
-    return o
-
-
-def add_file_owner(f,owner):
-    """ Associate file f with owner. If owner is -1 then do we will not record
-    it because -1 means there is no owner. Returns True if f is being stored
-    after the function. False otherwise. """
-    if owner == -1: return False
-    if f in owners:
-        if owner != owners[f]: # check for fishiness
-            log.info("Warning ownership of file '%s' changed from '%d' to '%d'"
-                    % (f, owners[f], owner))
-        else: return True
-    owners[f] = owner
-    return True
-
-def has_owner(f):
-    """ True if f is owned by somebody. False otherwise. """
-    return f in owners
-
-def remove_file_owner(f):
-    """ Try and delete any association made with file f. Returns true if
-    the the association was actually deleted. False otherwise. """
-    if f in owners:
-        del owners[f]
-        return True
-    else: return False
+from media.monitor.log import Loggable
+
+class Owner(Loggable):
+    def __init__(self):
+        # hash: 'filepath' => owner_id
+        self.owners = {}
+
+    def get_owner(self,f):
+        """ Get the owner id of the file 'f' """
+        o = self.owners[f] if f in self.owners else -1
+        self.logger.info("Received owner for %s. Owner: %s" % (f, o))
+        return o
+
+    def add_file_owner(self,f,owner):
+        """ Associate file f with owner. If owner is -1 then do we will not record
+        it because -1 means there is no owner. Returns True if f is being stored
+        after the function. False otherwise. """
+        if owner == -1: return False
+        if f in self.owners:
+            if owner != self.owners[f]: # check for fishiness
+                self.logger.info("Warning ownership of file '%s' changed from '%d' to '%d'"
+                        % (f, self.owners[f], owner))
+            else: return True
+        self.owners[f] = owner
+        return True
+
+    def has_owner(self,f):
+        """ True if f is owned by somebody. False otherwise. """
+        return f in self.owners
+
+    def remove_file_owner(self,f):
+        """ Try and delete any association made with file f. Returns true if
+        the the association was actually deleted. False otherwise. """
+        if f in self.owners:
+            del self.owners[f]
+            return True
+        else: return False

@@ -3,6 +3,7 @@ import media.monitor.pure as mmp
 import os
 from media.monitor.log import Loggable
 from media.monitor.exceptions import CouldNotCreateIndexFile
+from media.saas.thread import InstanceInheritingThread
 
 class Toucher(Loggable):
     """

@@ -17,56 +18,23 @@ class Toucher(Loggable):
                                            self.path)
             self.logger.info(str(e))
 
-#http://code.activestate.com/lists/python-ideas/8982/
-from datetime import datetime
 import time
-
 import threading
 
-class RepeatTimer(threading.Thread):
-    def __init__(self, interval, callable, args=[], kwargs={}):
-        threading.Thread.__init__(self)
-        # interval_current shows number of milliseconds in currently triggered
-        # <tick>
-        self.interval_current = interval
-        # interval_new shows number of milliseconds for next <tick>
-        self.interval_new = interval
+class RepeatTimer(InstanceInheritingThread):
+    def __init__(self, interval, callable, *args, **kwargs):
+        super(RepeatTimer, self).__init__()
+        self.interval = interval
         self.callable = callable
         self.args = args
         self.kwargs = kwargs
-        self.event = threading.Event()
-        self.event.set()
-        self.activation_dt = None
-        self.__timer = None
 
     def run(self):
-        while self.event.is_set():
-            self.activation_dt = datetime.utcnow()
-            self.__timer = threading.Timer(self.interval_new,
-                                           self.callable,
-                                           self.args,
-                                           self.kwargs)
-            self.interval_current = self.interval_new
-            self.__timer.start()
-            self.__timer.join()
-
-    def cancel(self):
-        self.event.clear()
-        if self.__timer is not None:
-            self.__timer.cancel()
-
-    def trigger(self):
-        self.callable(*self.args, **self.kwargs)
-        if self.__timer is not None:
-            self.__timer.cancel()
-
-    def change_interval(self, value):
-        self.interval_new = value
+        while True:
+            time.sleep(self.interval)
+            self.callable(*self.args, **self.kwargs)
 
 class ToucherThread(Loggable):
-    """
-    Creates a thread that touches a file 'path' every 'interval' seconds
-    """
+    """ Creates a thread that touches a file 'path' every 'interval'
+    seconds """
     def __init__(self, path, interval=5):
         if not os.path.exists(path):
             try:

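The rewritten RepeatTimer drops the old cancel()/trigger()/change_interval() machinery for a plain sleep loop, and inherits the owning Airtime instance by subclassing InstanceInheritingThread. A usage sketch under those assumptions (the callable and path below are illustrative only):

    def touch(path):
        open(path, 'a').close()          # bump the file's mtime

    timer = RepeatTimer(5, touch, '/tmp/index_file')
    timer.daemon = True                  # don't keep the process alive on exit
    timer.start()                        # calls touch() every 5 seconds, forever
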
@@ -4,6 +4,9 @@ from os.path import join
 from media.monitor.exceptions import NoConfigFile
 from media.monitor.pure import LazyProperty
 from media.monitor.config import MMConfig
+from media.monitor.owners import Owner
+from media.monitor.events import EventRegistry
+from media.monitor.listeners import FileMediator
 from api_clients.api_client import AirtimeApiClient
 
 # poor man's phantom types...

@@ -46,3 +49,20 @@ class AirtimeInstance(object):
     @LazyProperty
     def mm_config(self):
         return MMConfig(self.config_paths['media_monitor'])
+
+    # NOTE to future code monkeys:
+    # I'm well aware that I'm using the shitty service locator pattern
+    # instead of normal constructor injection as I should be. The reason
+    # for this is that I found these issues a little too close to the
+    # end of my tenure. It's highly recommended to rewrite this crap
+    # using proper constructor injection if you ever have the time
+
+    @LazyProperty
+    def owner(self): return Owner()
+
+    @LazyProperty
+    def event_registry(self): return EventRegistry()
+
+    @LazyProperty
+    def file_mediator(self): return FileMediator()

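The NOTE above names the pattern this whole commit implements: the former module-level singletons (owners, EventRegistry, FileMediator) become lazily built per-instance objects, reached through the current thread's AirtimeInstance. A sketch of how the rest of the diff consumes these accessors (ignore_path is a hypothetical caller, not code from the commit):

    from media.saas.thread import user

    def ignore_path(path):
        # user() resolves to the current thread's AirtimeInstance; each
        # LazyProperty above builds its object on first access and caches it.
        user().file_mediator.ignore(path)
        return user().owner.get_owner(path)
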
@@ -1,12 +1,13 @@
 import threading
 
 class UserlessThread(Exception):
-    def __str__():
+    def __str__(self):
         return "Current thread: %s is not an instance of InstanceThread \
                 of InstanceInheritingThread" % str(threading.current_thread())
 
 class HasUser(object):
     def user(self): return self._user
+    def assign_user(self): self._user = threading.current_thread().user()
 
 class InstanceThread(threading.Thread, HasUser):
     def __init__(self,user, *args, **kwargs):

@@ -15,7 +16,7 @@ class InstanceThread(threading.Thread, HasUser):
 
 class InstanceInheritingThread(threading.Thread, HasUser):
     def __init__(self, *args, **kwargs):
-        self._user = threading.current_thread().user()
+        self.assign_user()
         super(InstanceInheritingThread, self).__init__(*args, **kwargs)
 
 def user():

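For context, the two thread classes let user() resolve anywhere in media monitor: an InstanceThread is handed its AirtimeInstance explicitly, while an InstanceInheritingThread copies it from whichever thread spawned it (via the assign_user() added above). A sketch of the assumed semantics:

    from media.saas.thread import InstanceThread, InstanceInheritingThread, user

    class Worker(InstanceInheritingThread):
        def run(self):
            print user()       # same AirtimeInstance the parent was given

    def boot(airtime_instance):
        def body():
            Worker().start()   # Worker inherits body's thread instance
        InstanceThread(airtime_instance, target=body).start()
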
@@ -254,29 +254,15 @@ def output_to(output_type, type, bitrate, host, port, pass, mount_point, url, de
     if user == "" then
         user_ref := "source"
     end
 
-    description_ref = ref description
-    if description == "" then
-        description_ref := "N/A"
-    end
-
-    genre_ref = ref genre
-    if genre == "" then
-        genre_ref := "N/A"
-    end
-
-    url_ref = ref url
-    if url == "" then
-        url_ref := "N/A"
-    end
-
     output.shoutcast_mono = output.shoutcast(id = "shoutcast_stream_#{stream}",
             host = host,
             port = port,
             password = pass,
             fallible = true,
-            url = !url_ref,
-            genre = !genre_ref,
-            name = !description_ref,
+            url = url,
+            genre = genre,
+            name = description,
             user = !user_ref,
             on_error = on_error,
             on_connect = on_connect)

@@ -286,9 +272,9 @@ def output_to(output_type, type, bitrate, host, port, pass, mount_point, url, de
             port = port,
             password = pass,
             fallible = true,
-            url = !url_ref,
-            genre = !genre_ref,
-            name = !description_ref,
+            url = url,
+            genre = genre,
+            name = description,
             user = !user_ref,
             on_error = on_error,
             on_connect = on_connect)

@@ -390,13 +376,6 @@ def add_skip_command(s)
                 "skip",fun(s) -> begin log("source.skip") skip(s) end)
 end
 
-dyn_out = output.icecast(%wav,
-                         host="localhost",
-                         port=8999,
-                         password=stream_harbor_pass,
-                         mount="test-harbor",
-                         fallible=true)
-
 def set_dynamic_source_id(id) =
     current_dyn_id := id
     string_of(!current_dyn_id)

@@ -406,123 +385,159 @@ def get_dynamic_source_id() =
     string_of(!current_dyn_id)
 end
 
-#cc-4633
-
-# Function to create a playlist source and output it.
-def create_dynamic_source(uri) =
-    # The playlist source
-    s = audio_to_stereo(input.http(buffer=2., max=12., uri))
-
-    # The output
-    active_dyn_out = dyn_out(s)
+# NOTE
+# A few values are hardcoded and may be dependent:
+#  - the delay in gracetime is linked with the buffer duration of input.http
+#    (delay should be a bit less than buffer)
+#  - crossing duration should be less than buffer length
+#    (at best, a higher duration will be ineffective)
 
-    # We register both source and output
-    # in the list of sources
-    dyn_sources :=
-        list.append([(!current_dyn_id, s),(!current_dyn_id, active_dyn_out)], !dyn_sources)
+# HTTP input with "restart" command that waits for "stop" to be effected
+# before "start" command is issued. Optionally it takes a new URL to play,
+# which makes it a convenient replacement for "url".
+# In the future, this may become a core feature of the HTTP input.
+# TODO If we stop and restart quickly several times in a row,
+# the data bursts accumulate and create buffer overflow.
+# Flushing the buffer on restart could be a good idea, but
+# it would also create an interruptions while the buffer is
+# refilling... on the other hand, this would avoid having to
+# fade using both cross() and switch().
+def input.http_restart(~id,~initial_url="http://dummy/url")
 
-    notify([("schedule_table_id", !current_dyn_id)])
-    "Done!"
-end
+  source = input.http(buffer=5.,max=15.,id=id,autostart=false,initial_url)
+
+  def stopped()
+    "stopped" == list.hd(server.execute("#{id}.status"))
+  end
+
+  server.register(namespace=id,
+                  "restart",
+                  usage="restart [url]",
+                  fun (url) -> begin
+                    if url != "" then
+                      log(string_of(server.execute("#{id}.url #{url}")))
+                    end
+                    log(string_of(server.execute("#{id}.stop")))
+                    add_timeout(0.5,
+                      { if stopped() then
+                          log(string_of(server.execute("#{id}.start"))) ;
+                          (-1.)
+                        else 0.5 end})
+                    "OK"
+                  end)
+
+  # Dummy output should be useless if HTTP stream is meant
+  # to be listened to immediately. Otherwise, apply it.
+  #
+  # output.dummy(fallible=true,source)
+
+  source
+end
 
-# A function to destroy a dynamic source
-def destroy_dynamic_source(id) =
-    # We need to find the source in the list,
-    # remove it and destroy it. Currently, the language
-    # lacks some nice operators for that so we do it
-    # the functional way
-
-    # This function is executed on every item in the list
-    # of dynamic sources
-    def parse_list(ret, current_element) =
-        # ret is of the form: (matching_sources, remaining_sources)
-        # We extract those two:
-        matching_sources = fst(ret)
-        remaining_sources = snd(ret)
-
-        # current_element is of the form: ("uri", source) so
-        # we check the first element
-        current_id = fst(current_element)
-        if current_id == id then
-            # In this case, we add the source to the list of
-            # matched sources
-            (list.append( [snd(current_element)],
-                            matching_sources),
-                remaining_sources)
-        else
-            # In this case, we put the element in the list of remaining
-            # sources
-            (matching_sources,
-                list.append([current_element],
-                            remaining_sources))
-        end
-    end
-
-    # Now we execute the function:
-    result = list.fold(parse_list, ([], []), !dyn_sources)
-    matching_sources = fst(result)
-    remaining_sources = snd(result)
-
-    # We store the remaining sources in dyn_sources
-    dyn_sources := remaining_sources
-
-    # If no source matched, we return an error
-    if list.length(matching_sources) == 0 then
-        "Error: no matching sources!"
-    else
-        # We stop all sources
-        list.iter(source.shutdown, matching_sources)
-        # And return
-        "Done!"
-    end
-end
+# Transitions between URL changes in HTTP streams.
+def cross_http(~debug=true,~http_input_id,source)
+
+  id = http_input_id
+  last_url = ref ""
+  change = ref false
+
+  def on_m(m)
+    notify_stream(m)
+    changed = m["source_url"] != !last_url
+    log("URL now #{m['source_url']} (change: #{changed})")
+    if changed then
+      if !last_url != "" then change := true end
+      last_url := m["source_url"]
+    end
+  end
+
+  # We use both metadata and status to know about the current URL.
+  # Using only metadata may be more precise is crazy corner cases,
+  # but it's also asking too much: the metadata may not pass through
+  # before the crosser is instantiated.
+  # Using only status in crosser misses some info, eg. on first URL.
+  source = on_metadata(on_m,source)
+  cross_d = 3.
+
+  def crosser(a,b)
+    url = list.hd(server.execute('#{id}.url'))
+    status = list.hd(server.execute('#{id}.status'))
+    on_m([("source_url",url)])
+    if debug then
+      log("New track inside HTTP stream")
+      log("  status: #{status}")
+      log("  need to cross: #{!change}")
+      log("  remaining #{source.remaining(a)} sec before, \
+           #{source.remaining(b)} sec after")
+    end
+    if !change then
+      change := false
+      # In principle one should avoid crossing on a live stream
+      # it'd be okay to do it here (eg. use add instead of sequence)
+      # because it's only once per URL, but be cautious.
+      sequence([fade.out(duration=cross_d,a),fade.in(b)])
+    else
+      # This is done on tracks inside a single stream.
+      # Do NOT cross here or you'll gradually empty the buffer!
+      sequence([a,b])
+    end
+  end
+
+  # Setting conservative=true would mess with the delayed switch below
+  cross(duration=cross_d,conservative=false,crosser,source)
+end
 
-# A function to destroy a dynamic source
-def destroy_dynamic_source_all() =
-    # We need to find the source in the list,
-    # remove it and destroy it. Currently, the language
-    # lacks some nice operators for that so we do it
-    # the functional way
-
-    # This function is executed on every item in the list
-    # of dynamic sources
-    def parse_list(ret, current_element) =
-        # ret is of the form: (matching_sources, remaining_sources)
-        # We extract those two:
-        matching_sources = fst(ret)
-        remaining_sources = snd(ret)
-
-        # current_element is of the form: ("uri", source) so
-        # we check the first element
-        current_uri = fst(current_element)
-        # in this case, we add the source to the list of
-        # matched sources
-        (list.append( [snd(current_element)],
-                        matching_sources),
-            remaining_sources)
-    end
-
-    # now we execute the function:
-    result = list.fold(parse_list, ([], []), !dyn_sources)
-    matching_sources = fst(result)
-    remaining_sources = snd(result)
-
-    # we store the remaining sources in dyn_sources
-    dyn_sources := remaining_sources
-
-    # if no source matched, we return an error
-    if list.length(matching_sources) == 0 then
-        "error: no matching sources!"
-    else
-        # we stop all sources
-        list.iter(source.shutdown, matching_sources)
-        # And return
-        "Done!"
-    end
-end
+# Custom fallback between http and default source with fading of
+# beginning and end of HTTP stream.
+# It does not take potential URL changes into account, as long as
+# they do not interrupt streaming (thanks to the HTTP buffer).
+def http_fallback(~http_input_id,~http,~default)
+
+  id = http_input_id
+
+  # We use a custom switching predicate to trigger switching (and thus,
+  # transitions) before the end of a track (rather, end of HTTP stream).
+  # It is complexified because we don't want to trigger switching when
+  # HTTP disconnects for just an instant, when changing URL: for that
+  # we use gracetime below.
+
+  def gracetime(~delay=3.,f)
+    last_true = ref 0.
+    { if f() then
+        last_true := gettimeofday()
+        true
+      else
+        gettimeofday() < !last_true+delay
+      end }
+  end
+
+  def connected()
+    status = list.hd(server.execute("#{id}.status"))
+    not(list.mem(status,["polling","stopped"]))
+  end
+  connected = gracetime(connected)
+
+  def to_live(a,b) =
+    log("TRANSITION to live")
+    add(normalize=false,
+        [fade.initial(b),fade.final(a)])
+  end
+  def to_static(a,b) =
+    log("TRANSITION to static")
+    sequence([fade.out(a),fade.initial(b)])
+  end
+
+  switch(
+    track_sensitive=false,
+    transitions=[to_live,to_static],
+    [(# make sure it is connected, and not buffering
+      {connected() and source.is_ready(http) and !webstream_enabled}, http),
+     ({true},default)])
+end

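The gracetime() helper above is the key trick in http_fallback: once its predicate has been true, it keeps reporting true for `delay` more seconds, so a momentary HTTP disconnect during a URL change does not flip the switch() away from the stream. The same idea in Python, as a sketch only:

    import time

    def gracetime(f, delay=3.0):
        last_true = [0.0]                    # mutable cell, like liquidsoap's ref
        def wrapped():
            if f():
                last_true[0] = time.time()
                return True
            return time.time() < last_true[0] + delay
        return wrapped
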
@@ -7,11 +7,11 @@ set("server.telnet", true)
 set("server.telnet.port", 1234)
 
 #Dynamic source list
-dyn_sources = ref []
+#dyn_sources = ref []
 webstream_enabled = ref false
 
 time = ref string_of(gettimeofday())
-queue = audio_to_stereo(id="queue_src", request.equeue(id="queue", length=0.5))
+queue = audio_to_stereo(id="queue_src", mksafe(request.equeue(id="queue", length=0.5)))
 queue = cue_cut(queue)
 queue = amplify(1., override="replay_gain", queue)
 

|
@ -35,15 +35,19 @@ s2_namespace = ref ''
|
|||
s3_namespace = ref ''
|
||||
just_switched = ref false
|
||||
|
||||
stream_harbor_pass = list.hd(get_process_lines('pwgen -s -N 1 -n 20'))
|
||||
#stream_harbor_pass = list.hd(get_process_lines('pwgen -s -N 1 -n 20'))
|
||||
|
||||
%include "ls_lib.liq"
|
||||
|
||||
web_stream = input.harbor("test-harbor", port=8999, password=stream_harbor_pass)
|
||||
web_stream = on_metadata(notify_stream, web_stream)
|
||||
output.dummy(fallible=true, web_stream)
|
||||
#web_stream = input.harbor("test-harbor", port=8999, password=stream_harbor_pass)
|
||||
#web_stream = on_metadata(notify_stream, web_stream)
|
||||
#output.dummy(fallible=true, web_stream)
|
||||
|
||||
|
||||
http = input.http_restart(id="http")
|
||||
http = cross_http(http_input_id="http",http)
|
||||
stream_queue = http_fallback(http_input_id="http",http=http,default=queue)
|
||||
|
||||
# the crossfade function controls fade in/out
|
||||
queue = crossfade_airtime(queue)
|
||||
queue = on_metadata(notify, queue)
|
||||
|
@@ -51,10 +55,10 @@ queue = map_metadata(update=false, append_title, queue)
 output.dummy(fallible=true, queue)
 
 
-stream_queue = switch(id="stream_queue_switch", track_sensitive=false,
-                      transitions=[transition, transition],
-                      [({!webstream_enabled},web_stream),
-                       ({true}, queue)])
+#stream_queue = switch(id="stream_queue_switch", track_sensitive=false,
+#                      transitions=[transition, transition],
+#                      [({!webstream_enabled},web_stream),
+#                       ({true}, queue)])
 
 ignore(output.dummy(stream_queue, fallible=true))
 

|
@ -92,32 +96,33 @@ server.register(namespace="dynamic_source",
|
|||
fun (s) -> begin log("dynamic_source.output_stop") webstream_enabled := false "disabled" end)
|
||||
|
||||
server.register(namespace="dynamic_source",
|
||||
description="Set the cc_schedule row id",
|
||||
description="Set the streams cc_schedule row id",
|
||||
usage="id <id>",
|
||||
"id",
|
||||
fun (s) -> begin log("dynamic_source.id") set_dynamic_source_id(s) end)
|
||||
|
||||
server.register(namespace="dynamic_source",
|
||||
description="Get the cc_schedule row id",
|
||||
description="Get the streams cc_schedule row id",
|
||||
usage="get_id",
|
||||
"get_id",
|
||||
fun (s) -> begin log("dynamic_source.get_id") get_dynamic_source_id() end)
|
||||
|
||||
server.register(namespace="dynamic_source",
|
||||
description="Start a new dynamic source.",
|
||||
usage="start <uri>",
|
||||
"read_start",
|
||||
fun (uri) -> begin log("dynamic_source.read_start") create_dynamic_source(uri) end)
|
||||
server.register(namespace="dynamic_source",
|
||||
description="Stop a dynamic source.",
|
||||
usage="stop <id>",
|
||||
"read_stop",
|
||||
fun (s) -> begin log("dynamic_source.read_stop") destroy_dynamic_source(s) end)
|
||||
server.register(namespace="dynamic_source",
|
||||
description="Stop a dynamic source.",
|
||||
usage="stop <id>",
|
||||
"read_stop_all",
|
||||
fun (s) -> begin log("dynamic_source.read_stop") destroy_dynamic_source_all() end)
|
||||
#server.register(namespace="dynamic_source",
|
||||
# description="Start a new dynamic source.",
|
||||
# usage="start <uri>",
|
||||
# "read_start",
|
||||
# fun (uri) -> begin log("dynamic_source.read_start") begin_stream_read(uri) end)
|
||||
#server.register(namespace="dynamic_source",
|
||||
# description="Stop a dynamic source.",
|
||||
# usage="stop <id>",
|
||||
# "read_stop",
|
||||
# fun (s) -> begin log("dynamic_source.read_stop") stop_stream_read(s) end)
|
||||
|
||||
#server.register(namespace="dynamic_source",
|
||||
# description="Stop a dynamic source.",
|
||||
# usage="stop <id>",
|
||||
# "read_stop_all",
|
||||
# fun (s) -> begin log("dynamic_source.read_stop") destroy_dynamic_source_all() end)
|
||||
|
||||
default = amplify(id="silence_src", 0.00001, noise())
|
||||
default = rewrite_metadata([("artist","Airtime"), ("title", "offline")], default)
|
||||
|
|
|
@@ -478,8 +478,8 @@ class PypoPush(Thread):
             self.logger.debug(msg)
             tn.write(msg)
 
             #example: dynamic_source.read_start http://87.230.101.24:80/top100station.mp3
-            msg = 'dynamic_source.read_start %s\n' % media_item['uri'].encode('latin-1')
+            #msg = 'dynamic_source.read_start %s\n' % media_item['uri'].encode('latin-1')
+            msg = 'http.restart %s\n' % media_item['uri'].encode('latin-1')
             self.logger.debug(msg)
             tn.write(msg)

|
@ -520,7 +520,8 @@ class PypoPush(Thread):
|
|||
self.telnet_lock.acquire()
|
||||
tn = telnetlib.Telnet(LS_HOST, LS_PORT)
|
||||
|
||||
msg = 'dynamic_source.read_stop_all xxx\n'
|
||||
#msg = 'dynamic_source.read_stop_all xxx\n'
|
||||
msg = 'http.stop\n'
|
||||
self.logger.debug(msg)
|
||||
tn.write(msg)
|
||||
|
||||
|
@ -546,7 +547,8 @@ class PypoPush(Thread):
|
|||
tn = telnetlib.Telnet(LS_HOST, LS_PORT)
|
||||
#dynamic_source.stop http://87.230.101.24:80/top100station.mp3
|
||||
|
||||
msg = 'dynamic_source.read_stop %s\n' % media_item['row_id']
|
||||
#msg = 'dynamic_source.read_stop %s\n' % media_item['row_id']
|
||||
msg = 'http.stop\n'
|
||||
self.logger.debug(msg)
|
||||
tn.write(msg)
|
||||
|
||||
|
|
|
@@ -8,6 +8,9 @@ import json
 import shutil
 import commands
 
+sys.path.append('/usr/lib/airtime/media-monitor/mm2/')
+from media.monitor.pure import is_file_supported
+
 # create logger
 logger = logging.getLogger()

|
@ -53,8 +56,7 @@ def copy_or_move_files_to(paths, dest, flag):
|
|||
copy_or_move_files_to(sub_path, dest, flag)
|
||||
elif(os.path.isfile(path)):
|
||||
#copy file to dest
|
||||
ext = os.path.splitext(path)[1]
|
||||
if( 'mp3' in ext or 'ogg' in ext ):
|
||||
if(is_file_supported(path)):
|
||||
destfile = dest+os.path.basename(path)
|
||||
if(flag == 'copy'):
|
||||
print "Copying %(src)s to %(dest)s..." % {'src':path, 'dest':destfile}
|
||||
|
@@ -159,7 +161,7 @@ def WatchAddAction(option, opt, value, parser):
         path = currentDir+path
     path = apc.encode_to(path, 'utf-8')
     if(os.path.isdir(path)):
-        os.chmod(path, 0765)
+        #os.chmod(path, 0765)
         res = api_client.add_watched_dir(path)
         if(res is None):
             exit("Unable to connect to the server.")

@@ -27,14 +27,14 @@ def printUsage():
     print " -m mount (default: test) "
     print " -h show help menu"
 
 
 def find_liquidsoap_binary():
     """
     Starting with Airtime 2.0, we don't know the exact location of the Liquidsoap
     binary because it may have been installed through a debian package. Let's find
     the location of this binary.
     """
 
     rv = subprocess.call("which airtime-liquidsoap > /dev/null", shell=True)
     if rv == 0:
         return "airtime-liquidsoap"

@@ -78,7 +78,7 @@ for o, a in optlist:
         mount = a
 
 try:
 
     print "Protocol: %s " % stream_type
     print "Host: %s" % host
     print "Port: %s" % port

@@ -86,35 +86,35 @@ try:
     print "Password: %s" % password
     if stream_type == "icecast":
         print "Mount: %s\n" % mount
 
     url = "http://%s:%s/%s" % (host, port, mount)
     print "Outputting to %s streaming server. You should be able to hear a monotonous tone on '%s'. Press ctrl-c to quit." % (stream_type, url)
 
     liquidsoap_exe = find_liquidsoap_binary()
 
     if liquidsoap_exe is None:
         raise Exception("Liquidsoap not found!")
 
     if stream_type == "icecast":
         command = "%s 'output.icecast(%%vorbis, host = \"%s\", port = %s, user= \"%s\", password = \"%s\", mount=\"%s\", sine())'" % (liquidsoap_exe, host, port, user, password, mount)
     else:
         command = "%s /usr/lib/airtime/pypo/bin/liquidsoap_scripts/library/pervasives.liq 'output.shoutcast(%%mp3, host=\"%s\", port = %s, user= \"%s\", password = \"%s\", sine())'" \
                 % (liquidsoap_exe, host, port, user, password)
 
     if not verbose:
         command += " 2>/dev/null | grep \"failed\""
     else:
         print command
 
     #print command
     rv = subprocess.call(command, shell=True)
 
     #if we reach this point, it means that our subprocess exited without the user
-    #doing a keyboard interrupt. This means there was a problem outputting to the 
+    #doing a keyboard interrupt. This means there was a problem outputting to the
     #stream server. Print appropriate message.
     print "There was an error with your stream configuration. Please review your configuration " + \
           "and run this program again. Use the -h option for help"
 
 except KeyboardInterrupt, ki:
     print "\nExiting"
 except Exception, e: