Merge branch 'cc-3336-refactor-pypo' into devel
commit 057e377c5f
@@ -276,17 +276,19 @@ class ApiController extends Zend_Controller_Action
        $api_key = $this->_getParam('api_key');

        /*
        if(!in_array($api_key, $CC_CONFIG["apiKey"]))
        {
            header('HTTP/1.0 401 Unauthorized');
            print 'You are not allowed to access this resource. ';
            exit;
        }
        * */

        PEAR::setErrorHandling(PEAR_ERROR_RETURN);

        $result = Application_Model_Schedule::GetScheduledPlaylists();
        echo json_encode($result);
        $data = Application_Model_Schedule::GetScheduledPlaylists();
        echo json_encode($data, JSON_FORCE_OBJECT);
    }

    public function notifyMediaItemStartPlayAction()
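Note on the json_encode() change above: when the schedule is empty, json_encode(array()) emits a JSON list ("[]") unless JSON_FORCE_OBJECT is passed, while pypo indexes the payload as a dictionary. A minimal illustration of the difference on the Python side (not part of this commit):

import json

# Without JSON_FORCE_OBJECT an empty schedule arrives as "[]", which decodes
# to a list and breaks dictionary-style access on the pypo side.
print(type(json.loads('[]')))   # <type 'list'>
print(type(json.loads('{}')))   # <type 'dict'>

# pypo expects this to work even when nothing is scheduled:
schedule = json.loads('{"status": {}, "media": {}}')
media = schedule["media"]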
@@ -46,87 +46,6 @@ class Application_Model_Schedule {
    }

    /**
     * Returns array indexed by:
     *    "playlistId"/"playlist_id" (aliases to the same thing)
     *    "start"/"starts" (aliases to the same thing) as YYYY-MM-DD HH:MM:SS.nnnnnn
     *    "end"/"ends" (aliases to the same thing) as YYYY-MM-DD HH:MM:SS.nnnnnn
     *    "group_id"/"id" (aliases to the same thing)
     *    "clip_length" (for audio clips this is the length of the audio clip,
     *       for playlists this is the length of the entire playlist)
     *    "name" (playlist only)
     *    "creator" (playlist only)
     *    "file_id" (audioclip only)
     *    "count" (number of items in the playlist, always 1 for audioclips.
     *       Note that playlists with one item will also have count = 1.
     *
     * @param string $p_fromDateTime
     *    In the format YYYY-MM-DD HH:MM:SS.nnnnnn
     * @param string $p_toDateTime
     *    In the format YYYY-MM-DD HH:MM:SS.nnnnnn
     * @param boolean $p_playlistsOnly
     *    Retrieve playlists as a single item.
     * @return array
     *    Returns empty array if nothing found
     */

    public static function GetItems($p_currentDateTime, $p_toDateTime, $p_playlistsOnly = true)
    {
        global $CC_CONFIG, $CC_DBC;
        $rows = array();
        if (!$p_playlistsOnly) {
            $sql = "SELECT * FROM ".$CC_CONFIG["scheduleTable"]
                ." WHERE (starts >= TIMESTAMP '$p_currentDateTime') "
                ." AND (ends <= TIMESTAMP '$p_toDateTime')";
            $rows = $CC_DBC->GetAll($sql);
            foreach ($rows as &$row) {
                $row["count"] = "1";
                $row["playlistId"] = $row["playlist_id"];
                $row["start"] = $row["starts"];
                $row["end"] = $row["ends"];
                $row["id"] = $row["group_id"];
            }
        } else {
            $sql = "SELECT MIN(pt.creator) AS creator,"
                ." st.group_id,"
                ." SUM(st.clip_length) AS clip_length,"
                ." MIN(st.file_id) AS file_id,"
                ." COUNT(*) as count,"
                ." MIN(st.playlist_id) AS playlist_id,"
                ." MIN(st.starts) AS starts,"
                ." MAX(st.ends) AS ends,"
                ." MIN(sh.name) AS show_name,"
                ." MIN(si.starts) AS show_start,"
                ." MAX(si.ends) AS show_end"
                ." FROM $CC_CONFIG[scheduleTable] as st"
                ." LEFT JOIN $CC_CONFIG[playListTable] as pt"
                ." ON st.playlist_id = pt.id"
                ." LEFT JOIN $CC_CONFIG[showInstances] as si"
                ." ON st.instance_id = si.id"
                ." LEFT JOIN $CC_CONFIG[showTable] as sh"
                ." ON si.show_id = sh.id"
                //The next line ensures we only get songs that haven't ended yet
                ." WHERE (st.ends >= TIMESTAMP '$p_currentDateTime')"
                ." AND (st.ends <= TIMESTAMP '$p_toDateTime')"
                //next line makes sure that we aren't returning items that
                //are past the show's scheduled timeslot.
                ." AND (st.starts < si.ends)"
                ." GROUP BY st.group_id"
                ." ORDER BY starts";

            $rows = $CC_DBC->GetAll($sql);
            if (!PEAR::isError($rows)) {
                foreach ($rows as &$row) {
                    $row["playlistId"] = $row["playlist_id"];
                    $row["start"] = $row["starts"];
                    $row["end"] = $row["ends"];
                    $row["id"] = $row["group_id"];
                }
            }
        }
        return $rows;
    }

    /**
     * Returns data related to the scheduled items.
     *
@@ -475,7 +394,115 @@ class Application_Model_Schedule {
        }
        return $diff;
    }

    /**
     * Returns array indexed by:
     *    "playlistId"/"playlist_id" (aliases to the same thing)
     *    "start"/"starts" (aliases to the same thing) as YYYY-MM-DD HH:MM:SS.nnnnnn
     *    "end"/"ends" (aliases to the same thing) as YYYY-MM-DD HH:MM:SS.nnnnnn
     *    "group_id"/"id" (aliases to the same thing)
     *    "clip_length" (for audio clips this is the length of the audio clip,
     *       for playlists this is the length of the entire playlist)
     *    "name" (playlist only)
     *    "creator" (playlist only)
     *    "file_id" (audioclip only)
     *    "count" (number of items in the playlist, always 1 for audioclips.
     *       Note that playlists with one item will also have count = 1.
     *
     * @param string $p_fromDateTime
     *    In the format YYYY-MM-DD HH:MM:SS.nnnnnn
     * @param string $p_toDateTime
     *    In the format YYYY-MM-DD HH:MM:SS.nnnnnn
     * @param boolean $p_playlistsOnly
     *    Retrieve playlists as a single item.
     * @return array
     *    Returns null if nothing found
     */
    public static function GetItems($p_currentDateTime, $p_toDateTime) {
        global $CC_CONFIG, $CC_DBC;
        $rows = array();

        $sql = "SELECT st.file_id AS file_id,"
            ." st.starts AS start,"
            ." st.ends AS end,"
            ." si.starts as show_start,"
            ." si.ends as show_end"
            ." FROM $CC_CONFIG[scheduleTable] as st"
            ." LEFT JOIN $CC_CONFIG[showInstances] as si"
            ." ON st.instance_id = si.id"
            ." ORDER BY start";

        Logging::log($sql);

        $rows = $CC_DBC->GetAll($sql);
        if (PEAR::isError($rows)) {
            return null;
        }

        return $rows;
    }

    public static function GetScheduledPlaylists($p_fromDateTime = null, $p_toDateTime = null){

        global $CC_CONFIG, $CC_DBC;

        /* if $p_fromDateTime and $p_toDateTime function parameters are null, then set range
         * from "now" to "now + 24 hours". */
        if (is_null($p_fromDateTime)) {
            $t1 = new DateTime("@".time());
            $range_start = $t1->format("Y-m-d H:i:s");
        } else {
            $range_start = Application_Model_Schedule::PypoTimeToAirtimeTime($p_fromDateTime);
        }
        if (is_null($p_toDateTime)) {
            $t2 = new DateTime("@".time());
            $t2->add(new DateInterval("PT24H"));
            $range_end = $t2->format("Y-m-d H:i:s");
        } else {
            $range_end = Application_Model_Schedule::PypoTimeToAirtimeTime($p_toDateTime);
        }

        // Scheduler wants everything in a playlist
        $items = Application_Model_Schedule::GetItems($range_start, $range_end);

        $data = array();
        $utcTimeZone = new DateTimeZone("UTC");

        $data["status"] = array();
        $data["media"] = array();

        foreach ($items as $item){

            $storedFile = Application_Model_StoredFile::Recall($item["file_id"]);
            $uri = $storedFile->getFileUrlUsingConfigAddress();

            $showEndDateTime = new DateTime($item["show_end"], $utcTimeZone);
            $trackEndDateTime = new DateTime($item["end"], $utcTimeZone);

            /* Note: cue_out and end are always the same. */
            /* TODO: Not all tracks will have "show_end" */

            if ($trackEndDateTime->getTimestamp() > $showEndDateTime->getTimestamp()){
                $diff = $trackEndDateTime->getTimestamp() - $showEndDateTime->getTimestamp();
                //assuming ends takes cue_out into account
                $item["cue_out"] = $item["cue_out"] - $diff;
            }

            $start = Application_Model_Schedule::AirtimeTimeToPypoTime($item["start"]);
            $data["media"][$start] = array(
                'id' => $storedFile->getGunid(),
                'uri' => $uri,
                'fade_in' => Application_Model_Schedule::WallTimeToMillisecs($item["fade_in"]),
                'fade_out' => Application_Model_Schedule::WallTimeToMillisecs($item["fade_out"]),
                'cue_in' => Application_Model_DateHelper::CalculateLengthInSeconds($item["cue_in"]),
                'cue_out' => Application_Model_DateHelper::CalculateLengthInSeconds($item["cue_out"]),
                'start' => $start,
                'end' => Application_Model_Schedule::AirtimeTimeToPypoTime($item["end"])
            );
        }

        return $data;
    }

    /**
     * Export the schedule in json formatted for pypo (the liquidsoap scheduler)
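For reference, the rough shape of the structure GetScheduledPlaylists() now hands to pypo, keyed by start time in the "YYYY-MM-DD-HH-mm-SS" form. The field names come from the PHP above; the key, URL and values below are invented for illustration:

schedule = {
    "status": {},
    "media": {
        "2012-02-01-14-00-00": {
            "id": "9e7ce2",           # gunid of the stored file
            "uri": "http://example.org/api/get-media/file/9e7ce2.mp3",  # invented URL
            "fade_in": 500,           # milliseconds (WallTimeToMillisecs)
            "fade_out": 500,          # milliseconds
            "cue_in": 0.0,            # seconds (CalculateLengthInSeconds)
            "cue_out": 182.5,         # seconds
            "start": "2012-02-01-14-00-00",
            "end": "2012-02-01-14-03-02",
        },
    },
}

# pypo-fetch walks the items in start order:
for start_key in sorted(schedule["media"].keys()):
    media_item = schedule["media"][start_key]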
@@ -485,7 +512,7 @@ class Application_Model_Schedule {
     * @param string $p_toDateTime
     *      In the format "YYYY-MM-DD-HH-mm-SS"
     */
    public static function GetScheduledPlaylists($p_fromDateTime = null, $p_toDateTime = null)
    public static function GetScheduledPlaylistsOld($p_fromDateTime = null, $p_toDateTime = null)
    {
        global $CC_CONFIG, $CC_DBC;
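The two layers exchange timestamps in different shapes: pypo keys and request parameters use "YYYY-MM-DD-HH-mm-SS", while the PHP/SQL side uses "YYYY-MM-DD HH:MM:SS". A sketch of the conversion that PypoTimeToAirtimeTime/AirtimeTimeToPypoTime imply (my own Python reconstruction, not the PHP implementation; fractional seconds ignored):

from datetime import datetime

PYPO_FMT = "%Y-%m-%d-%H-%M-%S"      # pypo keys and request parameters
AIRTIME_FMT = "%Y-%m-%d %H:%M:%S"   # format used in the SQL TIMESTAMP literals

def pypo_time_to_airtime_time(s):
    # "2012-02-01-14-00-00" -> "2012-02-01 14:00:00"
    return datetime.strptime(s, PYPO_FMT).strftime(AIRTIME_FMT)

def airtime_time_to_pypo_time(s):
    # "2012-02-01 14:00:00" -> "2012-02-01-14-00-00"
    return datetime.strptime(s, AIRTIME_FMT).strftime(PYPO_FMT)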
@@ -546,7 +573,6 @@ class Application_Model_Schedule {
            $starts = Application_Model_Schedule::AirtimeTimeToPypoTime($item["starts"]);
            $medias[$starts] = array(
                'row_id' => $item["id"],
                'id' => $storedFile->getGunid(),
                'uri' => $uri,
                'fade_in' => Application_Model_Schedule::WallTimeToMillisecs($item["fade_in"]),
@@ -554,7 +580,6 @@ class Application_Model_Schedule {
                'fade_cross' => 0,
                'cue_in' => Application_Model_DateHelper::CalculateLengthInSeconds($item["cue_in"]),
                'cue_out' => Application_Model_DateHelper::CalculateLengthInSeconds($item["cue_out"]),
                'export_source' => 'scheduler',
                'start' => $starts,
                'end' => Application_Model_Schedule::AirtimeTimeToPypoTime($item["ends"])
            );
@@ -261,15 +261,15 @@ class AirTimeApiClient(ApiClientInterface):
        export_url = export_url.replace('%%api_key%%', self.config["api_key"])

        response = ""
        status = 0
        try:
            response_json = self.get_response_from_server(export_url)
            response = json.loads(response_json)
            status = response['check']
            success = True
        except Exception, e:
            logger.error(e)
            success = False

        return status, response
        return success, response


    def get_media(self, uri, dst):
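get_schedule() now reports a boolean success flag rather than the server's numeric 'check' field, so callers branch on truthiness. A short usage sketch of the new contract (api_client stands for an AirTimeApiClient instance, as used by pypo-fetch further down):

success, schedule_data = api_client.get_schedule()
if success:
    media = schedule_data["media"]   # decoded JSON: {"status": {...}, "media": {...}}
else:
    pass                             # the failure was already logged inside get_schedule()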
@@ -56,23 +56,16 @@ except Exception, e:
class Global:
    def __init__(self):
        self.api_client = api_client.api_client_factory(config)
        self.set_export_source('scheduler')

    def selfcheck(self):
        self.api_client = api_client.api_client_factory(config)
        return self.api_client.is_server_compatible()

    def set_export_source(self, export_source):
        self.export_source = export_source
        self.cache_dir = config["cache_dir"] + self.export_source + '/'
        self.schedule_file = self.cache_dir + 'schedule.pickle'
        self.schedule_tracker_file = self.cache_dir + "schedule_tracker.pickle"

    def test_api(self):
        self.api_client.test()

    """
    def check_schedule(self, export_source):
    def check_schedule(self):
        logger = logging.getLogger()

        try:
@@ -39,12 +39,28 @@ class PypoFetch(Thread):
    def __init__(self, pypoFetch_q, pypoPush_q):
        Thread.__init__(self)
        self.api_client = api_client.api_client_factory(config)
        self.set_export_source('scheduler')
        self.fetch_queue = pypoFetch_q
        self.push_queue = pypoPush_q

        self.logger = logging.getLogger();

        self.cache_dir = os.path.join(config["cache_dir"], "scheduler")
        logger.debug("Cache dir %s", self.cache_dir)

        try:
            if not os.path.isdir(dir):
                """
                We get here if path does not exist, or path does exist but
                is a file. We are not handling the second case, but don't
                think we actually care about handling it.
                """
                logger.debug("Cache dir does not exist. Creating...")
                os.makedirs(dir)
        except Exception, e:
            pass

        self.schedule_data = []
        logger = logging.getLogger('fetch')
        logger.info("PypoFetch: init complete")
        self.logger.info("PypoFetch: init complete")

    """
    Handle a message from RabbitMQ, put it into our yucky global var.
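The new cache-directory bootstrap checks os.path.isdir(dir), where dir looks like a leftover name; the apparent intent is an idempotent "create self.cache_dir if it is missing" step. A hedged sketch of that intent (my reconstruction, not the committed code):

import os

def ensure_cache_dir(cache_dir):
    # Create the directory tree if it is missing. If the path exists as a
    # plain file this raises, which the caller above simply swallows.
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    return cache_dir

# e.g. ensure_cache_dir(os.path.join(config["cache_dir"], "scheduler"))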
@@ -52,53 +68,51 @@ class PypoFetch(Thread):
    """
    def handle_message(self, message):
        try:
            logger = logging.getLogger('fetch')
            logger.info("Received event from Pypo Message Handler: %s" % message)
            self.logger.info("Received event from Pypo Message Handler: %s" % message)

            m = json.loads(message)
            command = m['event_type']
            logger.info("Handling command: " + command)
            self.logger.info("Handling command: " + command)

            if command == 'update_schedule':
                self.schedule_data = m['schedule']
                self.process_schedule(self.schedule_data, "scheduler", False)
                self.process_schedule(self.schedule_data, False)
            elif command == 'update_stream_setting':
                logger.info("Updating stream setting...")
                self.logger.info("Updating stream setting...")
                self.regenerateLiquidsoapConf(m['setting'])
            elif command == 'update_stream_format':
                logger.info("Updating stream format...")
                self.logger.info("Updating stream format...")
                self.update_liquidsoap_stream_format(m['stream_format'])
            elif command == 'update_station_name':
                logger.info("Updating station name...")
                self.logger.info("Updating station name...")
                self.update_liquidsoap_station_name(m['station_name'])
            elif command == 'cancel_current_show':
                logger.info("Cancel current show command received...")
                self.logger.info("Cancel current show command received...")
                self.stop_current_show()
        except Exception, e:
            import traceback
            top = traceback.format_exc()
            logger.error('Exception: %s', e)
            logger.error("traceback: %s", top)
            logger.error("Exception in handling Message Handler message: %s", e)
            self.logger.error('Exception: %s', e)
            self.logger.error("traceback: %s", top)
            self.logger.error("Exception in handling Message Handler message: %s", e)


    def stop_current_show(self):
        logger = logging.getLogger('fetch')
        logger.debug('Notifying Liquidsoap to stop playback.')
        self.logger.debug('Notifying Liquidsoap to stop playback.')
        try:
            tn = telnetlib.Telnet(LS_HOST, LS_PORT)
            tn.write('source.skip\n')
            tn.write('exit\n')
            tn.read_all()
        except Exception, e:
            logger.debug(e)
            logger.debug('Could not connect to liquidsoap')
            self.logger.debug(e)
            self.logger.debug('Could not connect to liquidsoap')

    def regenerateLiquidsoapConf(self, setting):
        logger = logging.getLogger('fetch')
        existing = {}
        # create a temp file
        fh = open('/etc/airtime/liquidsoap.cfg', 'r')
        logger.info("Reading existing config...")
        self.logger.info("Reading existing config...")
        # read existing conf file and build dict
        while 1:
            line = fh.readline()
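handle_message() expects a JSON object with an 'event_type' key plus a payload named after the event. Rough examples of the messages the dispatcher above accepts (shapes inferred from the code; payload values invented):

import json

update_schedule = json.dumps({
    "event_type": "update_schedule",
    "schedule": {"status": {}, "media": {}},
})
update_station_name = json.dumps({
    "event_type": "update_station_name",
    "station_name": "My Station",
})
cancel_current_show = json.dumps({
    "event_type": "cancel_current_show",
})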
@@ -128,7 +142,7 @@ class PypoFetch(Thread):
        #restart flag
        restart = False

        logger.info("Looking for changes...")
        self.logger.info("Looking for changes...")
        # look for changes
        for s in setting:
            if "output_sound_device" in s[u'keyname'] or "icecast_vorbis_metadata" in s[u'keyname']:
@@ -136,13 +150,13 @@ class PypoFetch(Thread):
                state_change_restart[stream] = False
                # This is the case where restart is required no matter what
                if (existing[s[u'keyname']] != s[u'value']):
                    logger.info("'Need-to-restart' state detected for %s...", s[u'keyname'])
                    self.logger.info("'Need-to-restart' state detected for %s...", s[u'keyname'])
                    restart = True;
            else:
                stream, dump = s[u'keyname'].split('_',1)
                if "_output" in s[u'keyname']:
                    if (existing[s[u'keyname']] != s[u'value']):
                        logger.info("'Need-to-restart' state detected for %s...", s[u'keyname'])
                        self.logger.info("'Need-to-restart' state detected for %s...", s[u'keyname'])
                        restart = True;
                        state_change_restart[stream] = True
                    elif ( s[u'value'] != 'disabled'):
@@ -154,22 +168,22 @@ class PypoFetch(Thread):
                    if stream not in change:
                        change[stream] = False
                    if not (s[u'value'] == existing[s[u'keyname']]):
                        logger.info("Keyname: %s, Current value: %s, New Value: %s", s[u'keyname'], existing[s[u'keyname']], s[u'value'])
                        self.logger.info("Keyname: %s, Current value: %s, New Value: %s", s[u'keyname'], existing[s[u'keyname']], s[u'value'])
                        change[stream] = True

        # set flag change for sound_device always True
        logger.info("Change:%s, State_Change:%s...", change, state_change_restart)
        self.logger.info("Change:%s, State_Change:%s...", change, state_change_restart)

        for k, v in state_change_restart.items():
            if k == "sound_device" and v:
                restart = True
            elif v and change[k]:
                logger.info("'Need-to-restart' state detected for %s...", k)
                self.logger.info("'Need-to-restart' state detected for %s...", k)
                restart = True
        # rewrite
        if restart:
            fh = open('/etc/airtime/liquidsoap.cfg', 'w')
            logger.info("Rewriting liquidsoap.cfg...")
            self.logger.info("Rewriting liquidsoap.cfg...")
            fh.write("################################################\n")
            fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n")
            fh.write("################################################\n")
@@ -191,17 +205,17 @@ class PypoFetch(Thread):
            fh.close()
            # restarting pypo.
            # we could just restart liquidsoap but it takes more time somehow.
            logger.info("Restarting pypo...")
            self.logger.info("Restarting pypo...")
            sys.exit(0)
        else:
            logger.info("No change detected in setting...")
            self.logger.info("No change detected in setting...")
            self.update_liquidsoap_connection_status()
    """
    def update_liquidsoap_connection_status(self):
    """
    updates the status of liquidsoap connection to the streaming server
    This function updates the bootup time variable in liquidsoap script
    """
    def update_liquidsoap_connection_status(self):
        logger = logging.getLogger('fetch')
        tn = telnetlib.Telnet(LS_HOST, LS_PORT)
        # update the boot up time of liquidsoap. Since liquidsoap is not restarting,
        # we are manually adjusting the bootup time variable so the status msg will get
@@ -219,7 +233,7 @@ class PypoFetch(Thread):
        # streaming info is in the form of:
        # eg. s1:true,2:true,3:false
        streams = stream_info.split(",")
        logger.info(streams)
        self.logger.info(streams)

        fake_time = current_time + 1
        for s in streams:
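Each entry in the status string above packs a stream id and a connection flag, so every element still has to be split on ':' before it can be reported back. An illustrative parse of the example string (the handling of the leading 's' is my assumption; the committed loop body sits outside this hunk):

stream_info = "s1:true,2:true,3:false"

for entry in stream_info.split(","):
    stream_id, status = entry.split(":", 1)
    stream_id = stream_id.lstrip("s")       # "s1" -> "1"
    if status == "true":
        print("stream %s is connected" % stream_id)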
@@ -229,46 +243,35 @@ class PypoFetch(Thread):
            if(status == "true"):
                self.api_client.notify_liquidsoap_status("OK", stream_id, str(fake_time))



    def set_export_source(self, export_source):
        logger = logging.getLogger('fetch')
        self.export_source = export_source
        self.cache_dir = config["cache_dir"] + self.export_source + '/'
        logger.info("Creating cache directory at %s", self.cache_dir)


    def update_liquidsoap_stream_format(self, stream_format):
        # Push stream metadata to liquidsoap
        # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
        try:
            logger = logging.getLogger('fetch')
            logger.info(LS_HOST)
            logger.info(LS_PORT)
            self.logger.info(LS_HOST)
            self.logger.info(LS_PORT)
            tn = telnetlib.Telnet(LS_HOST, LS_PORT)
            command = ('vars.stream_metadata_type %s\n' % stream_format).encode('utf-8')
            logger.info(command)
            self.logger.info(command)
            tn.write(command)
            tn.write('exit\n')
            tn.read_all()
        except Exception, e:
            logger.error("Exception %s", e)
            self.logger.error("Exception %s", e)

    def update_liquidsoap_station_name(self, station_name):
        # Push stream metadata to liquidsoap
        # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
        try:
            logger = logging.getLogger('fetch')
            logger.info(LS_HOST)
            logger.info(LS_PORT)
            self.logger.info(LS_HOST)
            self.logger.info(LS_PORT)
            tn = telnetlib.Telnet(LS_HOST, LS_PORT)
            command = ('vars.station_name %s\n' % station_name).encode('utf-8')
            logger.info(command)
            self.logger.info(command)
            tn.write(command)
            tn.write('exit\n')
            tn.read_all()
        except Exception, e:
            logger.error("Exception %s", e)
            self.logger.error("Exception %s", e)

    """
    Process the schedule
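update_liquidsoap_stream_format() and update_liquidsoap_station_name() are the same telnet exchange with a different vars.* name. A hedged sketch of that shared pattern (the helper name set_liquidsoap_var is mine; it is not part of this commit):

import telnetlib

def set_liquidsoap_var(host, port, name, value, logger):
    # Open a telnet session, set one liquidsoap server variable, close the session.
    tn = telnetlib.Telnet(host, port)
    command = ('vars.%s %s\n' % (name, value)).encode('utf-8')
    logger.info(command)
    tn.write(command)
    tn.write('exit\n')
    tn.read_all()

# e.g. set_liquidsoap_var(LS_HOST, LS_PORT, 'station_name', station_name, self.logger)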
@@ -278,166 +281,163 @@ class PypoFetch(Thread):
    to the cache dir (Folder-structure: cache/YYYY-MM-DD-hh-mm-ss)
    - runs the cleanup routine, to get rid of unused cached files
    """
    def process_schedule(self, schedule_data, export_source, bootstrapping):
        logger = logging.getLogger('fetch')
        playlists = schedule_data["playlists"]
    def process_schedule(self, schedule_data, bootstrapping):
        self.logger.debug(schedule_data)
        media = schedule_data["media"]

        # Download all the media and put playlists in liquidsoap "annotate" format
        try:
            liquidsoap_playlists = self.prepare_playlists(playlists, bootstrapping)
        except Exception, e: logger.error("%s", e)
            media = self.prepare_media(media, bootstrapping)
        except Exception, e: self.logger.error("%s", e)

        # Send the data to pypo-push
        scheduled_data = dict()
        scheduled_data['liquidsoap_playlists'] = liquidsoap_playlists
        scheduled_data['schedule'] = playlists
        self.push_queue.put(scheduled_data)
        self.logger.debug("Pushing to pypo-push: "+ str(media))
        self.push_queue.put(media)

        """
        TODO
        # cleanup
        try: self.cleanup(self.export_source)
        except Exception, e: logger.error("%s", e)
        try: self.cleanup()
        except Exception, e: self.logger.error("%s", e)
        """

    """
    In this function every audio file is cut as necessary (cue_in/cue_out != 0)
    and stored in a playlist folder.
    file is e.g. 2010-06-23-15-00-00/17_cue_10.132-123.321.mp3
    """
    def prepare_playlists(self, playlists, bootstrapping):
        logger = logging.getLogger('fetch')

        liquidsoap_playlists = dict()

        # Don't do anything if playlists is empty
        if not playlists:
            logger.debug("Schedule is empty.")
            return liquidsoap_playlists

        scheduleKeys = sorted(playlists.iterkeys())


    def prepare_media(self, media, bootstrapping):
        """
        Iterate through the list of media items in "media" and
        download them.
        """
        try:
            for pkey in scheduleKeys:
                logger.info("Playlist starting at %s", pkey)
                playlist = playlists[pkey]
            mediaKeys = sorted(media.iterkeys())
            for mkey in mediaKeys:
                self.logger.debug("Media item starting at %s", mkey)
                media_item = media[mkey]

                if bootstrapping:
                    check_for_previous_crash(media_item)

                # create playlist directory
                try:
                    os.mkdir(self.cache_dir + str(pkey))
                    """
                    Extract year, month, date from mkey
                    """
                    y_m_d = mkey[0:10]
                    download_dir = os.mkdir(os.path.join(self.cache_dir, y_m_d))
                    fileExt = os.path.splitext(media_item['uri'])[1]
                    dst = os.path.join(download_dir, media_item['id']+fileExt)
                except Exception, e:
                    logger.warning(e)
                    self.logger.warning(e)

                if self.handle_media_file(media_item, dst):
                    entry = create_liquidsoap_annotation(media_item, dst)
                    #entry['show_name'] = playlist['show_name']
                    entry['show_name'] = "TODO"
                    media_item["annotation"] = entry

                ls_playlist = self.handle_media_file(playlist, pkey, bootstrapping)

                liquidsoap_playlists[pkey] = ls_playlist
        except Exception, e:
            logger.error("%s", e)
        return liquidsoap_playlists
            self.logger.error("%s", e)

        return media


def create_liquidsoap_annotation(media, dst):
    pl_entry = \
        'annotate:media_id="%s",liq_start_next="%s",liq_fade_in="%s",liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",schedule_table_id="%s":%s' \
        % (media['id'], 0, \
        float(media['fade_in']) / 1000, \
        float(media['fade_out']) / 1000, \
        float(media['cue_in']), \
        float(media['cue_out']), \
        media['row_id'], dst)

    """
    Download and cache the media files.
    This handles both remote and local files.
    Returns an updated ls_playlist string.
    """
    def handle_media_file(self, playlist, pkey, bootstrapping):
        logger = logging.getLogger('fetch')
    """
    Tracks are only added to the playlist if they are accessible
    on the file system and larger than 0 bytes.
    So this can lead to playlists shorter than expected.
    (there is a hardware silence detector for these cases...)
    """
    entry = dict()
    entry['type'] = 'file'
    entry['annotate'] = pl_entry
    return entry

        ls_playlist = []
def check_for_previous_crash(media_item):
    start = media_item['start']
    end = media_item['end']

    dtnow = datetime.utcnow()
    str_tnow_s = dtnow.strftime('%Y-%m-%d-%H-%M-%S')

        sortedKeys = sorted(playlist['medias'].iterkeys())

        for key in sortedKeys:
            media = playlist['medias'][key]
            logger.debug("Processing track %s", media['uri'])

    if start <= str_tnow_s and str_tnow_s < end:
        #song is currently playing and we just started pypo. Maybe there
        #was a power outage? Let's restart playback of this song.
        start_split = map(int, start.split('-'))
        media_start = datetime(start_split[0], start_split[1], start_split[2], start_split[3], start_split[4], start_split[5], 0, None)
        self.logger.debug("Found media item that started at %s.", media_start)

            if bootstrapping:
                start = media['start']
                end = media['end']

                if end <= str_tnow_s:
                    continue
                elif start <= str_tnow_s and str_tnow_s < end:
                    #song is currently playing and we just started pypo. Maybe there
                    #was a power outage? Let's restart playback of this song.
                    start_split = map(int, start.split('-'))
                    media_start = datetime(start_split[0], start_split[1], start_split[2], start_split[3], start_split[4], start_split[5], 0, None)
                    logger.debug("Found media item that started at %s.", media_start)

                    delta = dtnow - media_start #we get a TimeDelta object from this operation
                    logger.info("Starting media item at %d second point", delta.seconds)
                    media['cue_in'] = delta.seconds + 10
                    td = timedelta(seconds=10)
                    playlist['start'] = (dtnow + td).strftime('%Y-%m-%d-%H-%M-%S')
                    logger.info("Crash detected, setting playlist to restart at %s", (dtnow + td).strftime('%Y-%m-%d-%H-%M-%S'))
        delta = dtnow - media_start #we get a TimeDelta object from this operation
        self.logger.info("Starting media item at %d second point", delta.seconds)

        """
        Set the cue_in. This is used by Liquidsoap to determine at what point in the media
        item it should start playing. If the cue_in happens to be > cue_out, then make cue_in = cue_out
        """
        media_item['cue_in'] = delta.seconds + 10 if delta.seconds + 10 < media_item['cue_out'] else media_item['cue_out']

        """
        Set the start time, which is used by pypo-push to determine when a media item is scheduled.
        Pushing the start time into the future will ensure pypo-push will push this to Liquidsoap.
        """
        td = timedelta(seconds=10)
        media_item['start'] = (dtnow + td).strftime('%Y-%m-%d-%H-%M-%S')
        self.logger.info("Crash detected, setting playlist to restart at %s", (dtnow + td).strftime('%Y-%m-%d-%H-%M-%S'))

    def handle_media_file(self, media_item, dst):
        """
        Download and cache the media item.
        """

        self.logger.debug("Processing track %s", media_item['uri'])

            fileExt = os.path.splitext(media['uri'])[1]
        try:
                dst = "%s%s/%s%s" % (self.cache_dir, pkey, media['id'], fileExt)

                # download media file
                self.handle_remote_file(media, dst)

                if True == os.access(dst, os.R_OK):
                    # check filesize (avoid zero-byte files)
                    try: fsize = os.path.getsize(dst)
                    except Exception, e:
                        logger.error("%s", e)
                        fsize = 0

        try:
            #blocking function to download the media item
            self.download_file(media_item, dst)

            if os.access(dst, os.R_OK):
                # check filesize (avoid zero-byte files)
                try:
                    fsize = os.path.getsize(dst)
                    if fsize > 0:
                        pl_entry = \
                            'annotate:export_source="%s",media_id="%s",liq_start_next="%s",liq_fade_in="%s",liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",schedule_table_id="%s":%s' \
                            % (media['export_source'], media['id'], 0, \
                            float(media['fade_in']) / 1000, \
                            float(media['fade_out']) / 1000, \
                            float(media['cue_in']), \
                            float(media['cue_out']), \
                            media['row_id'], dst)
                        return True
                except Exception, e:
                    self.logger.error("%s", e)
                    fsize = 0
            else:
                self.logger.warning("Cannot read file %s.", dst)

                        """
                        Tracks are only added to the playlist if they are accessible
                        on the file system and larger than 0 bytes.
                        So this can lead to playlists shorter than expected.
                        (there is a hardware silence detector for these cases...)
                        """
                        entry = dict()
                        entry['type'] = 'file'
                        entry['annotate'] = pl_entry
                        entry['show_name'] = playlist['show_name']
                        ls_playlist.append(entry)

                    else:
                        logger.warning("zero-size file - skipping %s. will not add it to playlist at %s", media['uri'], dst)

                else:
                    logger.warning("something went wrong. file %s not available. will not add it to playlist", dst)

            except Exception, e: logger.info("%s", e)
        return ls_playlist
        except Exception, e:
            self.logger.info("%s", e)

        return False


    """
    Download a file from a remote server and store it in the cache.
    """
    def handle_remote_file(self, media, dst):
        logger = logging.getLogger('fetch')
    def download_file(self, media_item, dst):
        if os.path.isfile(dst):
            pass
            #logger.debug("file already in cache: %s", dst)
            #self.logger.debug("file already in cache: %s", dst)
        else:
            logger.debug("try to download %s", media['uri'])
            self.api_client.get_media(media['uri'], dst)
            self.logger.debug("try to download %s", media_item['uri'])
            self.api_client.get_media(media_item['uri'], dst)

    """
    Cleans up folders in cache_dir. Look for modification date older than "now - CACHE_FOR"
    and deletes them.
    """
    def cleanup(self, export_source):
        logger = logging.getLogger('fetch')

    def cleanup(self):
        offset = 3600 * int(config["cache_for"])
        now = time.time()
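The annotate line built by create_liquidsoap_annotation() is what eventually gets written to the liquidsoap queue over telnet. A worked example of the string it produces for one media item (the dict values and cache path are invented; the format string is taken verbatim from the code above):

media = {
    'id': '9e7ce2',      # gunid
    'row_id': 42,        # schedule table id
    'fade_in': 500,      # milliseconds
    'fade_out': 500,     # milliseconds
    'cue_in': 0.0,       # seconds
    'cue_out': 182.5,    # seconds
}
dst = '/var/tmp/airtime/pypo/cache/scheduler/2012-02-01/9e7ce2.mp3'

annotation = \
    'annotate:media_id="%s",liq_start_next="%s",liq_fade_in="%s",liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",schedule_table_id="%s":%s' \
    % (media['id'], 0,
       float(media['fade_in']) / 1000,
       float(media['fade_out']) / 1000,
       float(media['cue_in']),
       float(media['cue_out']),
       media['row_id'], dst)

# annotate:media_id="9e7ce2",liq_start_next="0",liq_fade_in="0.5",liq_fade_out="0.5",
# liq_cue_in="0.0",liq_cue_out="182.5",schedule_table_id="42":/var/tmp/airtime/pypo/cache/scheduler/2012-02-01/9e7ce2.mp3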
@@ -447,86 +447,42 @@ class PypoFetch(Thread):
                    timestamp = calendar.timegm(time.strptime(dir, "%Y-%m-%d-%H-%M-%S"))
                    if (now - timestamp) > offset:
                        try:
                            logger.debug('trying to remove %s - timestamp: %s', os.path.join(r, dir), timestamp)
                            self.logger.debug('trying to remove %s - timestamp: %s', os.path.join(r, dir), timestamp)
                            shutil.rmtree(os.path.join(r, dir))
                        except Exception, e:
                            logger.error("%s", e)
                            self.logger.error("%s", e)
                            pass
                        else:
                            logger.info('successfully removed %s', os.path.join(r, dir))
                            self.logger.info('successfully removed %s', os.path.join(r, dir))
        except Exception, e:
            logger.error(e)
            self.logger.error(e)


    def main(self):
        logger = logging.getLogger('fetch')

        try: os.mkdir(self.cache_dir)
        except Exception, e: pass

        # Bootstrap: since we are just starting up, we need to grab the
        # most recent schedule. After that we can just wait for updates.
        status, self.schedule_data = self.api_client.get_schedule()
        if status == 1:
            logger.info("Bootstrap schedule received: %s", self.schedule_data)
            self.process_schedule(self.schedule_data, "scheduler", True)
        success, self.schedule_data = self.api_client.get_schedule()
        if success:
            self.logger.info("Bootstrap schedule received: %s", self.schedule_data)
            self.process_schedule(self.schedule_data, True)

        loops = 1
        while True:
            logger.info("Loop #%s", loops)
            self.logger.info("Loop #%s", loops)
            try:
                try:
                    """
                    our simple_queue.get() requires a timeout, in which case we
                    fetch the Airtime schedule manually. It is important to fetch
                    the schedule periodically because if we didn't, we would only
                    get schedule updates via RabbitMq if the user was constantly
                    using the Airtime interface.

                    If the user is not using the interface, RabbitMq messages are not
                    sent, and we will have very stale (or non-existent!) data about the
                    schedule.

                    Currently we are checking every 3600 seconds (1 hour)
                    """
                    message = self.fetch_queue.get(block=True, timeout=3600)
                    self.handle_message(message)
                except Empty, e:
                    """
                    Queue timeout. Fetching data manually
                    """
                    raise
                except Exception, e:
                    """
                    sleep 5 seconds so that we don't spin inside this
                    while loop and eat all the CPU
                    """
                    time.sleep(5)

                    """
                    There is a problem with the RabbitMq messenger service. Let's
                    log the error and get the schedule via HTTP polling
                    """
                    logger.error("Exception, %s", e)
                    raise
                message = self.fetch_queue.get(block=True, timeout=3600)
                self.handle_message(message)
            except Exception, e:
                """
                There is a problem with the RabbitMq messenger service. Let's
                log the error and get the schedule via HTTP polling
                """
                logger.error("Exception, %s", e)
                self.logger.error("Exception, %s", e)

                status, self.schedule_data = self.api_client.get_schedule()
                if status == 1:
                    self.process_schedule(self.schedule_data, "scheduler", False)
                success, self.schedule_data = self.api_client.get_schedule()
                if success:
                    self.process_schedule(self.schedule_data, False)

            loops += 1

    """
    Main loop of the thread:
    Wait for schedule updates from RabbitMQ, but in case there aren't any,
    poll the server to get the upcoming schedule.
    """
    def run(self):
        while True:
            self.main()
        """
        Entry point of the thread
        """
        self.main()
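cleanup() leans on the fact that each cache folder is named after its start time in "YYYY-MM-DD-HH-MM-SS" form, so a folder's age can be computed from its name alone. A small worked example of that age check (directory name and cache_for value invented):

import calendar
import time

cache_for_hours = 24                      # e.g. int(config["cache_for"])
offset = 3600 * cache_for_hours
now = time.time()

dir_name = "2012-02-01-14-00-00"          # cache folder named after its start time
timestamp = calendar.timegm(time.strptime(dir_name, "%Y-%m-%d-%H-%M-%S"))

if (now - timestamp) > offset:
    print("%s is older than %d hours and would be removed" % (dir_name, cache_for_hours))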
@@ -34,27 +34,20 @@ class PypoPush(Thread):
    def __init__(self, q):
        Thread.__init__(self)
        self.api_client = api_client.api_client_factory(config)
        self.set_export_source('scheduler')
        self.queue = q

        self.schedule = dict()
        self.playlists = dict()
        self.media = dict()

        self.liquidsoap_state_play = True
        self.push_ahead = 10

    def set_export_source(self, export_source):
        self.export_source = export_source
        self.cache_dir = config["cache_dir"] + self.export_source + '/'
        self.schedule_tracker_file = self.cache_dir + "schedule_tracker.pickle"
        self.push_ahead = 30

    """
    The Push Loop - the push loop periodically checks if there is a playlist
    that should be scheduled at the current time.
    If yes, the current liquidsoap playlist gets replaced with the corresponding one,
    then liquidsoap is asked (via telnet) to reload and immediately play it.
    """
    def push(self, export_source):
    def push(self):
        """
        The Push Loop - the push loop periodically checks if there is a playlist
        that should be scheduled at the current time.
        If yes, the current liquidsoap playlist gets replaced with the corresponding one,
        then liquidsoap is asked (via telnet) to reload and immediately play it.
        """
        logger = logging.getLogger('push')

        timenow = time.time()
@@ -62,35 +55,30 @@ class PypoPush(Thread):
        if not self.queue.empty():
            # make sure we get the latest schedule
            while not self.queue.empty():
                scheduled_data = self.queue.get()
                logger.debug("Received data from pypo-fetch")
                self.schedule = scheduled_data['schedule']
                self.playlists = scheduled_data['liquidsoap_playlists']

                logger.debug('schedule %s' % json.dumps(self.schedule))
                logger.debug('playlists %s' % json.dumps(self.playlists))
                self.media = self.queue.get()
            logger.debug("Received data from pypo-fetch")
            logger.debug('media %s' % json.dumps(self.media))

        schedule = self.schedule
        playlists = self.playlists
        media = self.media

        currently_on_air = False
        if schedule:
        if media:
            tnow = time.gmtime(timenow)
            tcoming = time.gmtime(timenow + self.push_ahead)
            str_tnow_s = "%04d-%02d-%02d-%02d-%02d-%02d" % (tnow[0], tnow[1], tnow[2], tnow[3], tnow[4], tnow[5])
            str_tcoming_s = "%04d-%02d-%02d-%02d-%02d-%02d" % (tcoming[0], tcoming[1], tcoming[2], tcoming[3], tcoming[4], tcoming[5])

            for pkey in schedule:
                plstart = schedule[pkey]['start'][0:19]

                if str_tnow_s <= plstart and plstart < str_tcoming_s:
                    logger.debug('Preparing to push playlist scheduled at: %s', pkey)
                    playlist = schedule[pkey]


                    # We have a match, replace the current playlist and
                    # force liquidsoap to refresh.
                    if (self.push_liquidsoap(pkey, schedule, playlists) == 1):

            for media_item in media:
                item_start = media_item['start'][0:19]

                if str_tnow_s <= item_start and item_start < str_tcoming_s:
                    """
                    If the media item starts in the next 30 seconds, push it to the queue.
                    """
                    logger.debug('Preparing to push media item scheduled at: %s', pkey)

                    if self.push_to_liquidsoap(media_item):
                        logger.debug("Pushed to liquidsoap, updating 'played' status.")

                        currently_on_air = True
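The push window test works on plain "YYYY-MM-DD-HH-MM-SS" strings, which compare correctly because every field is zero-padded and ordered most-significant first. A tiny worked example of the window check used above (push_ahead of 30 seconds; the item key is invented):

import time

push_ahead = 30
timenow = time.time()

tnow = time.gmtime(timenow)
tcoming = time.gmtime(timenow + push_ahead)
str_tnow_s = "%04d-%02d-%02d-%02d-%02d-%02d" % tnow[0:6]
str_tcoming_s = "%04d-%02d-%02d-%02d-%02d-%02d" % tcoming[0:6]

item_start = "2012-02-01-14-00-00"
if str_tnow_s <= item_start and item_start < str_tcoming_s:
    print("item starts within the next %d seconds, push it to liquidsoap" % push_ahead)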
@@ -99,90 +87,82 @@ class PypoPush(Thread):
                        # Call API to update schedule states
                        logger.debug("Doing callback to server to update 'played' status.")
                        self.api_client.notify_scheduled_item_start_playing(pkey, schedule)

                show_start = schedule[pkey]['show_start']
                show_end = schedule[pkey]['show_end']

                if show_start <= str_tnow_s and str_tnow_s < show_end:
                    currently_on_air = True
        """
        If currently_on_air = False but liquidsoap_state_play = True then it means that Liquidsoap may
        still be playing audio even though the show has ended ('currently_on_air = False' means no show is scheduled)
        See CC-3231.
        This is a temporary solution for Airtime 2.0
        """
        if not currently_on_air and self.liquidsoap_state_play:
            logger.debug('Notifying Liquidsoap to stop playback.')
            try:
                tn = telnetlib.Telnet(LS_HOST, LS_PORT)
                tn.write('source.skip\n')
                tn.write('exit\n')
                tn.read_all()
            except Exception, e:
                logger.debug(e)
                logger.debug('Could not connect to liquidsoap')

            self.liquidsoap_state_play = False


    def push_liquidsoap(self, pkey, schedule, playlists):
        logger = logging.getLogger('push')


    def push_to_liquidsoap(self, media_item):
        """
        This function looks at the media item, and either pushes it to the Liquidsoap
        queue immediately, or if the queue is empty - waits until the start time of the
        media item before pushing it.
        """
        try:
            playlist = playlists[pkey]
            plstart = schedule[pkey]['start'][0:19]

            #strptime returns struct_time in local time
            #mktime takes a time_struct and returns a floating point
            #gmtime Convert a time expressed in seconds since the epoch to a struct_time in UTC
            #mktime: expresses the time in local time, not UTC. It returns a floating point number, for compatibility with time().

            epoch_start = calendar.timegm(time.strptime(plstart, '%Y-%m-%d-%H-%M-%S'))

            #Return the time as a floating point number expressed in seconds since the epoch, in UTC.
            epoch_now = time.time()

            logger.debug("Epoch start: %s" % epoch_start)
            logger.debug("Epoch now: %s" % epoch_now)

            sleep_time = epoch_start - epoch_now;

            if sleep_time < 0:
                sleep_time = 0

            logger.debug('sleeping for %s s' % (sleep_time))
            time.sleep(sleep_time)

            tn = telnetlib.Telnet(LS_HOST, LS_PORT)

            #skip the currently playing song if any.
            logger.debug("source.skip\n")
            tn.write("source.skip\n")

            # Get any extra information for liquidsoap (which will be sent back to us)
            liquidsoap_data = self.api_client.get_liquidsoap_data(pkey, schedule)

            #Sending schedule table row id string.
            logger.debug("vars.pypo_data %s\n"%(liquidsoap_data["schedule_id"]))
            tn.write(("vars.pypo_data %s\n"%liquidsoap_data["schedule_id"]).encode('utf-8'))

            logger.debug('Preparing to push playlist %s' % pkey)
            for item in playlist:
                annotate = item['annotate']
                tn.write(str('queue.push %s\n' % annotate.encode('utf-8')))

                show_name = item['show_name']
                tn.write(str('vars.show_name %s\n' % show_name.encode('utf-8')))

            tn.write("exit\n")
            logger.debug(tn.read_all())

            status = 1
            if media_item["starts"] == self.last_end_time:
                """
                this media item is attached to the end of the last
                track, so let's push it now so that Liquidsoap can start playing
                it immediately after (and prepare crossfades if need be).
                """
                telnet_to_liquidsoap(media_item)
                self.last_end_time = media_item["end"]
            else:
                """
                this media item does not start right after a current playing track.
                We need to sleep, and then wake up when this track starts.
                """
                sleep_until_start(media_item)

                telnet_to_liquidsoap(media_item)
                self.last_end_time = media_item["end"]
        except Exception, e:
            logger.error('%s', e)
            status = 0
        return status
        return False

        return True

def sleep_until_start(media_item):
    """
    The purpose of this function is to look at the difference between
    "now" and when the media_item starts, and sleep for that period of time.
    After waking from sleep, this function returns.
    """

    mi_start = media_item['start'][0:19]

    #strptime returns struct_time in local time
    epoch_start = calendar.timegm(time.strptime(mi_start, '%Y-%m-%d-%H-%M-%S'))

    #Return the time as a floating point number expressed in seconds since the epoch, in UTC.
    epoch_now = time.time()

    logger.debug("Epoch start: %s" % epoch_start)
    logger.debug("Epoch now: %s" % epoch_now)

    sleep_time = epoch_start - epoch_now

    if sleep_time < 0:
        sleep_time = 0

    logger.debug('sleeping for %s s' % (sleep_time))
    time.sleep(sleep_time)

def telnet_to_liquidsoap(media_item):
    """
    telnets to liquidsoap and pushes the media_item to its queue. Push the
    show name of every media_item as well, just to keep Liquidsoap up-to-date
    about which show is playing.
    """

    tn = telnetlib.Telnet(LS_HOST, LS_PORT)

    #tn.write(("vars.pypo_data %s\n"%liquidsoap_data["schedule_id"]).encode('utf-8'))

    annotation = media_item['annotation']
    tn.write('queue.push %s\n' % annotation.encode('utf-8'))

    show_name = media_item['show_name']
    tn.write('vars.show_name %s\n' % show_name.encode('utf-8'))

    tn.write("exit\n")
    logger.debug(tn.read_all())

    def run(self):
        loops = 0
        heartbeat_period = math.floor(30/PUSH_INTERVAL)
@@ -192,7 +172,7 @@ class PypoPush(Thread):
            if loops % heartbeat_period == 0:
                logger.info("heartbeat")
                loops = 0
            try: self.push('scheduler')
            try: self.push()
            except Exception, e:
                logger.error('Pypo Push Exception: %s', e)
            time.sleep(PUSH_INTERVAL)