import os
import sys
import time
import logging
import logging.config
import shutil
import random
import string
import json
import telnetlib
import math
import socket

from threading import Thread
from subprocess import Popen, PIPE
from datetime import datetime
from datetime import timedelta

# For RabbitMQ
from kombu.connection import BrokerConnection
from kombu.messaging import Exchange, Queue, Consumer, Producer

from api_clients import api_client
from configobj import ConfigObj

# configure logging
logging.config.fileConfig("logging.cfg")
# loading config file
try:
    config = ConfigObj('/etc/airtime/pypo.cfg')
    LS_HOST = config['ls_host']
    LS_PORT = config['ls_port']
    POLL_INTERVAL = int(config['poll_interval'])
except Exception, e:
    logger = logging.getLogger()
    logger.error('Error loading config file: %s', e)
    sys.exit()
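# Other keys read from /etc/airtime/pypo.cfg later in this module (summarized here
# for reference, not an exhaustive list of what the file may contain): cache_dir,
# cache_for, rabbitmq_host, rabbitmq_user, rabbitmq_password.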
# Yuk - using a global, i know!
SCHEDULE_PUSH_MSG = []

"""
Handle a message from RabbitMQ, put it into our yucky global var.
Hopefully there is a better way to do this.
"""
def handle_message(body, message):
    logger = logging.getLogger('fetch')
    global SCHEDULE_PUSH_MSG
    logger.info("Received schedule from RabbitMQ: " + message.body)
    SCHEDULE_PUSH_MSG = json.loads(message.body)
    # ACK the message to take it off the queue
    message.ack()
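# Rough shape of the schedule payload consumed below - inferred from the fields
# this module accesses; the real server may send more:
#
#   {
#       "playlists": {
#           "<YYYY-MM-DD-hh-mm-ss>": {
#               "start": "...", "end": "...", "show_name": "...",
#               "medias": {
#                   "<YYYY-MM-DD-hh-mm-ss>": {
#                       "uri": "...", "id": ..., "row_id": ...,
#                       "start": "...", "end": "...",
#                       "cue_in": ..., "cue_out": ...,
#                       "fade_in": ..., "fade_out": ...,
#                       "export_source": "..."
#                   }
#               }
#           }
#       },
#       "stream_metadata": {"format": "...", "station_name": "..."},
#       "server_timezone": "+0100"
#   }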
class PypoFetch(Thread):
    def __init__(self, q):
        Thread.__init__(self)
        logger = logging.getLogger('fetch')
        self.api_client = api_client.api_client_factory(config)
        self.set_export_source('scheduler')
        self.queue = q
        logger.info("PypoFetch: init complete")
    def init_rabbit_mq(self):
        logger = logging.getLogger('fetch')
        logger.info("Initializing RabbitMQ stuff")
        try:
            schedule_exchange = Exchange("airtime-schedule", "direct", durable=True, auto_delete=True)
            schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange, key="foo")
            self.connection = BrokerConnection(config["rabbitmq_host"], config["rabbitmq_user"], config["rabbitmq_password"], "/")
            channel = self.connection.channel()
            consumer = Consumer(channel, schedule_queue)
            consumer.register_callback(handle_message)
            consumer.consume()
        except Exception, e:
            logger.error(e)
            return False

        return True
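    # Note: consume() only registers handle_message() as the callback for the
    # "pypo-fetch" queue; messages are actually delivered when main() calls
    # self.connection.drain_events() further down.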
    def set_export_source(self, export_source):
        logger = logging.getLogger('fetch')
        self.export_source = export_source
        self.cache_dir = config["cache_dir"] + self.export_source + '/'
        logger.info("Creating cache directory at %s", self.cache_dir)
    def check_matching_timezones(self, server_timezone):
        logger = logging.getLogger('fetch')

        process = Popen(["date", "+%z"], stdout=PIPE)
        pypo_timezone = (process.communicate()[0]).strip(' \r\n\t')

        if server_timezone != pypo_timezone:
            logger.error("ERROR: Airtime server and pypo timezone offsets do not match. Audio playback will not start when expected!!!")
            logger.error(" * Server timezone offset: %s", server_timezone)
            logger.error(" * Pypo timezone offset: %s", pypo_timezone)
            logger.error(" * To fix this, you need to set the 'date.timezone' value in your php.ini file and restart apache.")
            logger.error(" * See this page for more info (v1.7): http://wiki.sourcefabric.org/x/BQBF")
            logger.error(" * and also the 'FAQ and Support' page underneath it.")
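    # The offsets are compared as the raw "+HHMM"/"-HHMM" strings printed by
    # `date +%z` (e.g. "+0100"), so the server is expected to report its
    # timezone offset in that same format.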
    """
    def get_currently_scheduled(self, playlistsOrMedias, str_tnow_s):
        for key in playlistsOrMedias:
            start = playlistsOrMedias[key]['start']
            end = playlistsOrMedias[key]['end']

            if start <= str_tnow_s and str_tnow_s < end:
                return key

        return None

    def handle_shows_currently_scheduled(self, playlists):
        logger = logging.getLogger('fetch')

        dtnow = datetime.today()
        tnow = dtnow.timetuple()
        str_tnow_s = "%04d-%02d-%02d-%02d-%02d-%02d" % (tnow[0], tnow[1], tnow[2], tnow[3], tnow[4], tnow[5])

        current_pkey = self.get_currently_scheduled(playlists, str_tnow_s)
        if current_pkey is not None:
            logger.debug("FOUND CURRENT PLAYLIST %s", current_pkey)
            # So we have found that a playlist is currently scheduled
            # even though we just started pypo. Perhaps there was a
            # system crash. Let's calculate what position in the playlist
            # we are supposed to be in.
            medias = playlists[current_pkey]["medias"]
            current_mkey = self.get_currently_scheduled(medias, str_tnow_s)
            if current_mkey is not None:
                mkey_split = map(int, current_mkey.split('-'))
                media_start = datetime(mkey_split[0], mkey_split[1], mkey_split[2], mkey_split[3], mkey_split[4], mkey_split[5])
                logger.debug("Found media item that started at %s.", media_start)

                delta = dtnow - media_start #we get a timedelta object from this operation
                logger.info("Starting media item at %d second point", delta.seconds)
    """
    """
    Process the schedule
     - Reads the scheduled entries of a given range (actual time +/- "prepare_ahead" / "cache_for")
     - Saves a serialized file of the schedule
     - Playlists are prepared (brought to liquidsoap format) and, if not mounted via nfs, files are copied
       to the cache dir (folder structure: cache/YYYY-MM-DD-hh-mm-ss)
     - Runs the cleanup routine, to get rid of unused cached files
    """
    def process_schedule(self, schedule_data, export_source, bootstrapping):
        logger = logging.getLogger('fetch')
        playlists = schedule_data["playlists"]

        #if bootstrapping:
        #    TODO: possibly allow prepare_playlists to handle this.
        #    self.handle_shows_currently_scheduled(playlists)

        self.check_matching_timezones(schedule_data["server_timezone"])

        # Push stream metadata to liquidsoap
        # TODO: THIS LIQUIDSOAP STUFF NEEDS TO BE MOVED TO PYPO-PUSH!!!
        stream_metadata = schedule_data['stream_metadata']
        try:
            tn = telnetlib.Telnet(LS_HOST, LS_PORT)
            #encode in latin-1 due to telnet protocol not supporting utf-8
            tn.write(('vars.stream_metadata_type %s\n' % stream_metadata['format']).encode('latin-1'))
            tn.write(('vars.station_name %s\n' % stream_metadata['station_name']).encode('latin-1'))
            tn.write('exit\n')
            tn.read_all()
        except Exception, e:
            logger.error("Exception %s", e)
            status = 0

        # Download all the media and put playlists in liquidsoap "annotate" format
        liquidsoap_playlists = dict()  # fallback, so a failed prepare below does not cause a NameError
        try:
            liquidsoap_playlists = self.prepare_playlists(playlists, bootstrapping)
        except Exception, e: logger.error("%s", e)

        # Send the data to pypo-push
        scheduled_data = dict()
        scheduled_data['liquidsoap_playlists'] = liquidsoap_playlists
        scheduled_data['schedule'] = playlists
        scheduled_data['stream_metadata'] = schedule_data["stream_metadata"]
        self.queue.put(scheduled_data)

        # cleanup
        try: self.cleanup(self.export_source)
        except Exception, e: logger.error("%s", e)
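    # For reference, the dict handed to pypo-push above carries three keys:
    #   'liquidsoap_playlists' - playlists converted to liquidsoap annotate format
    #   'schedule'             - the raw playlists received from the server
    #   'stream_metadata'      - stream format / station name for liquidsoap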
    """
    In this function every audio file is cut as necessary (cue_in/cue_out != 0)
    and stored in a playlist folder.
    file is e.g. 2010-06-23-15-00-00/17_cue_10.132-123.321.mp3
    """
    def prepare_playlists(self, playlists, bootstrapping):
        logger = logging.getLogger('fetch')

        liquidsoap_playlists = dict()

        # Don't do anything if playlists is empty
        if not playlists:
            logger.debug("Schedule is empty.")
            return liquidsoap_playlists

        scheduleKeys = sorted(playlists.iterkeys())

        try:
            for pkey in scheduleKeys:
                logger.info("Playlist starting at %s", pkey)
                playlist = playlists[pkey]

                # create playlist directory
                try:
                    os.mkdir(self.cache_dir + str(pkey))
                except Exception, e:
                    logger.error(e)

                #June 13, 2011: Commented this block out since we are not currently setting this to '1'
                #on the server side. Currently using a different method to detect if already played - Martin
                #if int(playlist['played']) == 1:
                #    logger.info("playlist %s already played / sent to liquidsoap, so will ignore it", pkey)

                ls_playlist = self.handle_media_file(playlist, pkey, bootstrapping)

                liquidsoap_playlists[pkey] = ls_playlist
        except Exception, e:
            logger.error("%s", e)
        return liquidsoap_playlists
    """
    Download and cache the media files.
    This handles both remote and local files.
    Returns an updated ls_playlist (a list of annotate entries).
    """
    def handle_media_file(self, playlist, pkey, bootstrapping):
        logger = logging.getLogger('fetch')

        ls_playlist = []

        dtnow = datetime.today()
        str_tnow_s = dtnow.strftime('%Y-%m-%d-%H-%M-%S')

        sortedKeys = sorted(playlist['medias'].iterkeys())

        for key in sortedKeys:
            media = playlist['medias'][key]
            logger.debug("Processing track %s", media['uri'])

            if bootstrapping:
                start = media['start']
                end = media['end']

                if end <= str_tnow_s:
                    continue
                elif start <= str_tnow_s and str_tnow_s < end:
                    #song is currently playing and we just started pypo. Maybe there
                    #was a power outage? Let's restart playback of this song.
                    start_split = map(int, start.split('-'))
                    media_start = datetime(start_split[0], start_split[1], start_split[2], start_split[3], start_split[4], start_split[5])
                    logger.debug("Found media item that started at %s.", media_start)

                    delta = dtnow - media_start #we get a timedelta object from this operation
                    logger.info("Starting media item at %d second point", delta.seconds)
                    media['cue_in'] = delta.seconds + 10
                    td = timedelta(seconds=10)
                    playlist['start'] = (dtnow + td).strftime('%Y-%m-%d-%H-%M-%S')
                    logger.info("Crash detected, setting playlist to restart at %s", (dtnow + td).strftime('%Y-%m-%d-%H-%M-%S'))

            fileExt = os.path.splitext(media['uri'])[1]
            try:
                dst = "%s%s/%s%s" % (self.cache_dir, pkey, media['id'], fileExt)

                # download media file
                self.handle_remote_file(media, dst)

                if True == os.access(dst, os.R_OK):
                    # check filesize (avoid zero-byte files)
                    try: fsize = os.path.getsize(dst)
                    except Exception, e:
                        logger.error("%s", e)
                        fsize = 0

                    if fsize > 0:
                        pl_entry = \
                        'annotate:export_source="%s",media_id="%s",liq_start_next="%s",liq_fade_in="%s",liq_fade_out="%s",liq_cue_in="%s",liq_cue_out="%s",schedule_table_id="%s":%s' \
                        % (media['export_source'], media['id'], 0, \
                           float(media['fade_in']) / 1000, \
                           float(media['fade_out']) / 1000, \
                           float(media['cue_in']), \
                           float(media['cue_out']), \
                           media['row_id'], dst)

                        """
                        Tracks are only added to the playlist if they are accessible
                        on the file system and larger than 0 bytes.
                        So this can lead to playlists shorter than expected.
                        (there is a hardware silence detector for these cases...)
                        """
                        entry = dict()
                        entry['type'] = 'file'
                        entry['annotate'] = pl_entry
                        entry['show_name'] = playlist['show_name']
                        ls_playlist.append(entry)
                    else:
                        logger.warning("zero-size file - skipping %s. will not add it to playlist at %s", media['uri'], dst)
                else:
                    logger.warning("something went wrong. file %s not available. will not add it to playlist", dst)
            except Exception, e: logger.info("%s", e)
        return ls_playlist
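    # A produced annotate entry looks roughly like the following (the values are
    # illustrative; only the field layout comes from the format string above):
    #   annotate:export_source="scheduler",media_id="123",liq_start_next="0",
    #   liq_fade_in="0.5",liq_fade_out="0.5",liq_cue_in="0.0",liq_cue_out="185.0",
    #   schedule_table_id="42":/var/.../cache/scheduler/2010-06-23-15-00-00/123.mp3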
    """
    Download a file from a remote server and store it in the cache.
    """
    def handle_remote_file(self, media, dst):
        logger = logging.getLogger('fetch')
        if os.path.isfile(dst):
            pass
            #logger.debug("file already in cache: %s", dst)
        else:
            logger.debug("try to download %s", media['uri'])
            self.api_client.get_media(media['uri'], dst)
    """
    Cleans up folders in cache_dir. Looks for directories whose name (a
    "%Y-%m-%d-%H-%M-%S" timestamp) is older than "now - cache_for" hours
    and deletes them.
    """
    def cleanup(self, export_source):
        logger = logging.getLogger('fetch')

        offset = 3600 * int(config["cache_for"])
        now = time.time()

        for r, d, f in os.walk(self.cache_dir):
            for dir in d:
                try:
                    timestamp = time.mktime(time.strptime(dir, "%Y-%m-%d-%H-%M-%S"))
                    if (now - timestamp) > offset:
                        try:
                            logger.debug('trying to remove %s - timestamp: %s', os.path.join(r, dir), timestamp)
                            shutil.rmtree(os.path.join(r, dir))
                        except Exception, e:
                            logger.error("%s", e)
                            pass
                        else:
                            logger.info('successfully removed %s', os.path.join(r, dir))
                except Exception, e:
                    logger.error(e)
    def main(self):
        logger = logging.getLogger('fetch')

        while not self.init_rabbit_mq():
            logger.error("Error connecting to RabbitMQ Server. Trying again in a few seconds")
            time.sleep(5)

        try: os.mkdir(self.cache_dir)
        except Exception, e: pass

        # Bootstrap: since we are just starting up, we need to grab the
        # most recent schedule. After that we can just wait for updates.
        status, schedule_data = self.api_client.get_schedule()
        if status == 1:
            logger.info("Bootstrap schedule received: %s", schedule_data)
            self.process_schedule(schedule_data, "scheduler", True)
        logger.info("Bootstrap complete: got initial copy of the schedule")

        loops = 1
        while True:
            logger.info("Loop #%s", loops)
            try:
                # Wait for messages from RabbitMQ. Timeout if we
                # don't get any after POLL_INTERVAL.
                self.connection.drain_events(timeout=POLL_INTERVAL)
                # Hooray for globals!
                schedule_data = SCHEDULE_PUSH_MSG
                status = 1
            except socket.timeout, se:
                # We didn't get a message for a while, so poll the server
                # to get an updated schedule.
                status, schedule_data = self.api_client.get_schedule()
            except Exception, e:
                """
                This generic exception is thrown whenever the RabbitMQ
                service is stopped. In this case let's check every few
                seconds to see if it has come back up.
                """
                logger.info("Unknown exception")
                return
                #return based on the exception

            if status == 1:
                self.process_schedule(schedule_data, "scheduler", False)
            loops += 1
    """
    Main loop of the thread:
    Wait for schedule updates from RabbitMQ, but in case there aren't any,
    poll the server to get the upcoming schedule.
    """
    def run(self):
        while True:
            self.main()