Merge branch '2.2.x' of dev.sourcefabric.org:airtime into 2.2.x
commit 5ab183e01e

@@ -1,40 +1,40 @@
 <?php
 
-define('AIRTIME_COPYRIGHT_DATE', '2010-2012');
-define('AIRTIME_REST_VERSION', '1.1');
-define('AIRTIME_API_VERSION', '1.1');
+define('AIRTIME_COPYRIGHT_DATE' , '2010-2012');
+define('AIRTIME_REST_VERSION' , '1.1');
+define('AIRTIME_API_VERSION' , '1.1');
 
 // Metadata Keys for files
-define('MDATA_KEY_FILEPATH', 'filepath');
-define('MDATA_KEY_DIRECTORY', 'directory');
-define('MDATA_KEY_MD5', 'md5');
-define('MDATA_KEY_TITLE', 'track_title');
-define('MDATA_KEY_CREATOR', 'artist_name');
-define('MDATA_KEY_SOURCE', 'album_title');
-define('MDATA_KEY_DURATION', 'length');
-define('MDATA_KEY_MIME', 'mime');
-define('MDATA_KEY_FTYPE', 'ftype');
-define('MDATA_KEY_URL', 'info_url');
-define('MDATA_KEY_GENRE', 'genre');
-define('MDATA_KEY_MOOD', 'mood');
-define('MDATA_KEY_LABEL', 'label');
-define('MDATA_KEY_COMPOSER', 'composer');
-define('MDATA_KEY_DESCRIPTION', 'description');
-define('MDATA_KEY_SAMPLERATE', 'sample_rate');
-define('MDATA_KEY_BITRATE', 'bit_rate');
-define('MDATA_KEY_ENCODER', 'encoded_by');
-define('MDATA_KEY_ISRC', 'isrc_number');
-define('MDATA_KEY_COPYRIGHT', 'copyright');
-define('MDATA_KEY_YEAR', 'year');
-define('MDATA_KEY_BPM', 'bpm');
-define('MDATA_KEY_TRACKNUMBER', 'track_number');
-define('MDATA_KEY_CONDUCTOR', 'conductor');
-define('MDATA_KEY_LANGUAGE', 'language');
-define('MDATA_KEY_REPLAYGAIN', 'replay_gain');
-define('MDATA_KEY_OWNER_ID', 'owner_id');
+define('MDATA_KEY_FILEPATH' , 'filepath');
+define('MDATA_KEY_DIRECTORY' , 'directory');
+define('MDATA_KEY_MD5' , 'md5');
+define('MDATA_KEY_TITLE' , 'track_title');
+define('MDATA_KEY_CREATOR' , 'artist_name');
+define('MDATA_KEY_SOURCE' , 'album_title');
+define('MDATA_KEY_DURATION' , 'length');
+define('MDATA_KEY_MIME' , 'mime');
+define('MDATA_KEY_FTYPE' , 'ftype');
+define('MDATA_KEY_URL' , 'info_url');
+define('MDATA_KEY_GENRE' , 'genre');
+define('MDATA_KEY_MOOD' , 'mood');
+define('MDATA_KEY_LABEL' , 'label');
+define('MDATA_KEY_COMPOSER' , 'composer');
+define('MDATA_KEY_DESCRIPTION' , 'description');
+define('MDATA_KEY_SAMPLERATE' , 'sample_rate');
+define('MDATA_KEY_BITRATE' , 'bit_rate');
+define('MDATA_KEY_ENCODER' , 'encoded_by');
+define('MDATA_KEY_ISRC' , 'isrc_number');
+define('MDATA_KEY_COPYRIGHT' , 'copyright');
+define('MDATA_KEY_YEAR' , 'year');
+define('MDATA_KEY_BPM' , 'bpm');
+define('MDATA_KEY_TRACKNUMBER' , 'track_number');
+define('MDATA_KEY_CONDUCTOR' , 'conductor');
+define('MDATA_KEY_LANGUAGE' , 'language');
+define('MDATA_KEY_REPLAYGAIN' , 'replay_gain');
+define('MDATA_KEY_OWNER_ID' , 'owner_id');
 
-define('UI_MDATA_VALUE_FORMAT_FILE', 'File');
-define('UI_MDATA_VALUE_FORMAT_STREAM', 'live stream');
+define('UI_MDATA_VALUE_FORMAT_FILE' , 'File');
+define('UI_MDATA_VALUE_FORMAT_STREAM' , 'live stream');
 
 // Session Keys
 define('UI_PLAYLISTCONTROLLER_OBJ_SESSNAME', 'PLAYLISTCONTROLLER_OBJ');
@@ -43,6 +43,6 @@ define('UI_BLOCK_SESSNAME', 'BLOCK');*/
 
 // Soundcloud contants
-define('SOUNDCLOUD_NOT_UPLOADED_YET', -1);
-define('SOUNDCLOUD_PROGRESS', -2);
-define('SOUNDCLOUD_ERROR', -3);
+define('SOUNDCLOUD_NOT_UPLOADED_YET' , -1);
+define('SOUNDCLOUD_PROGRESS' , -2);
+define('SOUNDCLOUD_ERROR' , -3);

@@ -366,12 +366,11 @@ class Application_Model_Scheduler
      * @param array $fileIds
      * @param array $playlistIds
      */
-    private function insertAfter($scheduleItems, $schedFiles, $adjustSched = true)
+    private function insertAfter($scheduleItems, $schedFiles, $adjustSched = true, $mediaItems = null)
     {
         try {
 
             $affectedShowInstances = array();
 
             //dont want to recalculate times for moved items.
             $excludeIds = array();
             foreach ($schedFiles as $file) {
@@ -384,7 +383,17 @@ class Application_Model_Scheduler
 
             foreach ($scheduleItems as $schedule) {
                 $id = intval($schedule["id"]);
 
+                // if mediaItmes is passed in, we want to create contents
+                // at the time of insert. This is for dyanmic blocks or
+                // playlist that contains dynamic blocks
+                if ($mediaItems != null) {
+                    $schedFiles = array();
+                    foreach ($mediaItems as $media) {
+                        $schedFiles = array_merge($schedFiles, $this->retrieveMediaFiles($media["id"], $media["type"]));
+                    }
+                }
+
                 if ($id !== 0) {
                     $schedItem = CcScheduleQuery::create()->findPK($id, $this->con);
                     $instance = $schedItem->getCcShowInstances($this->con);
@@ -527,10 +536,32 @@ class Application_Model_Scheduler
 
             $this->validateRequest($scheduleItems);
 
+            $requireDynamicContentCreation = false;
+
             foreach ($mediaItems as $media) {
-                $schedFiles = array_merge($schedFiles, $this->retrieveMediaFiles($media["id"], $media["type"]));
+                if ($media['type'] == "playlist") {
+                    $pl = new Application_Model_Playlist($media['id']);
+                    if ($pl->hasDynamicBlock()) {
+                        $requireDynamicContentCreation = true;
+                        break;
+                    }
+                } else if ($media['type'] == "block") {
+                    $bl = new Application_Model_Block($media['id']);
+                    if (!$bl->isStatic()) {
+                        $requireDynamicContentCreation = true;
+                        break;
+                    }
+                }
             }
+
+            if ($requireDynamicContentCreation) {
+                $this->insertAfter($scheduleItems, $schedFiles, $adjustSched, $mediaItems);
+            } else {
+                foreach ($mediaItems as $media) {
+                    $schedFiles = array_merge($schedFiles, $this->retrieveMediaFiles($media["id"], $media["type"]));
+                }
+                $this->insertAfter($scheduleItems, $schedFiles, $adjustSched);
+            }
-            $this->insertAfter($scheduleItems, $schedFiles, $adjustSched);
 
             $this->con->commit();
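
The hunk above makes content expansion lazy: statically known files are still expanded before scheduling, but a playlist containing a dynamic smart block (or a non-static block) is passed through so its contents are generated at insert time. A minimal Python sketch of that decision, with hypothetical names standing in for Airtime's PHP models:

def schedule(media_items, insert_after):
    # Expand dynamic media at insert time, static media up front.
    def is_dynamic(media):
        # stand-in for hasDynamicBlock() / !isStatic() in the diff
        return media.get('dynamic', False)

    if any(is_dynamic(m) for m in media_items):
        # defer: the insert routine regenerates contents when it runs
        insert_after(files=None, media_items=media_items)
    else:
        files = [f for m in media_items for f in m['files']]
        insert_after(files=files, media_items=None)

if __name__ == '__main__':
    def demo(files, media_items):
        print("files=%r media_items=%r" % (files, media_items))
    schedule([{'dynamic': False, 'files': ['a.mp3']}], demo)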

@@ -54,7 +54,10 @@ function open_audio_preview(type, id, audioFileTitle, audioFileArtist) {
         audioFileTitle = audioFileTitle.substring(0,index);
     }
 
-    openPreviewWindow('audiopreview/audio-preview/audioFileID/'+id+'/audioFileArtist/'+encodeURIComponent(audioFileArtist)+'/audioFileTitle/'+encodeURIComponent(audioFileTitle)+'/type/'+type);
+    // The reason that we need to encode artist and title string is that
+    // sometime they contain '/' or '\' and apache reject %2f or %5f
+    // so the work around is to encode it twice.
+    openPreviewWindow('audiopreview/audio-preview/audioFileID/'+id+'/audioFileArtist/'+encodeURIComponent(encodeURIComponent(audioFileArtist))+'/audioFileTitle/'+encodeURIComponent(encodeURIComponent(audioFileTitle))+'/type/'+type);
 
     _preview_window.focus();
 }
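
The double encodeURIComponent above is deliberate: Apache rejects an encoded slash inside a path segment by default, so the value is encoded twice on the way out and must be decoded twice server-side. A round-trip check in Python 2 (the project's Python), illustrative only:

from urllib import quote, unquote

title = 'AC/DC - Back in Black'
once = quote(title, safe='')   # contains '%2F', which Apache rejects in a path
twice = quote(once, safe='')   # '%252F' passes through; decode twice to recover
assert unquote(unquote(twice)) == title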

@@ -1,111 +1,143 @@
 # -*- coding: utf-8 -*-
-import media.monitor.process as md
+import media.metadata.process as md
+import re
 from os.path import normpath
-from media.monitor.pure import format_length, file_md5
+from media.monitor.pure import format_length, file_md5, is_airtime_recorded, \
+        no_extension_basename
 
-with md.metadata('MDATA_KEY_DURATION') as t:
-    t.default(u'0.0')
-    t.depends('length')
-    t.translate(lambda k: format_length(k['length']))
+defs_loaded = False
 
-with md.metadata('MDATA_KEY_MIME') as t:
-    t.default(u'')
-    t.depends('mime')
-    t.translate(lambda k: k['mime'].replace('-','/'))
+def is_defs_loaded():
+    global defs_loaded
+    return defs_loaded
 
-with md.metadata('MDATA_KEY_BITRATE') as t:
-    t.default(u'')
-    t.depends('bitrate')
-    t.translate(lambda k: k['bitrate'])
+def load_definitions():
+    with md.metadata('MDATA_KEY_DURATION') as t:
+        t.default(u'0.0')
+        t.depends('length')
+        t.translate(lambda k: format_length(k['length']))
 
-with md.metadata('MDATA_KEY_SAMPLERATE') as t:
-    t.default(u'0')
-    t.depends('sample_rate')
-    t.translate(lambda k: k['sample_rate'])
+    with md.metadata('MDATA_KEY_MIME') as t:
+        t.default(u'')
+        t.depends('mime')
+        # Is this necessary?
+        t.translate(lambda k: k['mime'].replace('audio/vorbis','audio/ogg'))
 
-with md.metadata('MDATA_KEY_FTYPE'):
-    t.depends('ftype') # i don't think this field even exists
-    t.default(u'audioclip')
-    t.translate(lambda k: k['ftype']) # but just in case
+    with md.metadata('MDATA_KEY_BITRATE') as t:
+        t.default(u'')
+        t.depends('bitrate')
+        t.translate(lambda k: k['bitrate'])
 
-with md.metadata("MDATA_KEY_CREATOR") as t:
-    t.depends("artist")
-    # A little kludge to make sure that we have some value for when we parse
-    # MDATA_KEY_TITLE
-    t.default(u"")
-    t.max_length(512)
+    with md.metadata('MDATA_KEY_SAMPLERATE') as t:
+        t.default(u'0')
+        t.depends('sample_rate')
+        t.translate(lambda k: k['sample_rate'])
 
-with md.metadata("MDATA_KEY_SOURCE") as t:
-    t.depends("album")
-    t.max_length(512)
+    with md.metadata('MDATA_KEY_FTYPE') as t:
+        t.depends('ftype') # i don't think this field even exists
+        t.default(u'audioclip')
+        t.translate(lambda k: k['ftype']) # but just in case
 
-with md.metadata("MDATA_KEY_GENRE") as t:
-    t.depends("genre")
-    t.max_length(64)
+    with md.metadata("MDATA_KEY_CREATOR") as t:
+        t.depends("artist")
+        # A little kludge to make sure that we have some value for when we parse
+        # MDATA_KEY_TITLE
+        t.default(u"")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_MOOD") as t:
-    t.depends("mood")
-    t.max_length(64)
+    with md.metadata("MDATA_KEY_SOURCE") as t:
+        t.depends("album")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_TRACKNUMBER") as t:
-    t.depends("tracknumber")
+    with md.metadata("MDATA_KEY_GENRE") as t:
+        t.depends("genre")
+        t.max_length(64)
 
-with md.metadata("MDATA_KEY_BPM") as t:
-    t.depends("bpm")
-    t.max_length(8)
+    with md.metadata("MDATA_KEY_MOOD") as t:
+        t.depends("mood")
+        t.max_length(64)
 
-with md.metadata("MDATA_KEY_LABEL") as t:
-    t.depends("organization")
-    t.max_length(512)
+    with md.metadata("MDATA_KEY_TRACKNUMBER") as t:
+        t.depends("tracknumber")
 
-with md.metadata("MDATA_KEY_COMPOSER") as t:
-    t.depends("composer")
-    t.max_length(512)
+    with md.metadata("MDATA_KEY_BPM") as t:
+        t.depends("bpm")
+        t.max_length(8)
 
-with md.metadata("MDATA_KEY_ENCODER") as t:
-    t.depends("encodedby")
-    t.max_length(512)
+    with md.metadata("MDATA_KEY_LABEL") as t:
+        t.depends("organization")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_CONDUCTOR") as t:
-    t.depends("conductor")
-    t.max_length(512)
+    with md.metadata("MDATA_KEY_COMPOSER") as t:
+        t.depends("composer")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_YEAR") as t:
-    t.depends("date")
-    t.max_length(16)
+    with md.metadata("MDATA_KEY_ENCODER") as t:
+        t.depends("encodedby")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_URL") as t:
-    t.depends("website")
+    with md.metadata("MDATA_KEY_CONDUCTOR") as t:
+        t.depends("conductor")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_ISRC") as t:
-    t.depends("isrc")
-    t.max_length(512)
+    with md.metadata("MDATA_KEY_YEAR") as t:
+        t.depends("date")
+        t.max_length(16)
 
-with md.metadata("MDATA_KEY_COPYRIGHT") as t:
-    t.depends("copyright")
-    t.max_length(512)
+    with md.metadata("MDATA_KEY_URL") as t:
+        t.depends("website")
 
-with md.metadata("MDATA_KEY_FILEPATH") as t:
-    t.depends('path')
-    t.translate(lambda k: normpath(k['path']))
+    with md.metadata("MDATA_KEY_ISRC") as t:
+        t.depends("isrc")
+        t.max_length(512)
 
-with md.metadata("MDATA_KEY_MD5") as t:
-    t.depends('path')
-    t.optional(False)
-    t.translate(lambda k: file_md5(k['path'], max_length=100))
+    with md.metadata("MDATA_KEY_COPYRIGHT") as t:
+        t.depends("copyright")
+        t.max_length(512)
 
-# owner is handled differently by (by events.py)
+    with md.metadata("MDATA_KEY_ORIGINAL_PATH") as t:
+        t.depends('path')
+        t.translate(lambda k: unicode(normpath(k['path'])))
 
-with md.metadata('MDATA_KEY_ORIGINAL_PATH') as t:
-    t.depends('original_path')
+    with md.metadata("MDATA_KEY_MD5") as t:
+        t.depends('path')
+        t.optional(False)
+        t.translate(lambda k: file_md5(k['path'], max_length=100))
 
-# MDATA_KEY_TITLE is the annoying special case
-with md.metadata('MDATA_KEY_TITLE') as t:
-    # Need to know MDATA_KEY_CREATOR to know if show was recorded. Value is
-    # defaulted to "" from definitions above
-    t.depends('title','MDATA_KEY_CREATOR')
-    t.max_length(512)
+    # owner is handled differently by (by events.py)
 
-with md.metadata('MDATA_KEY_LABEL') as t:
-    t.depends('label')
-    t.max_length(512)
+    # MDATA_KEY_TITLE is the annoying special case b/c we sometimes read it
+    # from file name
 
+    # must handle 3 cases:
+    # 1. regular case (not recorded + title is present)
+    # 2. title is absent (read from file)
+    # 3. recorded file
+    def tr_title(k):
+        #unicode_unknown = u"unknown"
+        new_title = u""
+        if is_airtime_recorded(k) or k['title'] != u"":
+            new_title = k['title']
+        else:
+            default_title = no_extension_basename(k['path'])
+            default_title = re.sub(r'__\d+\.',u'.', default_title)
 
+            # format is: track_number-title-123kbps.mp3
+            m = re.match(".+?-(?P<title>.+)-(\d+kbps|unknown)$", default_title)
+            if m: new_title = m.group('title')
+            else: new_title = re.sub(r'-\d+kbps$', u'', default_title)
 
+        return new_title
 
+    with md.metadata('MDATA_KEY_TITLE') as t:
+        # Need to know MDATA_KEY_CREATOR to know if show was recorded. Value is
+        # defaulted to "" from definitions above
+        t.depends('title','MDATA_KEY_CREATOR','path')
+        t.optional(False)
+        t.translate(tr_title)
+        t.max_length(512)
 
+    with md.metadata('MDATA_KEY_LABEL') as t:
+        t.depends('label')
+        t.max_length(512)
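
The `with md.metadata(...) as t:` blocks above are a small declarative DSL; the diff implies a context manager that yields a builder object and registers it when the block exits. A self-contained sketch of that shape, illustrative only and not the real media.metadata.process module:

from contextlib import contextmanager

registry = {}

class Element(object):
    def __init__(self, name):
        self.name = name
        self.deps, self.dflt, self.tr = (), None, None
    def depends(self, *deps):  self.deps = deps
    def default(self, value):  self.dflt = value
    def translate(self, f):    self.tr = f
    def max_length(self, n):   self.maxlen = n
    def optional(self, flag):  self.opt = flag

@contextmanager
def metadata(name):
    t = Element(name)
    yield t              # the body of the `with` block configures t
    registry[name] = t   # registration happens when the block exits

with metadata('MDATA_KEY_DURATION') as t:
    t.default(u'0.0')
    t.depends('length')
    t.translate(lambda k: k['length'])

assert 'MDATA_KEY_DURATION' in registry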

@@ -1,14 +1,36 @@
 # -*- coding: utf-8 -*-
 from contextlib import contextmanager
 from media.monitor.pure import truncate_to_length, toposort
+from os.path import normpath
+from media.monitor.exceptions import BadSongFile
+from media.monitor.log import Loggable
+import media.monitor.pure as mmp
+from collections import namedtuple
 import mutagen
 
+class FakeMutagen(dict):
+    """
+    Need this fake mutagen object so that airtime_special functions
+    return a proper default value instead of throwing an exceptions for
+    files that mutagen doesn't recognize
+    """
+    FakeInfo = namedtuple('FakeInfo','length bitrate')
+    def __init__(self,path):
+        self.path = path
+        self.mime = ['audio/wav']
+        self.info = FakeMutagen.FakeInfo(0.0, '')
+        dict.__init__(self)
+    def set_length(self,l):
+        old_bitrate = self.info.bitrate
+        self.info = FakeMutagen.FakeInfo(l, old_bitrate)
+
 class MetadataAbsent(Exception):
     def __init__(self, name): self.name = name
     def __str__(self): return "Could not obtain element '%s'" % self.name
 
-class MetadataElement(object):
+class MetadataElement(Loggable):
 
     def __init__(self,name):
         self.name = name
         # "Sane" defaults
@@ -18,6 +40,7 @@ class MetadataElement(object):
         self.__default = None
         self.__is_normalized = lambda _ : True
         self.__max_length = -1
+        self.__translator = None
 
     def max_length(self,l):
         self.__max_length = l
@@ -57,31 +80,64 @@ class MetadataElement(object):
         return self.__path
 
     def __slice_deps(self, d):
         """
         returns a dictionary of all the key value pairs in d that are also
         present in self.__deps
         """
         return dict( (k,v) for k,v in d.iteritems() if k in self.__deps)
 
     def __str__(self):
         return "%s(%s)" % (self.name, ' '.join(list(self.__deps)))
 
     def read_value(self, path, original, running={}):
-        # If value is present and normalized then we don't touch it
+        # If value is present and normalized then we only check if it's
+        # normalized or not. We normalize if it's not normalized already
+
         if self.name in original:
             v = original[self.name]
             if self.__is_normalized(v): return v
             else: return self.__normalizer(v)
 
-        # A dictionary slice with all the dependencies and their values
+        # We slice out only the dependencies that are required for the metadata
+        # element.
         dep_slice_orig = self.__slice_deps(original)
         dep_slice_running = self.__slice_deps(running)
+        # TODO : remove this later
+        dep_slice_special = self.__slice_deps({'path' : path})
+        # We combine all required dependencies into a single dictionary
+        # that we will pass to the translator
         full_deps = dict( dep_slice_orig.items()
-                        + dep_slice_running.items() )
+                        + dep_slice_running.items()
+                        + dep_slice_special.items())
 
         # check if any dependencies are absent
-        if len(full_deps) != len(self.__deps) or len(self.__deps) == 0:
+        # note: there is no point checking the case that len(full_deps) >
+        # len(self.__deps) because we make sure to "slice out" any supefluous
+        # dependencies above.
+        if len(full_deps) != len(self.dependencies()) or \
+                len(self.dependencies()) == 0:
             # If we have a default value then use that. Otherwise throw an
             # exception
             if self.has_default(): return self.get_default()
             else: raise MetadataAbsent(self.name)
 
         # We have all dependencies. Now for actual for parsing
         def def_translate(dep):
             def wrap(k):
                 e = [ x for x in dep ][0]
                 return k[e]
             return wrap
 
         # Only case where we can select a default translator
         if self.__translator is None:
             self.translate(def_translate(self.dependencies()))
+            if len(self.dependencies()) > 2: # dependencies include themselves
+                self.logger.info("Ignoring some dependencies in translate %s"
+                        % self.name)
+                self.logger.info(self.dependencies())
 
         r = self.__normalizer( self.__translator(full_deps) )
         if self.__max_length != -1:
             r = truncate_to_length(r, self.__max_length)
@@ -92,24 +148,40 @@ def normalize_mutagen(path):
     Consumes a path and reads the metadata using mutagen. normalizes some of
     the metadata that isn't read through the mutagen hash
     """
-    m = mutagen.File(path, easy=True)
+    if not mmp.file_playable(path): raise BadSongFile(path)
+    try : m = mutagen.File(path, easy=True)
+    except Exception : raise BadSongFile(path)
+    if m is None: m = FakeMutagen(path)
+    try:
+        if mmp.extension(path) == 'wav':
+            m.set_length(mmp.read_wave_duration(path))
+    except Exception: raise BadSongFile(path)
     md = {}
     for k,v in m.iteritems():
-        if type(v) is list: md[k] = v[0]
+        if type(v) is list:
+            if len(v) > 0: md[k] = v[0]
         else: md[k] = v
     # populate special metadata values
-    md['length'] = getattr(m.info, u'length', 0.0)
+    md['length'] = getattr(m.info, 'length', 0.0)
     md['bitrate'] = getattr(m.info, 'bitrate', u'')
     md['sample_rate'] = getattr(m.info, 'sample_rate', 0)
     md['mime'] = m.mime[0] if len(m.mime) > 0 else u''
-    md['path'] = path
+    md['path'] = normpath(path)
+    if 'title' not in md: md['title'] = u''
     return md
 
+class OverwriteMetadataElement(Exception):
+    def __init__(self, m): self.m = m
+    def __str__(self): return "Trying to overwrite: %s" % self.m
 
 class MetadataReader(object):
     def __init__(self):
         self.clear()
 
     def register_metadata(self,m):
+        if m in self.__mdata_name_map:
+            raise OverwriteMetadataElement(m)
         self.__mdata_name_map[m.name] = m
         d = dict( (name,m.dependencies()) for name,m in
                 self.__mdata_name_map.iteritems() )
@@ -131,6 +203,9 @@ class MetadataReader(object):
                 if not mdata.is_optional(): raise
         return normalized_metadata
 
+    def read_mutagen(self, path):
+        return self.read(path, normalize_mutagen(path))
+
 global_reader = MetadataReader()
 
 @contextmanager
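
register_metadata above rebuilds a name-to-dependencies map on every call, and the module imports toposort so that elements can be resolved after their dependencies. A compact stand-alone topological sort over such a map, for illustration (the real one lives in media.monitor.pure):

def toposort(deps):
    """ Return names ordered so that each comes after its dependencies. """
    seen, order = set(), []
    def visit(name, stack=()):
        assert name not in stack, "cyclic dependency at %r" % name
        if name in seen or name not in deps: return
        for d in deps[name]: visit(d, stack + (name,))
        seen.add(name)
        order.append(name)
    for name in deps: visit(name)
    return order

print(toposort({'TITLE': ['creator', 'path'], 'creator': [], 'path': []}))
# e.g. ['creator', 'path', 'TITLE']; 'TITLE' always comes last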

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 import pyinotify
 from pydispatch import dispatcher
+from functools import wraps
 
 import media.monitor.pure as mmp
 from media.monitor.pure import IncludeOnly
@@ -31,6 +32,7 @@ class FileMediator(object):
     def unignore(path): FileMediator.ignored_set.remove(path)
 
 def mediate_ignored(fn):
+    @wraps(fn)
     def wrapped(self, event, *args,**kwargs):
         event.pathname = unicode(event.pathname, "utf-8")
         if FileMediator.is_ignored(event.pathname):

@@ -6,37 +6,30 @@ from media.monitor.pure import LazyProperty
 appname = 'root'
 
 def setup_logging(log_path):
-    """
-    Setup logging by writing log to 'log_path'
-    """
+    """ Setup logging by writing log to 'log_path' """
     #logger = logging.getLogger(appname)
     logging.basicConfig(filename=log_path, level=logging.DEBUG)
 
 def get_logger():
-    """
-    in case we want to use the common logger from a procedural interface
-    """
+    """ in case we want to use the common logger from a procedural
+    interface """
     return logging.getLogger()
 
 class Loggable(object):
-    """
-    Any class that wants to log can inherit from this class and automatically
-    get a logger attribute that can be used like: self.logger.info(...) etc.
-    """
+    """ Any class that wants to log can inherit from this class and
+    automatically get a logger attribute that can be used like:
+    self.logger.info(...) etc. """
     __metaclass__ = abc.ABCMeta
     @LazyProperty
     def logger(self): return get_logger()
 
     def unexpected_exception(self,e):
-        """
-        Default message for 'unexpected' exceptions
-        """
+        """ Default message for 'unexpected' exceptions """
        self.fatal_exception("'Unexpected' exception has occured:", e)
 
     def fatal_exception(self, message, e):
-        """
-        Prints an exception 'e' with 'message'. Also outputs the traceback.
-        """
+        """ Prints an exception 'e' with 'message'. Also outputs the
+        traceback. """
         self.logger.error( message )
         self.logger.error( str(e) )
         self.logger.error( traceback.format_exc() )
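
The `logger` attribute above is built with @LazyProperty from media.monitor.pure. Such a descriptor is conventionally a non-data descriptor that computes the value on first access and caches it on the instance; a sketch of that assumed behavior:

class LazyProperty(object):
    """ Non-data descriptor: compute once, then cache on the instance. """
    def __init__(self, fget):
        self.fget = fget
        self.name = fget.__name__
    def __get__(self, obj, cls=None):
        if obj is None: return self
        value = self.fget(obj)
        setattr(obj, self.name, value)  # instance attribute now shadows us
        return value

class Demo(object):
    @LazyProperty
    def thing(self):
        print("computed once")
        return 42

d = Demo()
assert d.thing == d.thing  # "computed once" is printed a single time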

@@ -2,15 +2,19 @@
 import mutagen
 import os
+import copy
-from collections import namedtuple
 from mutagen.easymp4 import EasyMP4KeyError
 from mutagen.easyid3 import EasyID3KeyError
 
 from media.monitor.exceptions import BadSongFile, InvalidMetadataElement
 from media.monitor.log import Loggable
-from media.monitor.pure import format_length, truncate_to_length
+from media.monitor.pure import format_length
 import media.monitor.pure as mmp
 
+# emf related stuff
+from media.metadata.process import global_reader
+import media.metadata.definitions as defs
+defs.load_definitions()
 
 """
 list of supported easy tags in mutagen version 1.20
 ['albumartistsort', 'musicbrainz_albumstatus', 'lyricist', 'releasecountry',
@@ -43,21 +47,6 @@ airtime2mutagen = {
     "MDATA_KEY_COPYRIGHT" : "copyright",
 }
 
-class FakeMutagen(dict):
-    """
-    Need this fake mutagen object so that airtime_special functions
-    return a proper default value instead of throwing an exceptions for
-    files that mutagen doesn't recognize
-    """
-    FakeInfo = namedtuple('FakeInfo','length bitrate')
-    def __init__(self,path):
-        self.path = path
-        self.mime = ['audio/wav']
-        self.info = FakeMutagen.FakeInfo(0.0, '')
-        dict.__init__(self)
-    def set_length(self,l):
-        old_bitrate = self.info.bitrate
-        self.info = FakeMutagen.FakeInfo(l, old_bitrate)
-
 # Some airtime attributes are special because they must use the mutagen object
 # itself to calculate the value that they need. The lambda associated with each
@@ -100,6 +89,7 @@ class Metadata(Loggable):
     # little bit messy. Some of the handling is in m.m.pure while the rest is
     # here. Also interface is not very consistent
 
+    # TODO : what is this shit? maybe get rid of it?
     @staticmethod
     def fix_title(path):
         # If we have no title in path we will format it
@@ -110,39 +100,6 @@ class Metadata(Loggable):
             m[u'title'] = new_title
             m.save()
 
-    @staticmethod
-    def airtime_dict(d):
-        """
-        Converts mutagen dictionary 'd' into airtime dictionary
-        """
-        temp_dict = {}
-        for m_key, m_val in d.iteritems():
-            # TODO : some files have multiple fields for the same metadata.
-            # genre is one example. In that case mutagen will return a list
-            # of values
-
-            if isinstance(m_val, list):
-                # TODO : does it make more sense to just skip the element in
-                # this case?
-                if len(m_val) == 0: assign_val = ''
-                else: assign_val = m_val[0]
-            else: assign_val = m_val
-
-            temp_dict[ m_key ] = assign_val
-        airtime_dictionary = {}
-        for muta_k, muta_v in temp_dict.iteritems():
-            # We must check if we can actually translate the mutagen key into
-            # an airtime key before doing the conversion
-            if muta_k in mutagen2airtime:
-                airtime_key = mutagen2airtime[muta_k]
-                # Apply truncation in the case where airtime_key is in our
-                # truncation table
-                muta_v = \
-                    truncate_to_length(muta_v, truncate_table[airtime_key])\
-                        if airtime_key in truncate_table else muta_v
-                airtime_dictionary[ airtime_key ] = muta_v
-        return airtime_dictionary
-
     @staticmethod
     def write_unsafe(path,md):
         """
@@ -157,6 +114,7 @@ class Metadata(Loggable):
             if airtime_k in airtime2mutagen:
                 # The unicode cast here is mostly for integers that need to be
                 # strings
+                if airtime_v is None: continue
                 try:
                     song_file[ airtime2mutagen[airtime_k] ] = unicode(airtime_v)
                 except (EasyMP4KeyError, EasyID3KeyError) as e:
@@ -170,44 +128,7 @@ class Metadata(Loggable):
         # Forcing the unicode through
         try : fpath = fpath.decode("utf-8")
         except : pass
-
-        if not mmp.file_playable(fpath): raise BadSongFile(fpath)
-
-        try : full_mutagen = mutagen.File(fpath, easy=True)
-        except Exception : raise BadSongFile(fpath)
-
-        self.path = fpath
-        if not os.path.exists(self.path):
-            self.logger.info("Attempting to read metadata of file \
-                    that does not exist. Setting metadata to {}")
-            self.__metadata = {}
-            return
-        # TODO : Simplify the way all of these rules are handled right now it's
-        # extremely unclear and needs to be refactored.
-        #if full_mutagen is None: raise BadSongFile(fpath)
-        if full_mutagen is None: full_mutagen = FakeMutagen(fpath)
-        self.__metadata = Metadata.airtime_dict(full_mutagen)
-        # Now we extra the special values that are calculated from the mutagen
-        # object itself:
-
-        if mmp.extension(fpath) == 'wav':
-            full_mutagen.set_length(mmp.read_wave_duration(fpath))
-
-        for special_key,f in airtime_special.iteritems():
-            try:
-                new_val = f(full_mutagen)
-                if new_val is not None:
-                    self.__metadata[special_key] = new_val
-            except Exception as e:
-                self.logger.info("Could not get special key %s for %s" %
-                        (special_key, fpath))
-                self.logger.info(str(e))
-        # Finally, we "normalize" all the metadata here:
-        self.__metadata = mmp.normalized_metadata(self.__metadata, fpath)
-        # Now we must load the md5:
-        # TODO : perhaps we shouldn't hard code how many bytes we're reading
-        # from the file?
-        self.__metadata['MDATA_KEY_MD5'] = mmp.file_md5(fpath,max_length=100)
+        self.__metadata = global_reader.read_mutagen(fpath)
 
     def is_recorded(self):
         """

@@ -10,14 +10,12 @@ from os.path import dirname
 import os.path
 
 class Organizer(ReportHandler,Loggable):
-    """
-    Organizer is responsible to to listening to OrganizeListener events
-    and committing the appropriate changes to the filesystem. It does
-    not in any interact with WatchSyncer's even when the the WatchSyncer
-    is a "storage directory". The "storage" directory picks up all of
-    its events through pyinotify. (These events are fed to it through
-    StoreWatchListener)
-    """
+    """ Organizer is responsible to to listening to OrganizeListener
+    events and committing the appropriate changes to the filesystem.
+    It does not in any interact with WatchSyncer's even when the the
+    WatchSyncer is a "storage directory". The "storage" directory picks
+    up all of its events through pyinotify. (These events are fed to it
+    through StoreWatchListener) """
 
     # Commented out making this class a singleton because it's just a band aid
     # for the real issue. The real issue being making multiple Organizer
@@ -41,11 +39,9 @@ class Organizer(ReportHandler,Loggable):
         super(Organizer, self).__init__(signal=self.channel, weak=False)
 
     def handle(self, sender, event):
-        """
-        Intercept events where a new file has been added to the organize
-        directory and place it in the correct path (starting with
-        self.target_path)
-        """
+        """ Intercept events where a new file has been added to the
+        organize directory and place it in the correct path (starting
+        with self.target_path) """
         # Only handle this event type
         assert isinstance(event, OrganizeFile), \
             "Organizer can only handle OrganizeFile events.Given '%s'" % event

@@ -22,7 +22,6 @@ from configobj import ConfigObj
 
 from media.monitor.exceptions import FailedToSetLocale, FailedToCreateDir
 
-#supported_extensions = [u"mp3", u"ogg", u"oga"]
 supported_extensions = [u"mp3", u"ogg", u"oga", u"flac", u"wav",
         u'm4a', u'mp4']
 
@@ -67,7 +66,6 @@ class IncludeOnly(object):
             return func(moi, event, *args, **kwargs)
         return _wrap
 
-
 def partition(f, alist):
     """
     Partition is very similar to filter except that it also returns the
@@ -93,14 +91,13 @@ def is_file_supported(path):
 # TODO : In the future we would like a better way to find out whether a show
 # has been recorded
 def is_airtime_recorded(md):
-    """
-    Takes a metadata dictionary and returns True if it belongs to a file that
-    was recorded by Airtime.
-    """
+    """ Takes a metadata dictionary and returns True if it belongs to a
+    file that was recorded by Airtime. """
     if not 'MDATA_KEY_CREATOR' in md: return False
     return md['MDATA_KEY_CREATOR'] == u'Airtime Show Recorder'
 
 def read_wave_duration(path):
     """ Read the length of .wav file (mutagen does not handle this) """
     with contextlib.closing(wave.open(path,'r')) as f:
         frames = f.getnframes()
         rate = f.getframerate()
@@ -108,9 +105,7 @@ def read_wave_duration(path):
     return duration
 
 def clean_empty_dirs(path):
-    """
-    walks path and deletes every empty directory it finds
-    """
+    """ walks path and deletes every empty directory it finds """
     # TODO : test this function
     if path.endswith('/'): clean_empty_dirs(path[0:-1])
     else:
@@ -155,11 +150,10 @@ def no_extension_basename(path):
     else: return '.'.join(base.split(".")[0:-1])
 
 def walk_supported(directory, clean_empties=False):
-    """
-    A small generator wrapper around os.walk to only give us files that support
-    the extensions we are considering. When clean_empties is True we
-    recursively delete empty directories left over in directory after the walk.
-    """
+    """ A small generator wrapper around os.walk to only give us files
+    that support the extensions we are considering. When clean_empties
+    is True we recursively delete empty directories left over in
+    directory after the walk. """
     for root, dirs, files in os.walk(directory):
         full_paths = ( os.path.join(root, name) for name in files
                 if is_file_supported(name) )
@@ -173,10 +167,8 @@ def file_locked(path):
     return bool(f.readlines())
 
 def magic_move(old, new, after_dir_make=lambda : None):
-    """
-    Moves path old to new and constructs the necessary to directories for new
-    along the way
-    """
+    """ Moves path old to new and constructs the necessary to
+    directories for new along the way """
     new_dir = os.path.dirname(new)
     if not os.path.exists(new_dir): os.makedirs(new_dir)
     # We need this crusty hack because anytime a directory is created we must
@@ -186,18 +178,15 @@ def magic_move(old, new, after_dir_make=lambda : None):
     shutil.move(old,new)
 
 def move_to_dir(dir_path,file_path):
-    """
-    moves a file at file_path into dir_path/basename(filename)
-    """
+    """ moves a file at file_path into dir_path/basename(filename) """
     bs = os.path.basename(file_path)
     magic_move(file_path, os.path.join(dir_path, bs))
 
 def apply_rules_dict(d, rules):
-    """
-    Consumes a dictionary of rules that maps some keys to lambdas which it
-    applies to every matching element in d and returns a new dictionary with
-    the rules applied. If a rule returns none then it's not applied
-    """
+    """ Consumes a dictionary of rules that maps some keys to lambdas
+    which it applies to every matching element in d and returns a new
+    dictionary with the rules applied. If a rule returns none then it's
+    not applied """
     new_d = copy.deepcopy(d)
     for k, rule in rules.iteritems():
         if k in d:
@@ -212,17 +201,14 @@ def default_to_f(dictionary, keys, default, condition):
     return new_d
 
 def default_to(dictionary, keys, default):
-    """
-    Checks if the list of keys 'keys' exists in 'dictionary'. If not then it
-    returns a new dictionary with all those missing keys defaults to 'default'
-    """
+    """ Checks if the list of keys 'keys' exists in 'dictionary'. If
+    not then it returns a new dictionary with all those missing keys
+    defaults to 'default' """
     cnd = lambda dictionary, key: key not in dictionary
     return default_to_f(dictionary, keys, default, cnd)
 
 def remove_whitespace(dictionary):
-    """
-    Remove values that empty whitespace in the dictionary
-    """
+    """ Remove values that empty whitespace in the dictionary """
     nd = copy.deepcopy(dictionary)
     bad_keys = []
     for k,v in nd.iteritems():
@@ -234,6 +220,7 @@ def remove_whitespace(dictionary):
     return nd
 
 def parse_int(s):
+    # TODO : this function isn't used anywhere yet but it may useful for emf
     """
     Tries very hard to get some sort of integer result from s. Defaults to 0
    when it fails
@@ -249,53 +236,6 @@ def parse_int(s):
     try : return str(reduce(op.add, takewhile(lambda x: x.isdigit(), s)))
     except: return None
 
-def normalized_metadata(md, original_path):
-    """
-    consumes a dictionary of metadata and returns a new dictionary with the
-    formatted meta data. We also consume original_path because we must set
-    MDATA_KEY_CREATOR based on in it sometimes
-    """
-    new_md = copy.deepcopy(md)
-    # replace all slashes with dashes
-    #for k,v in new_md.iteritems(): new_md[k] = unicode(v).replace('/','-')
-    # Specific rules that are applied in a per attribute basis
-    format_rules = {
-        'MDATA_KEY_TRACKNUMBER' : parse_int,
-        'MDATA_KEY_FILEPATH' : lambda x: os.path.normpath(x),
-        'MDATA_KEY_BPM' : lambda x: x[0:8],
-        'MDATA_KEY_MIME' : lambda x: x.replace('audio/vorbis','audio/ogg'),
-        # Whenever 0 is reported we change it to empty
-        #'MDATA_KEY_BITRATE' : lambda x: '' if str(x) == '0' else x
-    }
-
-    new_md = remove_whitespace(new_md) # remove whitespace fields
-    # Format all the fields in format_rules
-    new_md = apply_rules_dict(new_md, format_rules)
-    # set filetype to audioclip by default
-    new_md = default_to(dictionary=new_md, keys=['MDATA_KEY_FTYPE'],
-                        default=u'audioclip')
-
-    # Try to parse bpm but delete the whole key if that fails
-    if 'MDATA_KEY_BPM' in new_md:
-        new_md['MDATA_KEY_BPM'] = parse_int(new_md['MDATA_KEY_BPM'])
-        if new_md['MDATA_KEY_BPM'] is None:
-            del new_md['MDATA_KEY_BPM']
-
-    if not is_airtime_recorded(new_md):
-        # Read title from filename if it does not exist
-        default_title = no_extension_basename(original_path)
-        default_title = re.sub(r'__\d+\.',u'.', default_title)
-        if re.match(".+-%s-.+$" % unicode_unknown, default_title):
-            default_title = u''
-        new_md = default_to(dictionary=new_md, keys=['MDATA_KEY_TITLE'],
-                            default=default_title)
-        new_md['MDATA_KEY_TITLE'] = re.sub(r'-\d+kbps$', u'',
-                new_md['MDATA_KEY_TITLE'])
-
-    # TODO : wtf is this for again?
-    new_md['MDATA_KEY_TITLE'] = re.sub(r'-?%s-?' % unicode_unknown, u'',
-            new_md['MDATA_KEY_TITLE'])
-    return new_md
-
 def organized_path(old_path, root_path, orig_md):
     """
@@ -355,10 +295,9 @@ def organized_path(old_path, root_path, orig_md):
 # TODO : Get rid of this function and every one of its uses. We no longer use
 # the md5 signature of a song for anything
 def file_md5(path,max_length=100):
-    """
-    Get md5 of file path (if it exists). Use only max_length characters to save
-    time and memory. Pass max_length=-1 to read the whole file (like in mm1)
-    """
+    """ Get md5 of file path (if it exists). Use only max_length
+    characters to save time and memory. Pass max_length=-1 to read the
+    whole file (like in mm1) """
     if os.path.exists(path):
         with open(path, 'rb') as f:
             m = hashlib.md5()
@@ -374,16 +313,12 @@ def encode_to(obj, encoding='utf-8'):
     return obj
 
 def convert_dict_value_to_utf8(md):
-    """
-    formats a dictionary to send as a request to api client
-    """
+    """ formats a dictionary to send as a request to api client """
     return dict([(item[0], encode_to(item[1], "utf-8")) for item in md.items()])
 
 def get_system_locale(locale_path='/etc/default/locale'):
-    """
-    Returns the configuration object for the system's default locale. Normally
-    requires root access.
-    """
+    """ Returns the configuration object for the system's default
+    locale. Normally requires root access. """
     if os.path.exists(locale_path):
         try:
             config = ConfigObj(locale_path)
@@ -393,9 +328,7 @@ def get_system_locale(locale_path='/etc/default/locale'):
             permissions issue?" % locale_path)
 
 def configure_locale(config):
-    """
-    sets the locale according to the system's locale.
-    """
+    """ sets the locale according to the system's locale. """
     current_locale = locale.getlocale()
     if current_locale[1] is None:
         default_locale = locale.getdefaultlocale()
@@ -412,27 +345,21 @@ def configure_locale(config):
 
 def fondle(path,times=None):
     # TODO : write unit tests for this
-    """
-    touch a file to change the last modified date. Beware of calling this
-    function on the same file from multiple threads.
-    """
+    """ touch a file to change the last modified date. Beware of calling
+    this function on the same file from multiple threads. """
     with file(path, 'a'): os.utime(path, times)
 
 def last_modified(path):
-    """
-    return the time of the last time mm2 was ran. path refers to the index file
-    whose date modified attribute contains this information. In the case when
-    the file does not exist we set this time 0 so that any files on the
-    filesystem were modified after it
-    """
+    """ return the time of the last time mm2 was ran. path refers to the
+    index file whose date modified attribute contains this information.
+    In the case when the file does not exist we set this time 0 so that
+    any files on the filesystem were modified after it """
     if os.path.exists(path): return os.path.getmtime(path)
     else: return 0
 
 def expand_storage(store):
-    """
-    A storage directory usually consists of 4 different subdirectories. This
-    function returns their paths
-    """
+    """ A storage directory usually consists of 4 different
+    subdirectories. This function returns their paths """
     store = os.path.normpath(store)
     return {
         'organize' : os.path.join(store, 'organize'),
@@ -442,10 +369,8 @@ def expand_storage(store):
     }
 
 def create_dir(path):
-    """
-    will try and make sure that path exists at all costs. raises an exception
-    if it fails at this task.
-    """
+    """ will try and make sure that path exists at all costs. raises an
+    exception if it fails at this task. """
     if not os.path.exists(path):
         try : os.makedirs(path)
         except Exception as e : raise FailedToCreateDir(path, e)
@@ -463,11 +388,10 @@ def sub_path(directory,f):
     return common == normalized
 
 def owner_id(original_path):
-    """
-    Given 'original_path' return the file name of the of 'identifier' file.
-    return the id that is contained in it. If no file is found or nothing is
-    read then -1 is returned. File is deleted after the number has been read
-    """
+    """ Given 'original_path' return the file name of the of
+    'identifier' file. return the id that is contained in it. If no file
+    is found or nothing is read then -1 is returned. File is deleted
+    after the number has been read """
     fname = "%s.identifier" % original_path
     owner_id = -1
     try:
@@ -483,9 +407,8 @@ def owner_id(original_path):
     return owner_id
 
 def file_playable(pathname):
-    """
-    Returns True if 'pathname' is playable by liquidsoap. False otherwise.
-    """
+    """ Returns True if 'pathname' is playable by liquidsoap. False
+    otherwise. """
     # when there is an single apostrophe inside of a string quoted by
     # apostrophes, we can only escape it by replace that apostrophe with
     # '\''. This breaks the string into two, and inserts an escaped
@@ -521,18 +444,14 @@ def toposort(data):
     assert not data, "A cyclic dependency exists amongst %r" % data
 
 def truncate_to_length(item, length):
-    """
-    Truncates 'item' to 'length'
-    """
+    """ Truncates 'item' to 'length' """
     if isinstance(item, int): item = str(item)
     if isinstance(item, basestring):
         if len(item) > length: return item[0:length]
         else: return item
 
 def format_length(mutagen_length):
-    """
-    Convert mutagen length to airtime length
-    """
+    """ Convert mutagen length to airtime length """
     t = float(mutagen_length)
     h = int(math.floor(t / 3600))
     t = t % 3600
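
file_md5 above deliberately hashes only the first max_length bytes to bound time and memory, with -1 meaning the whole file. A condensed illustration of the same idea (not the diff's exact function, which also checks os.path.exists first):

import hashlib

def quick_md5(path, max_length=100):
    # hash only the first max_length bytes; -1 means the whole file
    with open(path, 'rb') as f:
        data = f.read() if max_length == -1 else f.read(max_length)
    return hashlib.md5(data).hexdigest()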

@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import threading
+
+from media.monitor.exceptions import BadSongFile
+from media.monitor.log import Loggable
+import api_clients.api_client as ac
+
+class ThreadedRequestSync(threading.Thread, Loggable):
+    def __init__(self, rs):
+        threading.Thread.__init__(self)
+        self.rs = rs
+        self.daemon = True
+        self.start()
+
+    def run(self):
+        self.rs.run_request()
+
+class RequestSync(Loggable):
+    """ This class is responsible for making the api call to send a
+    request to airtime. In the process it packs the requests and retries
+    for some number of times """
+    @classmethod
+    def create_with_api_client(cls, watcher, requests):
+        apiclient = ac.AirtimeApiClient.create_right_config()
+        self = cls(watcher, requests, apiclient)
+        return self
+
+    def __init__(self, watcher, requests, apiclient):
+        self.watcher = watcher
+        self.requests = requests
+        self.apiclient = apiclient
+
+    def run_request(self):
+        self.logger.info("Attempting request with %d items." %
+                len(self.requests))
+        packed_requests = []
+        for request_event in self.requests:
+            try:
+                for request in request_event.safe_pack():
+                    if isinstance(request, BadSongFile):
+                        self.logger.info("Bad song file: '%s'" % request.path)
+                    else: packed_requests.append(request)
+            except Exception as e:
+                self.unexpected_exception( e )
+                if hasattr(request_event, 'path'):
+                    self.logger.info("Possibly related to path: '%s'" %
+                            request_event.path)
+        try: self.apiclient.send_media_monitor_requests( packed_requests )
+        # most likely we did not get json response as we expected
+        except ValueError:
+            self.logger.info("ApiController.php probably crashed, we \
+                    diagnose this from the fact that it did not return \
+                    valid json")
+            self.logger.info("Trying again after %f seconds" %
+                    self.request_wait)
+        except Exception as e: self.unexpected_exception(e)
+        else: self.logger.info("Request was successful")
+        self.watcher.flag_done() # poor man's condition variable
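
Note the pattern in ThreadedRequestSync above: the constructor sets daemon = True and calls start(), so constructing the object is itself what dispatches the request on a background thread. A self-contained sketch of that construction-equals-dispatch pattern:

import threading

class ThreadedCall(threading.Thread):
    def __init__(self, fn):
        threading.Thread.__init__(self)
        self.fn = fn
        self.daemon = True   # do not block interpreter shutdown
        self.start()         # construction == dispatch
    def run(self):
        self.fn()

t = ThreadedCall(lambda: None)
t.join()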

@@ -53,11 +53,11 @@ class AirtimeDB(Loggable):
         """
         return self.id_to_dir[ dir_id ]
 
-    def storage_path(self): return self.base_storage
-    def organize_path(self): return self.storage_paths['organize']
-    def problem_path(self): return self.storage_paths['problem_files']
-    def import_path(self): return self.storage_paths['imported']
-    def recorded_path(self): return self.storage_paths['recorded']
+    def storage_path(self) : return self.base_storage
+    def organize_path(self) : return self.storage_paths['organize']
+    def problem_path(self) : return self.storage_paths['problem_files']
+    def import_path(self) : return self.storage_paths['imported']
+    def recorded_path(self) : return self.storage_paths['recorded']
 
     def list_watched(self):
         """

@@ -6,69 +6,9 @@ import copy
 from media.monitor.handler import ReportHandler
 from media.monitor.log import Loggable
 from media.monitor.exceptions import BadSongFile
-from media.monitor.pure import LazyProperty
 from media.monitor.eventcontractor import EventContractor
 from media.monitor.events import EventProxy
 
-import api_clients.api_client as ac
-
-class RequestSync(threading.Thread,Loggable):
-    """
-    This class is responsible for making the api call to send a request
-    to airtime. In the process it packs the requests and retries for
-    some number of times
-    """
-    def __init__(self, watcher, requests):
-        threading.Thread.__init__(self)
-        self.watcher = watcher
-        self.requests = requests
-        self.retries = 1
-        self.request_wait = 0.3
-
-    @LazyProperty
-    def apiclient(self):
-        return ac.AirtimeApiClient.create_right_config()
-
-    def run(self):
-        self.logger.info("Attempting request with %d items." %
-                len(self.requests))
-        # Note that we must attach the appropriate mode to every
-        # response. Also Not forget to attach the 'is_record' to any
-        # requests that are related to recorded shows
-        # TODO : recorded shows aren't flagged right
-        # Is this retry shit even necessary? Consider getting rid of this.
-        packed_requests = []
-        for request_event in self.requests:
-            try:
-                for request in request_event.safe_pack():
-                    if isinstance(request, BadSongFile):
-                        self.logger.info("Bad song file: '%s'" % request.path)
-                    else: packed_requests.append(request)
-            except Exception as e:
-                self.unexpected_exception( e )
-                if hasattr(request_event, 'path'):
-                    self.logger.info("Possibly related to path: '%s'" %
-                            request_event.path)
-        def make_req():
-            self.apiclient.send_media_monitor_requests( packed_requests )
-        for try_index in range(0,self.retries):
-            try: make_req()
-            # most likely we did not get json response as we expected
-            except ValueError:
-                self.logger.info("ApiController.php probably crashed, we \
-                        diagnose this from the fact that it did not return \
-                        valid json")
-                self.logger.info("Trying again after %f seconds" %
-                        self.request_wait)
-                time.sleep( self.request_wait )
-            except Exception as e: self.unexpected_exception(e)
-            else:
-                self.logger.info("Request worked on the '%d' try" %
-                        (try_index + 1))
-                break
-        else: self.logger.info("Failed to send request after '%d' tries..." %
-                self.retries)
-        self.watcher.flag_done()
+from media.monitor.request import ThreadedRequestSync, RequestSync
 
 class TimeoutWatcher(threading.Thread,Loggable):
     """
@@ -131,8 +71,7 @@ class WatchSyncer(ReportHandler,Loggable):
             #self.push_queue( event )
         except BadSongFile as e:
             self.fatal_exception("Received bas song file '%s'" % e.path, e)
-        except Exception as e:
-            self.unexpected_exception(e)
+        except Exception as e: self.unexpected_exception(e)
         else:
             self.logger.info("Received event that does not implement packing.\
                     Printing its representation:")
@@ -209,8 +148,8 @@ class WatchSyncer(ReportHandler,Loggable):
         requests = copy.copy(self.__queue)
         def launch_request():
             # Need shallow copy here
-            t = RequestSync(watcher=self, requests=requests)
-            t.start()
+            t = ThreadedRequestSync( RequestSync.create_with_api_client(
+                watcher=self, requests=requests) )
             self.__current_thread = t
         self.__requests.append(launch_request)
         self.__reset_queue()
@@ -218,7 +157,8 @@ class WatchSyncer(ReportHandler,Loggable):
     def __reset_queue(self): self.__queue = []
 
     def __del__(self):
-        # Ideally we would like to do a little more to ensure safe shutdown
+        #this destructor is completely untested and it's unclear whether
+        #it's even doing anything useful. consider removing it
        if self.events_in_queue():
            self.logger.warn("Terminating with events still in the queue...")
        if self.requests_in_queue():

@@ -59,7 +59,8 @@ def main(global_config, api_client_config, log_config,
     try:
         with open(config['index_path'], 'w') as f: f.write(" ")
     except Exception as e:
-        log.info("Failed to create index file with exception: %s" % str(e))
+        log.info("Failed to create index file with exception: %s" \
+                % str(e))
     else:
         log.info("Created index file, reloading configuration:")
         main( global_config, api_client_config, log_config,

@@ -19,8 +19,8 @@ class TestApiClient(unittest.TestCase):
         self.apc.register_component("api-client-tester")
         # All of the following requests should error out in some way
         self.bad_requests = [
-            { 'mode' : 'dang it', 'is_record' : 0 },
-            { 'mode' : 'damn frank', 'is_record' : 1 },
+            { 'mode' : 'foo', 'is_record' : 0 },
+            { 'mode' : 'bar', 'is_record' : 1 },
             { 'no_mode' : 'at_all' }, ]
 
     def test_bad_requests(self):

@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+import unittest
+#from pprint import pprint as pp
+
+from media.metadata.process import global_reader
+from media.monitor.metadata import Metadata
+
+import media.metadata.definitions as defs
+defs.load_definitions()
+
+class TestMMP(unittest.TestCase):
+
+    def setUp(self):
+        self.maxDiff = None
+
+    def metadatas(self,f):
+        return global_reader.read_mutagen(f), Metadata(f).extract()
+
+    def test_old_metadata(self):
+        path = "/home/rudi/music/Nightingale.mp3"
+        m = global_reader.read_mutagen(path)
+        self.assertTrue( len(m) > 0 )
+        n = Metadata(path)
+        self.assertEqual(n.extract(), m)
+
+    def test_recorded(self):
+        recorded_file = "./15:15:00-Untitled Show-256kbps.ogg"
+        emf, old = self.metadatas(recorded_file)
+        self.assertEqual(emf, old)
+
+if __name__ == '__main__': unittest.main()

@@ -26,7 +26,6 @@ class TestMetadata(unittest.TestCase):
             i += 1
             print("Sample metadata: '%s'" % md)
             self.assertTrue( len( md.keys() ) > 0 )
-            self.assertTrue( 'MDATA_KEY_MD5' in md )
             utf8 = md_full.utf8()
             for k,v in md.iteritems():
                 if hasattr(utf8[k], 'decode'):
@@ -42,10 +41,4 @@ class TestMetadata(unittest.TestCase):
         x1 = 123456
         print("Formatting '%s' to '%s'" % (x1, mmm.format_length(x1)))
 
-    def test_truncate_to_length(self):
-        s1 = "testing with non string literal"
-        s2 = u"testing with unicode literal"
-        self.assertEqual( len(mmm.truncate_to_length(s1, 5)), 5)
-        self.assertEqual( len(mmm.truncate_to_length(s2, 8)), 8)
-
 if __name__ == '__main__': unittest.main()

@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+import media.metadata.process as md
+
+class TestMetadataDef(unittest.TestCase):
+    def test_simple(self):
+
+        with md.metadata('MDATA_TESTING') as t:
+            t.optional(True)
+            t.depends('ONE','TWO')
+            t.default('unknown')
+            t.translate(lambda kw: kw['ONE'] + kw['TWO'])
+
+        h = { 'ONE' : "testing", 'TWO' : "123" }
+        result = md.global_reader.read('test_path',h)
+        self.assertTrue( 'MDATA_TESTING' in result )
+        self.assertEqual( result['MDATA_TESTING'], 'testing123' )
+        h1 = { 'ONE' : 'big testing', 'two' : 'nothing' }
+        result1 = md.global_reader.read('bs path', h1)
+        self.assertEqual( result1['MDATA_TESTING'], 'unknown' )
+
+    def test_topo(self):
+        with md.metadata('MDATA_TESTING') as t:
+            t.depends('shen','sheni')
+            t.default('megitzda')
+            t.translate(lambda kw: kw['shen'] + kw['sheni'])
+
+        with md.metadata('shen') as t:
+            t.default('vaxo')
+
+        with md.metadata('sheni') as t:
+            t.default('gio')
+
+        with md.metadata('vaxo') as t:
+            t.depends('shevetsi')
+
+        v = md.global_reader.read('bs mang', {})
+        self.assertEqual(v['MDATA_TESTING'], 'vaxogio')
+        self.assertTrue( 'vaxo' not in v )
+
+        md.global_reader.clear()
+
+if __name__ == '__main__': unittest.main()

@@ -2,7 +2,6 @@
 import unittest
 import os
 import media.monitor.pure as mmp
-from media.monitor.metadata import Metadata
 
 class TestMMP(unittest.TestCase):
     def setUp(self):
@@ -34,68 +33,6 @@ class TestMMP(unittest.TestCase):
         sd = mmp.default_to(dictionary=sd, keys=def_keys, default='DEF')
         for k in def_keys: self.assertEqual( sd[k], 'DEF' )
 
-    def test_normalized_metadata(self):
-        #Recorded show test first
-        orig = Metadata.airtime_dict({
-            'date' : [u'2012-08-21'],
-            'tracknumber' : [u'2'],
-            'title' : [u'record-2012-08-21-11:29:00'],
-            'artist' : [u'Airtime Show Recorder']
-        })
-        orga = Metadata.airtime_dict({
-            'date' : [u'2012-08-21'],
-            'tracknumber' : [u'2'],
-            'artist' : [u'Airtime Show Recorder'],
-            'title' : [u'record-2012-08-21-11:29:00']
-        })
-        orga['MDATA_KEY_FTYPE'] = u'audioclip'
-        orig['MDATA_KEY_BITRATE'] = u'256000'
-        orga['MDATA_KEY_BITRATE'] = u'256000'
-        old_path = "/home/rudi/recorded/2012-08-21-11:29:00.ogg"
-        normalized = mmp.normalized_metadata(orig, old_path)
-        normalized['MDATA_KEY_BITRATE'] = u'256000'
-
-        self.assertEqual( orga, normalized )
-
-        organized_base_name = "11:29:00-record-256kbps.ogg"
-        base = "/srv/airtime/stor/"
-        organized_path = mmp.organized_path(old_path,base, normalized)
-        self.assertEqual(os.path.basename(organized_path), organized_base_name)
-
-    def test_normalized_metadata2(self):
-        """
-        cc-4305
-        """
-        orig = Metadata.airtime_dict({
-            'date' : [u'2012-08-27'],
-            'tracknumber' : [u'3'],
-            'title' : [u'18-11-00-Untitled Show'],
-            'artist' : [u'Airtime Show Recorder']
-        })
-        old_path = "/home/rudi/recorded/doesnt_really_matter.ogg"
-        normalized = mmp.normalized_metadata(orig, old_path)
-        normalized['MDATA_KEY_BITRATE'] = u'256000'
-        opath = mmp.organized_path(old_path, "/srv/airtime/stor/",
-                normalized)
-        # TODO : add a better test than this...
-        self.assertTrue( len(opath) > 0 )
-
-    def test_normalized_metadata3(self):
-        """
-        Test the case where the metadata is empty
-        """
-        orig = Metadata.airtime_dict({})
-        paths_unknown_title = [
-            ("/testin/unknown-unknown-unknown.mp3",""),
-            ("/testin/01-unknown-123kbps.mp3",""),
-            ("/testin/02-unknown-140kbps.mp3",""),
-            ("/testin/unknown-unknown-123kbps.mp3",""),
-            ("/testin/unknown-bibimbop-unknown.mp3","bibimbop"),
-        ]
-        for p,res in paths_unknown_title:
-            normalized = mmp.normalized_metadata(orig, p)
-            self.assertEqual( normalized['MDATA_KEY_TITLE'], res)
-
     def test_file_md5(self):
         p = os.path.realpath(__file__)
         m1 = mmp.file_md5(p)
@@ -116,6 +53,13 @@ class TestMMP(unittest.TestCase):
         self.assertEqual( mmp.parse_int("123asf"), "123" )
         self.assertEqual( mmp.parse_int("asdf"), None )
 
+    def test_truncate_to_length(self):
+        s1 = "testing with non string literal"
+        s2 = u"testing with unicode literal"
+        self.assertEqual( len(mmp.truncate_to_length(s1, 5)), 5)
+        self.assertEqual( len(mmp.truncate_to_length(s2, 8)), 8)
+
+
     def test_owner_id(self):
         start_path = "testing.mp3"
         id_path = "testing.mp3.identifier"

@@ -0,0 +1,48 @@
+import unittest
+from mock import MagicMock
+
+from media.monitor.request import RequestSync
+
+class TestRequestSync(unittest.TestCase):
+
+    def apc_mock(self):
+        fake_apc = MagicMock()
+        fake_apc.send_media_monitor_requests = MagicMock()
+        return fake_apc
+
+    def watcher_mock(self):
+        fake_watcher = MagicMock()
+        fake_watcher.flag_done = MagicMock()
+        return fake_watcher
+
+    def request_mock(self):
+        fake_request = MagicMock()
+        fake_request.safe_pack = MagicMock(return_value=[])
+        return fake_request
+
+    def test_send_media_monitor(self):
+        fake_apc = self.apc_mock()
+        fake_requests = [ self.request_mock() for x in range(1,5) ]
+        fake_watcher = self.watcher_mock()
+        rs = RequestSync(fake_watcher, fake_requests, fake_apc)
+        rs.run_request()
+        self.assertEquals(fake_apc.send_media_monitor_requests.call_count, 1)
+
+    def test_flag_done(self):
+        fake_apc = self.apc_mock()
+        fake_requests = [ self.request_mock() for x in range(1,5) ]
+        fake_watcher = self.watcher_mock()
+        rs = RequestSync(fake_watcher, fake_requests, fake_apc)
+        rs.run_request()
+        self.assertEquals(fake_watcher.flag_done.call_count, 1)
+
+    def test_safe_pack(self):
+        fake_apc = self.apc_mock()
+        fake_requests = [ self.request_mock() for x in range(1,5) ]
+        fake_watcher = self.watcher_mock()
+        rs = RequestSync(fake_watcher, fake_requests, fake_apc)
+        rs.run_request()
+        for req in fake_requests:
+            self.assertEquals(req.safe_pack.call_count, 1)
+
+if __name__ == '__main__': unittest.main()