Reverted pypo's copy file method to how it was without cloud files
and instead directly call the cloud file's downloader method.
parent 70ff67374b
commit 7edd993fa3
3 changed files with 28 additions and 14 deletions
PHP schedule model:

@@ -724,7 +724,7 @@ SQL;
         }
     }
 
-    private static function createFileScheduleEvent(&$data, $item, $media_id, $uri, $filesize, $object_name=null)
+    private static function createFileScheduleEvent(&$data, $item, $media_id, $uri, $object_name=null)
     {
         $start = self::AirtimeTimeToPypoTime($item["start"]);
         $end = self::AirtimeTimeToPypoTime($item["end"]);
@@ -758,8 +758,7 @@ SQL;
             'end' => $end,
             'show_name' => $item["show_name"],
             'replay_gain' => $replay_gain,
-            'independent_event' => $independent_event,
-            'filesize' => $filesize
+            'independent_event' => $independent_event
         );
         if (!is_null($object_name)) {
             $schedule_item["object_name"] = $object_name;
@@ -902,8 +901,7 @@ SQL;
                 if ($file instanceof CloudFile) {
                     $object_name = $storedFile->getResourceId();
                 }
-                $filesize = $storedFile->getFileSize();
-                self::createFileScheduleEvent($data, $item, $media_id, $uri, $filesize, $object_name);
+                self::createFileScheduleEvent($data, $item, $media_id, $uri, $object_name);
             }
             elseif (!is_null($item['stream_id'])) {
                 //row is type "webstream"
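As a reading aid, here is a hypothetical Python sketch of one schedule event as pypo receives it after this change. Only keys that appear somewhere in this diff are listed, all values are invented placeholders, and keys built by the unchanged parts of createFileScheduleEvent are omitted.

# Hypothetical example of one schedule event after this commit.
# Placeholder values throughout; only keys visible in this diff are shown.
media_item = {
    'start': '2014-09-30-12-00-00',        # AirtimeTimeToPypoTime() format (assumed example)
    'end': '2014-09-30-12-04-30',
    'show_name': 'Example Show',
    'replay_gain': '0.0',
    'independent_event': True,
    # 'filesize' is gone: pypo now calls os.path.getsize() on the local source itself.
    'uri': '/srv/airtime/stor/imported/1/example.mp3',   # placeholder local path
    'dst': '/tmp/pypo-cache/example.mp3',                # placeholder cache path
    # Present only when the file is stored in the cloud; drives the new
    # main_loop branch that calls CloudStorageDownloader directly.
    'object_name': '1/example-resource-id',
}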
pypo cloud storage downloader (Python):

@@ -2,6 +2,7 @@ import os
 import logging
 import ConfigParser
 import sys
+import hashlib
 
 from libcloud.storage.types import Provider, ObjectDoesNotExistError
 from libcloud.storage.providers import get_driver
@@ -26,9 +27,15 @@ class CloudStorageDownloader:
             cloud_obj = driver.get_object(container_name=self._bucket,
                                           object_name=obj_name)
         except ObjectDoesNotExistError:
-            logging.info("Could not find object: %s" % obj_name)
+            logging.info("%s does not exist on Amazon S3" % obj_name)
 
-        if os.path.isfile(dst) == False:
+        dst_exists = False
+        if (os.path.isfile(dst)):
+            dst_hash = hashlib.md5(open(dst).read()).hexdigest()
+            if dst_hash == cloud_obj.hash:
+                dst_exists = True
+
+        if dst_exists == False:
             logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
             cloud_obj.download(destination_path=dst)
         else:
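For readability, here is a sketch of how download_obj reads once this hunk is applied. The changed lines are taken from the diff; the no-argument constructor matches the CloudStorageDownloader() call in the pypo change below, but its placeholder credentials, the S3 driver setup, and the bodies of the except/else branches are assumptions, not part of this commit.

import os
import hashlib
import logging

from libcloud.storage.types import Provider, ObjectDoesNotExistError
from libcloud.storage.providers import get_driver


class CloudStorageDownloader:
    def __init__(self):
        # In the real module these presumably come from a ConfigParser-read
        # config file; hard-coded placeholders keep the sketch self-contained.
        self._bucket = 'example-bucket'
        self._api_key = 'EXAMPLE_KEY'
        self._api_key_secret = 'EXAMPLE_SECRET'

    def download_obj(self, dst, obj_name):
        # Driver setup is not visible in the diff; S3 is assumed from the
        # "does not exist on Amazon S3" log message.
        cls = get_driver(Provider.S3)
        driver = cls(self._api_key, self._api_key_secret)

        try:
            cloud_obj = driver.get_object(container_name=self._bucket,
                                          object_name=obj_name)
        except ObjectDoesNotExistError:
            logging.info("%s does not exist on Amazon S3" % obj_name)
            return  # handler body is outside the hunk; bailing out here is an assumption

        # New in this commit: skip the download when a local copy already
        # exists and its MD5 digest matches the hash reported for the object.
        dst_exists = False
        if os.path.isfile(dst):
            dst_hash = hashlib.md5(open(dst).read()).hexdigest()
            if dst_hash == cloud_obj.hash:
                dst_exists = True

        if dst_exists == False:
            logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
            cloud_obj.download(destination_path=dst)
        else:
            # The else body is outside the hunk; a log line is assumed.
            logging.info('%s is already cached, skipping download' % dst)

The MD5 check matters because download_obj is now called straight from pypo's main loop: a track that is scheduled repeatedly should only be pulled from the bucket once per cache directory.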
pypo file fetcher (Python), class PypoFile:

@@ -37,7 +37,12 @@ class PypoFile(Thread):
         """
         src = media_item['uri']
         dst = media_item['dst']
-        src_size = media_item['filesize']
+
+        try:
+            src_size = os.path.getsize(src)
+        except Exception, e:
+            self.logger.error("Could not get size of source file: %s", src)
+            return
 
         dst_exists = True
         try:
@@ -63,10 +68,6 @@ class PypoFile(Thread):
         """
         copy will overwrite dst if it already exists
         """
-        if 'object_name' in media_item:
-            csd = CloudStorageDownloader()
-            csd.download_obj(dst, media_item['object_name'])
-        else:
-            shutil.copy(src, dst)
+        shutil.copy(src, dst)
 
         #make file world readable
@@ -132,6 +133,14 @@ class PypoFile(Thread):
 
                 media_item = self.get_highest_priority_media_item(self.media)
                 if media_item is not None:
-                    self.copy_file(media_item)
+                    """
+                    If an object_name exists the file is stored on Amazon S3
+                    """
+                    if 'object_name' in media_item:
+                        csd = CloudStorageDownloader()
+                        csd.download_obj(media_item['dst'], media_item['object_name'])
+                        media_item['file_ready'] = True
+                    else:
+                        self.copy_file(media_item)
             except Exception, e:
                 import traceback
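Taken together, the pypofile hunks give roughly the following picture. This condensed sketch folds the three hunks into two methods; the logger setup, the import path for CloudStorageDownloader, and the omitted middle of copy_file are assumptions rather than part of the diff.

import os
import shutil
import logging

from cloud_storage_downloader import CloudStorageDownloader  # import path assumed


class PypoFileSketch(object):
    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def copy_file(self, media_item):
        """Copy media_item from the local library to the local cache
        (reverted to its pre-cloud-files form)."""
        src = media_item['uri']
        dst = media_item['dst']

        # Reverted: the size is read from the local source file again instead
        # of from a 'filesize' key passed down by the scheduler.
        try:
            src_size = os.path.getsize(src)
        except Exception:
            self.logger.error("Could not get size of source file: %s", src)
            return

        # The dst-size comparison that decides whether to copy at all is
        # unchanged and omitted here.
        # copy will overwrite dst if it already exists
        shutil.copy(src, dst)

    def fetch(self, media_item):
        # Mirrors the new main_loop branch: if an object_name exists, the
        # file is stored on Amazon S3 and the downloader is called directly.
        if 'object_name' in media_item:
            csd = CloudStorageDownloader()
            csd.download_obj(media_item['dst'], media_item['object_name'])
            media_item['file_ready'] = True
        else:
            self.copy_file(media_item)

The net effect is that copy_file no longer knows about cloud storage at all; the choice between a local copy and a cloud download lives in the main loop, which also flags file_ready once the download finishes.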