Reverted pypo's copy_file method to how it was before cloud files were
introduced, and instead call the cloud file's downloader method directly.

parent 70ff67374b
commit 7edd993fa3

3 changed files with 28 additions and 14 deletions
@@ -724,7 +724,7 @@ SQL;
         }
     }

-    private static function createFileScheduleEvent(&$data, $item, $media_id, $uri, $filesize, $object_name=null)
+    private static function createFileScheduleEvent(&$data, $item, $media_id, $uri, $object_name=null)
     {
         $start = self::AirtimeTimeToPypoTime($item["start"]);
         $end = self::AirtimeTimeToPypoTime($item["end"]);
@@ -758,8 +758,7 @@ SQL;
             'end' => $end,
             'show_name' => $item["show_name"],
             'replay_gain' => $replay_gain,
-            'independent_event' => $independent_event,
-            'filesize' => $filesize
+            'independent_event' => $independent_event
         );
         if (!is_null($object_name)) {
             $schedule_item["object_name"] = $object_name;
@@ -902,8 +901,7 @@ SQL;
             if ($file instanceof CloudFile) {
                 $object_name = $storedFile->getResourceId();
             }
-            $filesize = $storedFile->getFileSize();
-            self::createFileScheduleEvent($data, $item, $media_id, $uri, $filesize, $object_name);
+            self::createFileScheduleEvent($data, $item, $media_id, $uri, $object_name);
         }
         elseif (!is_null($item['stream_id'])) {
             //row is type "webstream"
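
Note on the scheduler-side change above: with 'filesize' dropped from the event array, the only cloud-specific field Airtime still sends pypo is the optional object_name. A minimal sketch of the event dict pypo now receives, with illustrative values for the keys visible in this diff (other keys the scheduler sets are omitted, and the timestamp format is assumed from AirtimeTimeToPypoTime):

schedule_item = {
    'start': '2014-10-06-14-00-00',   # illustrative
    'end': '2014-10-06-14-03-30',     # illustrative
    'show_name': 'Morning Show',      # illustrative
    'replay_gain': '-2.1',            # illustrative
    'independent_event': True,        # illustrative
}
# added only when the file lives in cloud storage:
schedule_item['object_name'] = '1/artist-track.mp3'  # illustrative
# no 'filesize' key anymore; pypo stats the source file itself (see below)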
@@ -2,6 +2,7 @@ import os
 import logging
 import ConfigParser
 import sys
+import hashlib

 from libcloud.storage.types import Provider, ObjectDoesNotExistError
 from libcloud.storage.providers import get_driver
@@ -26,9 +27,15 @@ class CloudStorageDownloader:
             cloud_obj = driver.get_object(container_name=self._bucket,
                                           object_name=obj_name)
         except ObjectDoesNotExistError:
-            logging.info("Could not find object: %s" % obj_name)
+            logging.info("%s does not exist on Amazon S3" % obj_name)

-        if os.path.isfile(dst) == False:
+        dst_exists = False
+        if (os.path.isfile(dst)):
+            dst_hash = hashlib.md5(open(dst).read()).hexdigest()
+            if dst_hash == cloud_obj.hash:
+                dst_exists = True
+
+        if dst_exists == False:
             logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
             cloud_obj.download(destination_path=dst)
         else:
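
The hunk above makes the downloader idempotent: it hashes any existing local copy and skips the download when the MD5 matches the hash libcloud reports for the remote object. The diff reads the whole file into memory to hash it; a sketch of the same check that streams the file in chunks instead (the function name and chunk size are mine, not the codebase's):

import hashlib
import os

def local_copy_matches(dst, remote_hash):
    # True when dst exists and its MD5 equals remote_hash.
    # Assumes remote_hash is an MD5 hex digest, which holds for
    # single-part S3 uploads but not for multipart-upload ETags.
    if not os.path.isfile(dst):
        return False
    md5 = hashlib.md5()
    with open(dst, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest() == remote_hash

In the downloader this would be called as local_copy_matches(dst, cloud_obj.hash) before cloud_obj.download(destination_path=dst).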
@@ -37,7 +37,12 @@ class PypoFile(Thread):
         """
         src = media_item['uri']
         dst = media_item['dst']
-        src_size = media_item['filesize']
+
+        try:
+            src_size = os.path.getsize(src)
+        except Exception, e:
+            self.logger.error("Could not get size of source file: %s", src)
+            return

         dst_exists = True
         try:
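
The hunk above is the revert half of this commit: copy_file no longer expects a 'filesize' key shipped from the server and instead stats the source itself. os.path.getsize raises OSError when the path is missing or unreadable, which is what the new try/except guards against. A tiny self-contained illustration (the path is hypothetical):

import os

try:
    src_size = os.path.getsize('/srv/airtime/stor/imported/1/track.mp3')
except OSError as e:
    # missing or unreadable source: log and bail out, as copy_file now does
    print("Could not get size of source file: %s" % e)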
@@ -63,11 +68,7 @@ class PypoFile(Thread):
                 """
                 copy will overwrite dst if it already exists
                 """
-                if 'object_name' in media_item:
-                    csd = CloudStorageDownloader()
-                    csd.download_obj(dst, media_item['object_name'])
-                else:
-                    shutil.copy(src, dst)
+                shutil.copy(src, dst)

                 #make file world readable
                 os.chmod(dst, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
@@ -132,7 +133,15 @@ class PypoFile(Thread):

                 media_item = self.get_highest_priority_media_item(self.media)
                 if media_item is not None:
-                    self.copy_file(media_item)
+                    """
+                    If an object_name exists the file is stored on Amazon S3
+                    """
+                    if 'object_name' in media_item:
+                        csd = CloudStorageDownloader()
+                        csd.download_obj(media_item['dst'], media_item['object_name'])
+                        media_item['file_ready'] = True
+                    else:
+                        self.copy_file(media_item)
             except Exception, e:
                 import traceback
                 top = traceback.format_exc()
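
Taken together, the run loop now branches before any copying happens: items carrying an object_name are fetched straight from cloud storage and marked file_ready, while purely local items go through the reverted copy_file path. A condensed sketch of that dispatch, pulled out of the thread for clarity (the helper name, the pypo_file parameter, and the module path in the import are illustrative; CloudStorageDownloader and the dict keys are from the diff):

from cloud_storage_downloader import CloudStorageDownloader  # assumed module name

def prepare_media_item(media_item, pypo_file):
    if 'object_name' in media_item:
        # stored in cloud storage: download directly, bypassing copy_file
        csd = CloudStorageDownloader()
        csd.download_obj(media_item['dst'], media_item['object_name'])
        media_item['file_ready'] = True
    else:
        # local file: the reverted copy_file does a plain shutil.copy
        pypo_file.copy_file(media_item)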