SAAS-526: Change prefix from station name to station id

Done.
This commit is contained in:
drigato 2014-12-17 16:48:20 -05:00
parent b094e59df3
commit 19e6f1a535
4 changed files with 18 additions and 12 deletions

View File

@@ -107,13 +107,16 @@ class Application_Model_RabbitMq
$data['original_filename'] = $originalFilename; $data['original_filename'] = $originalFilename;
$data['callback_url'] = $callbackUrl; $data['callback_url'] = $callbackUrl;
$data['api_key'] = $apiKey; $data['api_key'] = $apiKey;
// Pass station name to the analyzer so we can set it with the file's metadata // Pass station name to the analyzer so we can set it with the file's
// and prefix the object name with it before uploading it to the cloud. This // metadata before uploading it to the cloud. This isn't a requirement
// isn't a requirement for cloud storage, but put there as a safeguard, since // for cloud storage, but put there as a safeguard, since all Airtime
// all Airtime Pro stations will share the same bucket. // Pro stations will share the same bucket.
$data['station_domain'] = $stationDomain = Application_Model_Preference::GetStationName(); $data['station_domain'] = $stationDomain = Application_Model_Preference::GetStationName();
Logging::info(BillingController::getClientCurrentAirtimeProduct());
$data['file_prefix'] = BillingController::getClientCurrentAirtimeProduct(); // Each file uploaded to cloud storage is prefixed with the station's
// hosting id.
$clientCurrentAirtimeProduct = BillingController::getClientCurrentAirtimeProduct();
$data['file_prefix'] = $clientCurrentAirtimeProduct["id"];
$jsonData = json_encode($data); $jsonData = json_encode($data);
//self::sendMessage($exchange, 'topic', false, $jsonData, 'airtime-uploads'); //self::sendMessage($exchange, 'topic', false, $jsonData, 'airtime-uploads');

View File

@@ -21,7 +21,7 @@ class AnalyzerPipeline:
""" """
@staticmethod @staticmethod
def run_analysis(queue, audio_file_path, import_directory, original_filename, station_domain, current_storage_backend): def run_analysis(queue, audio_file_path, import_directory, original_filename, station_domain, current_storage_backend, file_prefix):
"""Analyze and import an audio file, and put all extracted metadata into queue. """Analyze and import an audio file, and put all extracted metadata into queue.
Keyword arguments: Keyword arguments:
@@ -56,6 +56,7 @@
# First, we extract the ID3 tags and other metadata: # First, we extract the ID3 tags and other metadata:
metadata = dict() metadata = dict()
metadata["station_domain"] = station_domain metadata["station_domain"] = station_domain
metadata["file_prefix"] = file_prefix
metadata = MetadataAnalyzer.analyze(audio_file_path, metadata) metadata = MetadataAnalyzer.analyze(audio_file_path, metadata)
metadata = CuePointAnalyzer.analyze(audio_file_path, metadata) metadata = CuePointAnalyzer.analyze(audio_file_path, metadata)

View File

@@ -61,7 +61,7 @@ class CloudStorageUploader:
# in the object name. URL encoding the object name doesn't solve the # in the object name. URL encoding the object name doesn't solve the
# problem. As a solution we will replace spaces with dashes. # problem. As a solution we will replace spaces with dashes.
file_name = file_name.replace(" ", "-") file_name = file_name.replace(" ", "-")
object_name = "%s/%s_%s%s" % (metadata["station_domain"], file_name, str(uuid.uuid4()), extension) object_name = "%s/%s_%s%s" % (metadata["file_prefix"], file_name, str(uuid.uuid4()), extension)
provider_driver_class = get_driver(getattr(Provider, self._provider)) provider_driver_class = get_driver(getattr(Provider, self._provider))
driver = provider_driver_class(self._api_key, self._api_key_secret) driver = provider_driver_class(self._api_key, self._api_key_secret)

View File

@@ -152,6 +152,7 @@ class MessageListener:
api_key = "" api_key = ""
station_domain = "" station_domain = ""
current_storage_backend = "" current_storage_backend = ""
file_prefix = ""
''' Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue ''' Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
to pass objects between the processes so that if the analyzer process crashes, it does not to pass objects between the processes so that if the analyzer process crashes, it does not
@@ -169,8 +170,9 @@ class MessageListener:
import_directory = msg_dict["import_directory"] import_directory = msg_dict["import_directory"]
original_filename = msg_dict["original_filename"] original_filename = msg_dict["original_filename"]
current_storage_backend = msg_dict["current_storage_backend"] current_storage_backend = msg_dict["current_storage_backend"]
file_prefix = msg_dict["file_prefix"]
audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, station_domain, current_storage_backend)
audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, station_domain, current_storage_backend, file_prefix)
StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata) StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata)
except KeyError as e: except KeyError as e:
@@ -209,11 +211,11 @@ class MessageListener:
channel.basic_ack(delivery_tag=method_frame.delivery_tag) channel.basic_ack(delivery_tag=method_frame.delivery_tag)
@staticmethod @staticmethod
def spawn_analyzer_process(audio_file_path, import_directory, original_filename, station_domain, current_storage_backend): def spawn_analyzer_process(audio_file_path, import_directory, original_filename, station_domain, current_storage_backend, file_prefix):
''' Spawn a child process to analyze and import a new audio file. ''' ''' Spawn a child process to analyze and import a new audio file. '''
q = multiprocessing.Queue() q = multiprocessing.Queue()
p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis, p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis,
args=(q, audio_file_path, import_directory, original_filename, station_domain, current_storage_backend)) args=(q, audio_file_path, import_directory, original_filename, station_domain, current_storage_backend, file_prefix))
p.start() p.start()
p.join() p.join()
if p.exitcode == 0: if p.exitcode == 0: