diff --git a/airtime_mvc/application/models/RabbitMq.php b/airtime_mvc/application/models/RabbitMq.php
index 435036a6e..682ff99b0 100644
--- a/airtime_mvc/application/models/RabbitMq.php
+++ b/airtime_mvc/application/models/RabbitMq.php
@@ -80,7 +80,7 @@ class Application_Model_RabbitMq
     }

     public static function SendMessageToAnalyzer($tmpFilePath, $importedStorageDirectory, $originalFilename,
-                                                 $callbackUrl, $apiKey, $currentStorageBackend, $filePrefix)
+                                                 $callbackUrl, $apiKey, $storageBackend, $filePrefix)
     {
         //Hack for Airtime Pro. The RabbitMQ settings for communicating with airtime_analyzer are global
         //and shared between all instances on Airtime Pro.
@@ -107,7 +107,7 @@ class Application_Model_RabbitMq
         $queue = 'airtime-uploads';
         $autoDeleteExchange = false;
         $data['tmp_file_path'] = $tmpFilePath;
-        $data['current_storage_backend'] = $currentStorageBackend;
+        $data['storage_backend'] = $storageBackend;
         $data['import_directory'] = $importedStorageDirectory;
         $data['original_filename'] = $originalFilename;
         $data['callback_url'] = $callbackUrl;
diff --git a/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py b/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py
index 077fd88da..bfa053762 100644
--- a/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py
+++ b/python_apps/airtime_analyzer/airtime_analyzer/analyzer_pipeline.py
@@ -22,7 +22,7 @@ class AnalyzerPipeline:
     """

     @staticmethod
-    def run_analysis(queue, audio_file_path, import_directory, original_filename, file_prefix, cloud_storage_config):
+    def run_analysis(queue, audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config):
         """Analyze and import an audio file, and put all extracted metadata into queue.

         Keyword arguments:
@@ -35,6 +35,7 @@ class AnalyzerPipeline:
                                preserve. The file at audio_file_path typically has a
                                temporary randomly generated name, which is why we want
                                to know what the original name was.
+            storage_backend: String indicating the storage backend (amazon_s3 or file)
             file_prefix:
             cloud_storage_config: ConfigParser object containing the cloud storage configuration settings
         """
@@ -68,8 +69,8 @@ class AnalyzerPipeline:
         metadata = ReplayGainAnalyzer.analyze(audio_file_path, metadata)
         metadata = PlayabilityAnalyzer.analyze(audio_file_path, metadata)

-        csu = CloudStorageUploader(cloud_storage_config)
-        if csu.enabled():
+        if storage_backend.lower() == "amazon_s3":
+            csu = CloudStorageUploader(cloud_storage_config)
             metadata = csu.upload_obj(audio_file_path, metadata)
         else:
             metadata = FileMoverAnalyzer.move(audio_file_path, import_directory, original_filename, metadata)
diff --git a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py
index 43e6e1d11..0cb85f45e 100644
--- a/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py
+++ b/python_apps/airtime_analyzer/airtime_analyzer/message_listener.py
@@ -169,9 +169,9 @@ class MessageListener:
                 import_directory = msg_dict["import_directory"]
                 original_filename = msg_dict["original_filename"]
                 file_prefix = msg_dict["file_prefix"]
+                storage_backend = msg_dict["storage_backend"]

-                audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, file_prefix, self.cloud_storage_config)
-
+                audio_metadata = MessageListener.spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix, self.cloud_storage_config)
                 StatusReporter.report_success_to_callback_url(callback_url, api_key, audio_metadata)

             except KeyError as e:
@@ -210,11 +210,11 @@ class MessageListener:
             channel.basic_ack(delivery_tag=method_frame.delivery_tag)

     @staticmethod
-    def spawn_analyzer_process(audio_file_path, import_directory, original_filename, file_prefix, cloud_storage_config):
+    def spawn_analyzer_process(audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config):
         ''' Spawn a child process to analyze and import a new audio file. '''
         q = multiprocessing.Queue()
         p = multiprocessing.Process(target=AnalyzerPipeline.run_analysis,
-                        args=(q, audio_file_path, import_directory, original_filename, file_prefix, cloud_storage_config))
+                        args=(q, audio_file_path, import_directory, original_filename, storage_backend, file_prefix, cloud_storage_config))
         p.start()
         p.join()
         if p.exitcode == 0:
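Note for reviewers: the sketch below is a minimal, standalone illustration of the new
message contract, not code from this patch. Only the payload keys and the
"airtime-uploads" queue name come from SendMessageToAnalyzer() above; the dispatch()
helper and the sample values are hypothetical, and the real consumer is
MessageListener, which hands the fields to AnalyzerPipeline.run_analysis() in a
child process.

    import json

    # Hypothetical, self-contained sketch of the routing decision this patch
    # moves into the message payload. Only the dict keys are taken from the
    # diff; dispatch() and the example values are illustrative.

    def dispatch(message_body):
        msg = json.loads(message_body)
        # "storage_backend" is the field added by this patch; it replaces
        # the old "current_storage_backend" key.
        if msg["storage_backend"].lower() == "amazon_s3":
            return "upload %s to cloud storage" % msg["original_filename"]
        return "move %s into %s" % (msg["original_filename"],
                                    msg["import_directory"])

    example = json.dumps({
        "tmp_file_path": "/tmp/plupload/tmpXyZ.mp3",       # hypothetical value
        "storage_backend": "file",                         # or "amazon_s3"
        "import_directory": "/srv/airtime/stor/imported",  # hypothetical value
        "original_filename": "show-intro.mp3",
        "callback_url": "http://example.com/rest/media/1",
        "api_key": "XXXXXX",
        "file_prefix": "",
    })

    print(dispatch(example))  # -> move show-intro.mp3 into /srv/airtime/stor/imported

Carrying the backend in each message, rather than having the analyzer consult its own
cloud storage config via CloudStorageUploader, presumably lets one shared analyzer
serve instances with different storage backends, which matches the Airtime Pro
multi-instance note in the PHP hunk above.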