[](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [psf/black-pre-commit-mirror](https://togithub.com/psf/black-pre-commit-mirror) | repository | major | `23.12.1` -> `24.1.1` | Note: The `pre-commit` manager in Renovate is not supported by the `pre-commit` maintainers or community. Please do not report any problems there, instead [create a Discussion in the Renovate repository](https://togithub.com/renovatebot/renovate/discussions/new) if you have any questions. --- ### Release Notes <details> <summary>psf/black-pre-commit-mirror (psf/black-pre-commit-mirror)</summary> ### [`v24.1.1`](https://togithub.com/psf/black-pre-commit-mirror/compare/24.1.0...24.1.1) [Compare Source](https://togithub.com/psf/black-pre-commit-mirror/compare/24.1.0...24.1.1) ### [`v24.1.0`](https://togithub.com/psf/black-pre-commit-mirror/compare/23.12.1...24.1.0) [Compare Source](https://togithub.com/psf/black-pre-commit-mirror/compare/23.12.1...24.1.0) </details> --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/libretime/libretime). <!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzNy4xMzUuMCIsInVwZGF0ZWRJblZlciI6IjM3LjE1My4yIiwidGFyZ2V0QnJhbmNoIjoibWFpbiJ9--> --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: jo <ljonas@riseup.net>
111 lines
4.1 KiB
Python
111 lines
4.1 KiB
Python
import logging
|
|
from enum import Enum
|
|
from queue import Queue
|
|
from typing import Any, Dict, Protocol
|
|
|
|
from pydantic import BaseModel
|
|
|
|
from .analyze_cuepoint import analyze_cuepoint, analyze_duration
|
|
from .analyze_metadata import analyze_metadata
|
|
from .analyze_playability import UnplayableFileError, analyze_playability
|
|
from .analyze_replaygain import analyze_replaygain
|
|
from .organise_file import organise_file
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class Step(Protocol):
    """Structural type for one analysis step in the pipeline.

    Any plain function taking ``(filename, metadata)`` satisfies this
    protocol. The concrete steps used below (``analyze_metadata``,
    ``analyze_duration``, …) are called as ``metadata = step(path, metadata)``,
    i.e. they return the updated metadata mapping, though the return type is
    left unannotated here.
    """

    # Declared @staticmethod so the protocol matches bare module-level
    # functions (no ``self`` parameter) rather than bound methods.
    @staticmethod
    def __call__(filename: str, metadata: Dict[str, Any]): ...
|
|
|
|
|
|
class PipelineStatus(int, Enum):
    """Outcome of a pipeline run.

    Stored under ``metadata["import_status"]`` and passed back to the parent
    process via the queue; int-valued so it compares/serializes as a plain int.
    """

    SUCCEED = 0  # analysis completed and the file was organised into the library
    PENDING = 1  # never assigned in this module; presumably the initial state — confirm against callers
    FAILED = 2  # set when the file could not be played (UnplayableFileError)
|
|
|
|
|
|
class PipelineOptions(BaseModel):
    """User-tunable switches for the analysis pipeline."""

    # When True, run the cue-point analysis step (analyze_cuepoint) in
    # addition to the always-on duration analysis.
    analyze_cue_points: bool = False
|
|
|
|
|
|
class Pipeline:
    """Analyzes and imports an audio file into the Airtime library.

    This currently performs metadata extraction (eg. gets the ID3 tags from an MP3),
    then moves the file to the Airtime music library (stor/imported), and returns
    the results back to the parent process.
    """

    @staticmethod
    def _validate_args(
        queue: Queue,
        audio_file_path: str,
        import_directory: str,
        original_filename: str,
    ) -> None:
        """Raise TypeError if any run_analysis argument has the wrong type."""
        if not isinstance(queue, Queue):
            raise TypeError("queue must be a queue.Queue()")
        # The three path/name arguments share the same check; validate them
        # uniformly instead of three copy-pasted blocks.
        for arg_name, value in (
            ("audio_file_path", audio_file_path),
            ("import_directory", import_directory),
            ("original_filename", original_filename),
        ):
            if not isinstance(value, str):
                raise TypeError(
                    f"{arg_name} must be str. Was of type "
                    f"{type(value).__name__} instead."
                )

    @staticmethod
    def run_analysis(
        queue: Queue,
        audio_file_path: str,
        import_directory: str,
        original_filename: str,
        options: PipelineOptions,
    ) -> None:
        """Analyze and import an audio file, and put all extracted metadata into queue.

        Keyword arguments:
            queue: Queue used to pass the extracted metadata back to the
                   parent process.
                   NOTE(review): the original docstring called this a
                   multiprocessing.queues.Queue, but the isinstance check
                   requires a queue.Queue — a multiprocessing queue would be
                   rejected. Confirm which one callers actually pass.
            audio_file_path: Path on disk to the audio file to analyze.
            import_directory: Path to the final Airtime "import" directory where
                              we will move the file.
            original_filename: The original filename of the file, which we'll try to
                               preserve. The file at audio_file_path typically has a
                               temporary randomly generated name, which is why we want
                               to know what the original name was.
            options: Pipeline switches (e.g. whether to run cue-point analysis).

        Raises:
            TypeError: if any argument has the wrong type.
            UnplayableFileError: if the file fails the playability check.
            Exception: any other analysis/import failure is logged and re-raised.
        """
        try:
            Pipeline._validate_args(
                queue, audio_file_path, import_directory, original_filename
            )

            # Analyze the audio file we were told to analyze:
            # First, we extract the ID3 tags and other metadata.
            # Initialized before the steps so the except-handlers below can
            # safely reference it.
            metadata: Dict[str, Any] = {}
            metadata = analyze_metadata(audio_file_path, metadata)
            metadata = analyze_duration(audio_file_path, metadata)
            if options.analyze_cue_points:
                metadata = analyze_cuepoint(audio_file_path, metadata)
            metadata = analyze_replaygain(audio_file_path, metadata)
            metadata = analyze_playability(audio_file_path, metadata)

            # Move the file into the import directory, preserving the
            # original filename where possible.
            metadata = organise_file(
                audio_file_path,
                import_directory,
                original_filename,
                metadata,
            )

            metadata["import_status"] = PipelineStatus.SUCCEED

            # Pass all the file metadata back to the main analyzer process
            queue.put(metadata)
        except UnplayableFileError:
            logger.exception("file is not playable")
            metadata["import_status"] = PipelineStatus.FAILED
            metadata["reason"] = "The file could not be played."
            # NOTE(review): the FAILED metadata is mutated here but never put
            # on the queue — the parent only sees the raised exception.
            # Confirm whether queue.put(metadata) was intended before re-raising.
            raise  # bare re-raise preserves the original traceback
        except Exception:
            logger.exception("analysis pipeline failed")
            raise
|