# Standard library imports
import datetime as dt
import os
import re
import shutil
import smtplib
import ssl
import tempfile
from email.message import EmailMessage
from typing import Optional

# PyQt imports
from PyQt6.QtWidgets import QInputDialog, QMainWindow, QMessageBox, QWidget

# Third party imports
import filetype
from mutagen.flac import FLAC  # type: ignore
from mutagen.mp3 import MP3  # type: ignore
from pydub import AudioSegment, effects
from pydub.utils import mediainfo
from tinytag import TinyTag, TinyTagException  # type: ignore

# App imports
from classes import ApplicationError, AudioMetadata, Tags
from config import Config
from log import log
from models import Tracks

start_time_re = re.compile(r"@\d\d:\d\d")


def ask_yes_no(
    title: str,
    question: str,
    default_yes: bool = False,
    parent: Optional[QMainWindow] = None,
) -> bool:
    """Ask question; return True for yes, False for no"""

    dlg = QMessageBox(parent)
    dlg.setWindowTitle(title)
    dlg.setText(question)
    dlg.setStandardButtons(
        QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No
    )
    dlg.setIcon(QMessageBox.Icon.Question)
    if default_yes:
        dlg.setDefaultButton(QMessageBox.StandardButton.Yes)
    button = dlg.exec()

    return button == QMessageBox.StandardButton.Yes


def audio_file_extension(fpath: str) -> str | None:
    """
    Return the correct extension for this type of file, based on its content,
    or None if the type cannot be determined.
    """

    kind = filetype.guess(fpath)
    if kind is None:
        return None
    return kind.extension
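
# Illustrative note (not executed): filetype.guess() inspects the file's magic
# bytes rather than its name, so a mislabelled file reports its real type.
# Assuming "mislabelled.wav" actually contains MP3 data:
#
#     >>> audio_file_extension("mislabelled.wav")
#     'mp3'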


def fade_point(
    audio_segment: AudioSegment,
    fade_threshold: float = 0.0,
    chunk_size: int = Config.AUDIO_SEGMENT_CHUNK_SIZE,
) -> int:
    """
    Returns the millisecond/index of the point where the volume drops below
    the maximum and doesn't get louder again.

    audio_segment - the segment to find silence in
    fade_threshold - the upper bound for how quiet is silent, in dBFS
    chunk_size - chunk size for iterating over the segment, in ms
    """

    assert chunk_size > 0  # to avoid infinite loop

    segment_length = int(audio_segment.duration_seconds * 1000)  # ms
    trim_ms = segment_length - chunk_size
    max_vol = audio_segment.dBFS
    if fade_threshold == 0:
        fade_threshold = max_vol

    while (
        audio_segment[trim_ms : trim_ms + chunk_size].dBFS < fade_threshold
        and trim_ms > 0
    ):  # noqa W503
        trim_ms -= chunk_size

    # If there is no trailing silence, return the length of the track (it's
    # short by chunk_size, but for chunk_size = 10 ms this may be ignored)
    return int(trim_ms)
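
# Illustrative sketch (assumed file name, not part of the application): how the
# silence helpers relate for a loaded track.
#
#     >>> audio = AudioSegment.from_mp3("example.mp3")  # hypothetical file
#     >>> leading_silence(audio)    # ms where audible content starts
#     >>> fade_point(audio)         # ms where the final fade begins
#     >>> trailing_silence(audio)   # ms where the closing silence starts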


def file_is_unreadable(path: Optional[str]) -> bool:
    """
    Return True if the passed path is missing or unreadable, else False
    """

    if not path:
        return True

    return not os.access(path, os.R_OK)


def get_audio_segment(path: str) -> Optional[AudioSegment]:
    """Return an AudioSegment for path, or None if the file can't be loaded"""

    extension = audio_file_extension(path)
    if not extension or not path.endswith(extension):
        return None

    try:
        if path.endswith(".mp3"):
            return AudioSegment.from_mp3(path)
        elif path.endswith(".flac"):
            return AudioSegment.from_file(path, "flac")  # type: ignore
    except AttributeError:
        return None

    return None


def get_embedded_time(text: str) -> Optional[dt.datetime]:
    """Return datetime specified as @hh:mm in text"""

    try:
        match = start_time_re.search(text)
    except TypeError:
        return None
    if not match:
        return None

    try:
        return dt.datetime.strptime(match.group(0)[1:], Config.NOTE_TIME_FORMAT)
    except ValueError:
        return None
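
# Illustrative usage (assumes Config.NOTE_TIME_FORMAT is "%H:%M"; the date part
# defaults to strptime's epoch of 1900-01-01):
#
#     >>> get_embedded_time("Doors open @19:30 tonight")
#     datetime.datetime(1900, 1, 1, 19, 30)
#     >>> get_embedded_time("no start time here") is None
#     True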


def get_all_track_metadata(filepath: str) -> dict[str, str | int | float]:
    """Return all track metadata"""

    return (
        get_audio_metadata(filepath)._asdict()
        | get_tags(filepath)._asdict()
        | dict(path=filepath)
    )
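
# Illustrative result shape (field values are made up; the exact keys come from
# the AudioMetadata and Tags tuples defined in classes):
#
#     {
#         "start_gap": 120, "fade_at": 185000, "silence_at": 187000,
#         "title": "Example Song", "artist": "Example Artist",
#         "bitrate": 320, "duration": 187000,
#         "path": "/music/example.mp3",
#     }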


def get_audio_metadata(filepath: str) -> AudioMetadata:
    """Return audio metadata"""

    # Set start_gap, fade_at and silence_at
    audio = get_audio_segment(filepath)
    if not audio:
        return AudioMetadata()
    else:
        return AudioMetadata(
            start_gap=leading_silence(audio),
            fade_at=int(
                round(fade_point(audio) / 1000, Config.MILLISECOND_SIGFIGS) * 1000
            ),
            silence_at=int(
                round(trailing_silence(audio) / 1000, Config.MILLISECOND_SIGFIGS) * 1000
            ),
        )


def get_name(prompt: str, default: str = "") -> str | None:
    """Get a name from the user"""

    dlg = QInputDialog()
    dlg.setInputMode(QInputDialog.InputMode.TextInput)
    dlg.setLabelText(prompt)
    if default:
        dlg.setTextValue(default)
    dlg.resize(500, 100)
    ok = dlg.exec()
    if ok:
        return dlg.textValue()

    return None


def get_relative_date(
    past_date: Optional[dt.datetime], reference_date: Optional[dt.datetime] = None
) -> str:
    """
    Return how long before reference_date past_date is, as a string.

    Params:
    @past_date: datetime
    @reference_date: datetime, defaults to current date and time

    @return: string
    """

    if not past_date or past_date == Config.EPOCH:
        return "Never"
    if not reference_date:
        reference_date = dt.datetime.now()

    # Check parameters
    if past_date > reference_date:
        return "get_relative_date() past_date is after reference_date"

    days: int
    days_str: str
    weeks: int
    weeks_str: str

    weeks, days = divmod((reference_date.date() - past_date.date()).days, 7)
    if weeks == days == 0:
        # Same day so return time instead
        return Config.LAST_PLAYED_TODAY_STRING + " " + past_date.strftime("%H:%M")
    if weeks == 1:
        weeks_str = "week"
    else:
        weeks_str = "weeks"
    if days == 1:
        days_str = "day"
    else:
        days_str = "days"
    return f"{weeks} {weeks_str}, {days} {days_str}"


def get_tags(path: str) -> Tags:
    """
    Return a Tags tuple of title, artist, bitrate and duration-in-milliseconds.
    """

    try:
        tag = TinyTag.get(path)
    except FileNotFoundError:
        raise ApplicationError(f"File not found: {path}")
    except TinyTagException:
        raise ApplicationError(f"Can't read tags in {path}")

    if (
        tag.title is None
        or tag.artist is None
        or tag.bitrate is None
        or tag.duration is None
    ):
        raise ApplicationError(f"Missing tags in {path}")

    return Tags(
        title=tag.title,
        artist=tag.artist,
        bitrate=round(tag.bitrate),
        duration=int(round(tag.duration, Config.MILLISECOND_SIGFIGS) * 1000),
    )


def leading_silence(
    audio_segment: AudioSegment,
    silence_threshold: int = Config.DBFS_SILENCE,
    chunk_size: int = Config.AUDIO_SEGMENT_CHUNK_SIZE,
) -> int:
    """
    Returns the millisecond/index that the leading silence ends.

    audio_segment - the segment to find silence in
    silence_threshold - the upper bound for how quiet is silent, in dBFS
    chunk_size - chunk size for iterating over the segment, in ms

    https://github.com/jiaaro/pydub/blob/master/pydub/silence.py
    """

    trim_ms: int = 0  # ms
    assert chunk_size > 0  # to avoid infinite loop
    while (
        audio_segment[trim_ms : trim_ms + chunk_size].dBFS < silence_threshold
        and trim_ms < len(audio_segment)
    ):  # noqa W503
        trim_ms += chunk_size

    # If there is no end, return the length of the segment
    return min(trim_ms, len(audio_segment))


def ms_to_mmss(
    ms: Optional[int],
    decimals: int = 0,
    negative: bool = False,
    none: Optional[str] = None,
) -> str:
    """Convert milliseconds to mm:ss"""

    minutes: int
    remainder: int
    seconds: float

    if not ms:
        if none:
            return none
        else:
            return "-"
    sign = ""
    if ms < 0:
        if negative:
            sign = "-"
            ms = abs(ms)  # format the magnitude; the sign is added back below
        else:
            ms = 0

    minutes, remainder = divmod(ms, 60 * 1000)
    seconds = remainder / 1000

    # if seconds >= 59.5, it will be represented as 60, which looks odd.
    # So, fake it under those circumstances
    if seconds >= 59.5:
        seconds = 59.0

    return f"{sign}{minutes:.0f}:{seconds:02.{decimals}f}"


def normalise_track(path: str) -> None:
    """Normalise track volume in place, preserving tags, ownership and permissions"""

    # Check type
    ftype = os.path.splitext(path)[1][1:]
    if ftype not in ["mp3", "flac"]:
        log.error(
            f"helpers.normalise_track({path}): File type {ftype} not implemented"
        )
        return

    bitrate = mediainfo(path)["bit_rate"]
    audio = get_audio_segment(path)
    if not audio:
        return

    # Get current file gid, uid and permissions
    stats = os.stat(path)
    try:
        # Copy original file
        _, temp_path = tempfile.mkstemp()
        shutil.copyfile(path, temp_path)
    except Exception as err:
        log.debug(f"helpers.normalise_track({path}): err1: {repr(err)}")
        return

    # Overwrite original file with normalised output
    normalised = effects.normalize(audio)
    try:
        normalised.export(path, format=ftype, bitrate=bitrate)
        # Fix up permissions and ownership
        os.chown(path, stats.st_uid, stats.st_gid)
        os.chmod(path, stats.st_mode)
        # Copy tags
        tag_handler: type[FLAC | MP3]
        if ftype == "flac":
            tag_handler = FLAC
        elif ftype == "mp3":
            tag_handler = MP3
        else:
            return
        src = tag_handler(temp_path)
        dst = tag_handler(path)
        for tag in src:
            dst[tag] = src[tag]
        dst.save()
    except Exception as err:
        log.debug(f"helpers.normalise_track({path}): err2: {repr(err)}")
        # Restore original file from the backup copy
        shutil.copyfile(temp_path, path)
    finally:
        if os.path.exists(temp_path):
            os.remove(temp_path)


def remove_substring_case_insensitive(parent_string: str, substring: str) -> str:
    """
    Remove all instances of substring from parent_string, case-insensitively
    """

    # Guard against an empty substring, which would loop forever below
    if not substring:
        return parent_string

    # Convert both strings to lowercase for case-insensitive comparison
    lower_parent = parent_string.lower()
    lower_substring = substring.lower()

    # Initialize the result string
    result = parent_string

    # Continue removing the substring until it's no longer found
    while lower_substring in lower_parent:
        # Find the index of the substring
        index = lower_parent.find(lower_substring)

        # Remove the substring
        result = result[:index] + result[index + len(substring) :]

        # Update the lowercase version
        lower_parent = result.lower()

    return result
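
# Illustrative usage (made-up strings):
#
#     >>> remove_substring_case_insensitive("The THE the cat", "the ")
#     'cat'
#     >>> remove_substring_case_insensitive("Abba - Waterloo", " - waterloo")
#     'Abba'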


def send_mail(to_addr: str, from_addr: str, subj: str, body: str) -> None:
    """Send a plain-text email via the SMTP server configured in Config"""

    # From https://docs.python.org/3/library/email.examples.html

    # Create a text/plain message
    msg = EmailMessage()
    msg.set_content(body)

    msg["Subject"] = subj
    msg["From"] = from_addr
    msg["To"] = to_addr

    # Send the message via SMTP server; the context manager closes the
    # connection even if sending fails
    context = ssl.create_default_context()
    try:
        with smtplib.SMTP(host=Config.MAIL_SERVER, port=Config.MAIL_PORT) as s:
            if Config.MAIL_USE_TLS:
                s.starttls(context=context)
            if Config.MAIL_USERNAME and Config.MAIL_PASSWORD:
                s.login(Config.MAIL_USERNAME, Config.MAIL_PASSWORD)
            s.send_message(msg)
    except Exception as e:
        log.error(f"helpers.send_mail({to_addr}): {repr(e)}")


def set_track_metadata(track: Tracks) -> None:
    """Set/update track metadata in database"""

    audio_metadata = get_audio_metadata(track.path)
    tags = get_tags(track.path)

    for audio_key in AudioMetadata._fields:
        setattr(track, audio_key, getattr(audio_metadata, audio_key))
    for tag_key in Tags._fields:
        setattr(track, tag_key, getattr(tags, tag_key))


def show_OK(title: str, msg: str, parent: Optional[QWidget] = None) -> None:
    """Display a message to user"""

    dlg = QMessageBox(parent)
    dlg.setIcon(QMessageBox.Icon.Information)
    dlg.setWindowTitle(title)
    dlg.setText(msg)
    dlg.setStandardButtons(QMessageBox.StandardButton.Ok)

    _ = dlg.exec()


def show_warning(parent: Optional[QMainWindow], title: str, msg: str) -> None:
    """Display a warning to user"""

    QMessageBox.warning(parent, title, msg, buttons=QMessageBox.StandardButton.Cancel)


def trailing_silence(
    audio_segment: AudioSegment,
    silence_threshold: int = -50,
    chunk_size: int = Config.AUDIO_SEGMENT_CHUNK_SIZE,
) -> int:
    """
    Return the millisecond at which trailing silence starts, i.e. the point
    after which the volume stays below silence_threshold.
    """

    return fade_point(audio_segment, silence_threshold, chunk_size)