Compare commits
22 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| a9adbc5954 | |||
| 98ea4baaad | |||
| 7452a97e64 | |||
| d2c08e92eb | |||
| c4a80e4818 | |||
| ea9aa045c8 | |||
| d357aaac80 | |||
| 1ea5cb77ff | |||
| 2690340101 | |||
| 14f40ef781 | |||
| 7c3d079cbe | |||
| 09f5aa2d38 | |||
| e9454edafc | |||
| 5a7076c117 | |||
| 8d2d272185 | |||
| 583838c4f0 | |||
| b53e79e005 | |||
| d953574733 | |||
| 79dd5316fc | |||
| 17bb9d7d0a | |||
| ccf43ab7fb | |||
| b7250156b4 |
311
MkvOpusEnc.py
Normal file
311
MkvOpusEnc.py
Normal file
@@ -0,0 +1,311 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Processes or downmixes an MKV file's audio tracks sequentially using a specific toolchain.
|
||||
This script is cross-platform and optimized for correctness and clean output.
|
||||
|
||||
This script intelligently handles audio streams in an MKV file one by one.
|
||||
- AAC/Opus audio is remuxed.
|
||||
- Multi-channel audio (DTS, AC3, etc.) can be re-encoded or optionally downmixed to stereo.
|
||||
- All other streams and metadata (title, language, delay) are preserved.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
class Tee:
    """Duplicate every write to several file-like objects (e.g. console + log file)."""

    def __init__(self, *files):
        self.files = files

    def write(self, obj):
        # Write-through with an immediate flush so the console and the log
        # file stay in sync even if the process dies mid-run.
        for stream in self.files:
            stream.write(obj)
            stream.flush()

    def flush(self):
        for stream in self.files:
            stream.flush()
|
||||
|
||||
def check_tools():
    """Verify every external tool the pipeline needs is on PATH; exit if any is missing."""
    print("--- Prerequisite Check ---")
    required = ("ffmpeg", "ffprobe", "mkvmerge", "sox", "opusenc", "mediainfo")
    missing = [name for name in required if not shutil.which(name)]
    for name in missing:
        print(f"Error: Required tool '{name}' not found.", file=sys.stderr)
    if missing:
        sys.exit("Please install the missing tools and ensure they are in your system's PATH.")
    print("All required tools found.")
|
||||
|
||||
def run_cmd(args, capture_output=False, check=True):
    """Run an external command.

    Returns the command's stdout as text when *capture_output* is True,
    otherwise None. Raises CalledProcessError when *check* is True and
    the command exits non-zero.
    """
    result = subprocess.run(
        args,
        capture_output=capture_output,
        text=True,
        encoding='utf-8',
        check=check,
    )
    return result.stdout
|
||||
|
||||
def convert_audio_track(stream_index, channels, temp_dir, source_file, should_downmix, bitrate_info):
    """Extract one audio stream to FLAC, normalize it with SoX, and encode it to Opus.

    Returns a tuple of (path to the final .opus file, channel count after any
    downmix, the Opus bitrate string that was used).
    """
    extracted = temp_dir / f"track_{stream_index}_extracted.flac"
    normalized = temp_dir / f"track_{stream_index}_normalized.flac"
    opus_path = temp_dir / f"track_{stream_index}_final.opus"

    # Step 1: extract the stream, optionally folding surround layouts to stereo.
    print(" - Extracting to FLAC...")
    extract_cmd = ["ffmpeg", "-v", "quiet", "-stats", "-y", "-i", str(source_file), "-map", f"0:{stream_index}"]

    out_channels = channels
    if should_downmix and channels >= 6:
        out_channels = 2
        if channels == 6:  # 5.1 layout: custom pan matrix keeps dialogue (center) loud
            print(" (Downmixing 5.1 to Stereo with dialogue boost)")
            extract_cmd += ["-af", "pan=stereo|c0=c2+0.30*c0+0.30*c4|c1=c2+0.30*c1+0.30*c5"]
        elif channels == 8:  # 7.1 layout: same idea, folds the extra side pair in too
            print(" (Downmixing 7.1 to Stereo with dialogue boost)")
            extract_cmd += ["-af", "pan=stereo|c0=c2+0.30*c0+0.30*c4+0.30*c6|c1=c2+0.30*c1+0.30*c5+0.30*c7"]
        else:  # unusual layout (e.g. 6.1): let ffmpeg's default matrix handle it
            print(f" ({channels}-channel source, downmixing to stereo using default -ac 2)")
            extract_cmd += ["-ac", "2"]
    else:
        print(f" (Preserving {channels}-channel layout)")

    extract_cmd += ["-c:a", "flac", str(extracted)]
    run_cmd(extract_cmd)

    # Step 2: loudness normalization via SoX (guarded gain to avoid clipping).
    print(" - Normalizing with SoX...")
    run_cmd(["sox", str(extracted), str(normalized), "-S", "--temp", str(temp_dir), "--guard", "gain", "-n"])

    # Step 3: encode to Opus at a bitrate matched to the channel layout
    # (192k is the fallback for layouts not in the table).
    bitrate = {2: "128k", 6: "256k", 8: "384k"}.get(out_channels, "192k")
    print(f" - Encoding to Opus at {bitrate}...")
    print(f" Source: {bitrate_info} -> Destination: Opus {bitrate} ({out_channels} channels)")
    run_cmd(["opusenc", "--vbr", "--bitrate", bitrate, str(normalized), str(opus_path)])

    return opus_path, out_channels, bitrate
|
||||
|
||||
def main():
    """Batch-process every MKV in the current directory.

    For each file: analyze its audio tracks (ffprobe/mkvmerge/mediainfo),
    re-encode non-AAC/Opus tracks to Opus via convert_audio_track(), then
    remux everything with mkvmerge. Originals go to original/, results to
    completed/, and a per-file log is written to conv_logs/.
    """
    parser = argparse.ArgumentParser(description="Batch processes MKV file audio tracks to Opus.")
    parser.add_argument("--downmix", action="store_true", help="If present, multi-channel audio will be downmixed to stereo.")
    args = parser.parse_args()

    check_tools()

    # Define directory paths but don't create them yet.
    DIR_COMPLETED = Path("completed")
    DIR_ORIGINAL = Path("original")
    DIR_LOGS = Path("conv_logs")
    current_dir = Path(".")

    # Skip our own in-progress outputs so a crashed run can't be re-ingested.
    files_to_process = sorted(
        f for f in current_dir.glob("*.mkv")
        if not f.name.startswith("temp-output-")
    )

    if not files_to_process:
        print("No MKV files found to process. Exiting.")
        return  # Exit without creating directories

    # Create directories only when we actually have files to process.
    DIR_COMPLETED.mkdir(exist_ok=True)
    DIR_ORIGINAL.mkdir(exist_ok=True)
    DIR_LOGS.mkdir(exist_ok=True)

    for file_path in files_to_process:
        # Mirror all console output into a per-file log.
        log_file_path = DIR_LOGS / f"{file_path.name}.log"
        log_file = open(log_file_path, 'w', encoding='utf-8')
        original_stdout = sys.stdout
        original_stderr = sys.stderr
        sys.stdout = Tee(original_stdout, log_file)
        sys.stderr = Tee(original_stderr, log_file)

        # FIX: initialize before the try block. Previously these were first
        # assigned inside the try, so an early exception (e.g. mkdtemp failing)
        # caused a NameError in the except/finally handlers — masking the real
        # error, or on later iterations silently reusing the previous file's
        # stale temp_dir.
        intermediate_output_file = None
        temp_dir = None

        try:
            print("-" * shutil.get_terminal_size(fallback=(80, 24)).columns)
            print(f"Starting processing for: {file_path.name}")
            print(f"Log file: {log_file_path}")
            start_time = datetime.now()

            intermediate_output_file = current_dir / f"temp-output-{file_path.name}"
            temp_dir = Path(tempfile.mkdtemp(prefix="mkvopusenc_"))
            print(f"Temporary directory for audio created at: {temp_dir}")

            # --- Get Media Information ---
            # Three probes: ffprobe for codec/channel data, mkvmerge for track
            # IDs (its numbering differs from ffprobe's), mediainfo for
            # bitrate and delay metadata.
            print(f"Analyzing file: {file_path}")
            ffprobe_info_json = run_cmd(["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", "-show_format", str(file_path)], capture_output=True)
            ffprobe_info = json.loads(ffprobe_info_json)

            mkvmerge_info_json = run_cmd(["mkvmerge", "-J", str(file_path)], capture_output=True)
            mkv_info = json.loads(mkvmerge_info_json)

            mediainfo_json_str = run_cmd(["mediainfo", "--Output=JSON", "-f", str(file_path)], capture_output=True)
            media_info = json.loads(mediainfo_json_str)

            # --- Prepare for Final mkvmerge Command ---
            processed_audio_files = []
            tids_of_reencoded_tracks = []

            # --- Process Each Audio Stream ---
            audio_streams = [s for s in ffprobe_info.get("streams", []) if s.get("codec_type") == "audio"]

            if not audio_streams:
                print(f"Warning: No audio streams found in '{file_path.name}'. Skipping file.")
                continue

            mkv_tracks_list = mkv_info.get("tracks", [])
            mkv_audio_tracks = [t for t in mkv_tracks_list if t.get("type") == "audio"]
            media_tracks_data = media_info.get("media", {}).get("track", [])
            # Keyed by StreamOrder so it can be matched to ffprobe's stream index.
            mediainfo_audio_tracks = {int(t.get("StreamOrder", -1)): t for t in media_tracks_data if t.get("@type") == "Audio"}

            print("\n=== Audio Track Analysis ===")
            for audio_stream_idx, stream in enumerate(audio_streams):
                stream_index = stream["index"]
                codec = stream.get("codec_name")
                channels = stream.get("channels", 2)
                language = stream.get("tags", {}).get("language", "und")

                # Map the Nth ffprobe audio stream to the Nth mkvmerge audio
                # track; assumes both tools enumerate audio in the same order.
                track_id = -1
                mkv_track = {}
                if audio_stream_idx < len(mkv_audio_tracks):
                    mkv_track = mkv_audio_tracks[audio_stream_idx]
                    track_id = mkv_track.get("id", -1)

                if track_id == -1:
                    print(f" -> Warning: Could not map ffprobe audio stream index {stream_index} to an mkvmerge track ID. Skipping this track.")
                    continue

                track_title = mkv_track.get("properties", {}).get("track_name", "")

                track_delay = 0
                audio_track_info = mediainfo_audio_tracks.get(stream_index)

                # Get bitrate information from mediainfo (nominal as fallback).
                bitrate = "Unknown"
                if audio_track_info:
                    if "BitRate" in audio_track_info:
                        try:
                            br_value = int(audio_track_info["BitRate"])
                            bitrate = f"{int(br_value/1000)}k"
                        except (ValueError, TypeError):
                            pass
                    elif "BitRate_Nominal" in audio_track_info:
                        try:
                            br_value = int(audio_track_info["BitRate_Nominal"])
                            bitrate = f"{int(br_value/1000)}k"
                        except (ValueError, TypeError):
                            pass

                delay_raw = audio_track_info.get("Video_Delay") if audio_track_info else None
                if delay_raw is not None:
                    try:
                        delay_val = float(delay_raw)
                        # Heuristic: mediainfo sometimes reports seconds and
                        # sometimes ms. A magnitude < 1 is assumed to be
                        # seconds, so convert to ms.
                        if delay_val < 1 and delay_val > -1:
                            track_delay = int(round(delay_val * 1000))
                        else:
                            track_delay = int(round(delay_val))
                    except Exception:
                        track_delay = 0

                track_info = f"Audio Stream #{stream_index} (TID: {track_id}, Codec: {codec}, Bitrate: {bitrate}, Channels: {channels})"
                if track_title:
                    track_info += f", Title: '{track_title}'"
                if language != "und":
                    track_info += f", Language: {language}"
                if track_delay != 0:
                    track_info += f", Delay: {track_delay}ms"

                print(f"\nProcessing {track_info}")

                if codec in {"aac", "opus"}:
                    print(f" -> Action: Remuxing track (keeping original {codec.upper()} {bitrate})")
                    # This track will be kept from the original file, so we don't need to add it to a special list.
                else:
                    bitrate_info = f"{codec.upper()} {bitrate}"
                    print(f" -> Action: Re-encoding codec '{codec}' to Opus")
                    opus_file, final_channels, final_bitrate = convert_audio_track(
                        stream_index, channels, temp_dir, file_path, args.downmix, bitrate_info
                    )
                    processed_audio_files.append({
                        "Path": opus_file,
                        "Language": language,
                        "Title": track_title,
                        "Delay": track_delay
                    })
                    tids_of_reencoded_tracks.append(str(track_id))

            # --- Construct and Execute Final mkvmerge Command ---
            print("\n=== Final MKV Creation ===")
            print("Assembling final mkvmerge command...")
            mkvmerge_args = ["mkvmerge", "-o", str(intermediate_output_file)]

            # If no audio was re-encoded, we are just doing a full remux of the original file.
            if not processed_audio_files:
                print(" -> All audio tracks are in the desired format. Performing a full remux.")
                mkvmerge_args.append(str(file_path))
            else:
                # Copy everything from the source EXCEPT the original audio
                # tracks we replaced ("!" inverts the track selection).
                mkvmerge_args.extend(["--audio-tracks", "!" + ",".join(tids_of_reencoded_tracks)])
                mkvmerge_args.append(str(file_path))

                # Add the newly encoded Opus audio files, restoring each
                # track's language/title/delay metadata.
                for file_info in processed_audio_files:
                    mkvmerge_args.extend(["--language", f"0:{file_info['Language']}"])
                    if file_info['Title']:
                        mkvmerge_args.extend(["--track-name", f"0:{file_info['Title']}"])
                    if file_info['Delay'] != 0:
                        mkvmerge_args.extend(["--sync", f"0:{file_info['Delay']}"])
                    mkvmerge_args.append(str(file_info["Path"]))

            print("Executing mkvmerge...")
            run_cmd(mkvmerge_args)
            print("MKV creation complete")

            # Move files to their final destinations.
            print("\n=== File Management ===")
            print(f"Moving processed file to: {DIR_COMPLETED / file_path.name}")
            shutil.move(str(intermediate_output_file), DIR_COMPLETED / file_path.name)
            print(f"Moving original file to: {DIR_ORIGINAL / file_path.name}")
            shutil.move(str(file_path), DIR_ORIGINAL / file_path.name)

            # Display total runtime.
            runtime = datetime.now() - start_time
            runtime_str = str(runtime).split('.')[0]  # Remove milliseconds
            print(f"\nTotal processing time: {runtime_str}")

        except Exception as e:
            print(f"\nAn error occurred while processing '{file_path.name}': {e}", file=sys.stderr)
            # Remove a half-written output so it isn't mistaken for a result.
            # Guarded: the exception may have fired before the path was set.
            if intermediate_output_file is not None and intermediate_output_file.exists():
                intermediate_output_file.unlink()
        finally:
            # --- Cleanup ---
            print("\n=== Cleanup ===")
            print("Cleaning up temporary files...")
            # Guarded: temp_dir is None if the failure happened before mkdtemp.
            if temp_dir is not None and temp_dir.exists():
                shutil.rmtree(temp_dir)
                print("Temporary directory removed.")

            # Restore stdout/stderr and close log file.
            sys.stdout = original_stdout
            sys.stderr = original_stderr
            log_file.close()


if __name__ == "__main__":
    main()
|
||||
78
README.md
78
README.md
@@ -1,77 +1,15 @@
|
||||
# Anime Audio Encoder
|
||||
# Encoding Scripts
|
||||
|
||||
## Overview
|
||||
This is a collection of Python scripts for various video and audio processing tasks, such as encoding video to AV1 and audio to Opus.
|
||||
|
||||
anime_audio_encoder.py is a comprehensive batch-processing script for MKV files, specifically tailored for modern anime encoding workflows. It automates the entire pipeline, including advanced video encoding with AV1 (via av1an), sophisticated audio conversion to Opus, and intelligent handling of both Variable Frame Rate (VFR) and Constant Frame Rate (CFR) sources. To ensure transparency and aid in debugging, the script also preserves detailed, per-file logs of the entire conversion process.
|
||||
## Scripts
|
||||
|
||||
## Features
|
||||
- **[anime_audio_encoder.py](anime_audio_encoder.py)**: A script tailored for encoding anime. It handles Variable Frame Rate (VFR) sources and uses `av1an` for AV1 encoding. For more details, see the [Anime Audio Encoder README](README_Anime%20Audio%20Encoder.md).
|
||||
|
||||
* **Advanced Video Encoding:** Uses a robust VapourSynth-based pipeline with av1an and SVT-AV1 for efficient, high-quality AV1 video encoding.
|
||||
- **[tv_audio_encoder.py](tv_audio_encoder.py)**: A script designed for encoding TV show episodes. It uses `alabamaEncoder` for the video encoding process. For more details, see the [TV Audio Encoder README](README_TV%20Audio%20Encoder.md).
|
||||
|
||||
* **VFR Handling:** Automatically detects Variable Frame Rate (VFR) sources using mediainfo and converts them to Constant Frame Rate (CFR) with HandBrakeCLI before encoding, ensuring broader playback compatibility.
|
||||
- **[MkvOpusEnc.py](MkvOpusEnc.py)**: A cross-platform script for batch-processing audio tracks in MKV files to the Opus format. For more details, see the [MkvOpusEnc README](README_MkvOpusEnc.md).
|
||||
|
||||
* **Detailed Logging:** Creates a separate, detailed log file for each processed MKV in the `conv_logs/` directory, capturing the full terminal output for easy review.
|
||||
- **[cropdetect.py](cropdetect.py)**: An advanced script for intelligently detecting video crop values using parallel processing and smart heuristics. For more details, see the [Cropdetect README](README_cropdetect.md).
|
||||
|
||||
* **Sophisticated Audio Processing:** Converts common audio formats to normalized Opus files. It provides an option to downmix surround sound to stereo or preserve the original channel layout.
|
||||
|
||||
* **File Organization:** Keeps your workspace tidy by automatically moving original files to `original/` and completed encodes to `completed/`.
|
||||
|
||||
* **Resumable & Efficient:** The script processes files one by one and uses av1an's resume feature, making it easy to continue an encoding batch if it's interrupted.
|
||||
|
||||
## Requirements
|
||||
|
||||
The following command-line tools must be installed and available in your system's PATH:
|
||||
|
||||
* ffmpeg
|
||||
|
||||
* ffprobe
|
||||
|
||||
* mkvmerge
|
||||
|
||||
* mkvpropedit
|
||||
|
||||
* sox
|
||||
|
||||
* opusenc
|
||||
|
||||
* mediainfo
|
||||
|
||||
* av1an
|
||||
|
||||
* HandBrakeCLI
|
||||
|
||||
## Usage
|
||||
|
||||
1. Place your `.mkv` files in the same directory as the script.
|
||||
|
||||
2. Make the script executable by running `chmod +x anime_audio_encoder.py`.
|
||||
|
||||
3. Execute the script from your terminal:
|
||||
|
||||
```bash
|
||||
./anime_audio_encoder.py
|
||||
```
|
||||
|
||||
### Optional Arguments
|
||||
|
||||
* `--no-downmix`: By default, the script downmixes surround sound audio (e.g., 5.1) to stereo. Use this flag to preserve the original audio channel layout.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
./anime_audio_encoder.py --no-downmix
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
* Processed files are moved to the `completed/` directory.
|
||||
|
||||
* Original files are moved to the `original/` directory.
|
||||
|
||||
* Per-file logs are saved in the `conv_logs/` directory.
|
||||
|
||||
## Notes
|
||||
|
||||
* The script will check if a file can be decoded by ffmpeg before processing and will skip corrupted or unsupported files.
|
||||
|
||||
* The entire process, especially scene detection and AV1 encoding, can be very time-consuming and CPU-intensive.
|
||||
For usage instructions, please refer to the individual scripts or the detailed README files.
|
||||
56
README_Anime Audio Encoder.md
Normal file
56
README_Anime Audio Encoder.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# Anime Audio Encoder
|
||||
|
||||
## Overview
|
||||
|
||||
`anime_audio_encoder.py` is a comprehensive batch-processing script for MKV files, specifically tailored for modern anime encoding workflows. It automates the entire pipeline, including advanced video encoding with AV1 (via `av1an`), sophisticated audio conversion to Opus, and intelligent handling of both Variable Frame Rate (VFR) and Constant Frame Rate (CFR) sources. To ensure transparency and aid in debugging, the script also preserves detailed, per-file logs of the entire conversion process.
|
||||
|
||||
## Features
|
||||
|
||||
* **Advanced Video Encoding:** Uses a robust VapourSynth-based pipeline with `av1an` and SVT-AV1 for efficient, high-quality AV1 video encoding.
|
||||
* **VFR Handling:** Automatically detects Variable Frame Rate (VFR) sources using `mediainfo` and converts them to Constant Frame Rate (CFR) with `HandBrakeCLI` before encoding, ensuring broader playback compatibility.
|
||||
* **Detailed Logging:** Creates a separate, detailed log file for each processed MKV in the `conv_logs/` directory, capturing the full terminal output for easy review.
|
||||
* **Sophisticated Audio Processing:** Converts common audio formats to normalized Opus files. It provides an option to downmix surround sound to stereo or preserve the original channel layout.
|
||||
* **File Organization:** Keeps your workspace tidy by automatically moving original files to `original/` and completed encodes to `completed/`.
|
||||
* **Resumable & Efficient:** The script processes files one by one and uses `av1an`'s resume feature, making it easy to continue an encoding batch if it's interrupted.
|
||||
|
||||
## Requirements
|
||||
|
||||
The following command-line tools must be installed and available in your system's PATH:
|
||||
|
||||
* `ffmpeg`
|
||||
* `ffprobe`
|
||||
* `mkvmerge`
|
||||
* `mkvpropedit`
|
||||
* `sox`
|
||||
* `opusenc`
|
||||
* `mediainfo`
|
||||
* `av1an`
|
||||
* `HandBrakeCLI`
|
||||
|
||||
## Usage
|
||||
|
||||
1. Place your `.mkv` files in the same directory as the script.
|
||||
2. Make the script executable (on Linux/macOS) by running `chmod +x anime_audio_encoder.py`.
|
||||
3. Execute the script from your terminal:
|
||||
```bash
|
||||
./anime_audio_encoder.py
|
||||
```
|
||||
|
||||
### Optional Arguments
|
||||
|
||||
* `--no-downmix`: By default, the script downmixes surround sound audio (e.g., 5.1) to stereo. Use this flag to preserve the original audio channel layout.
|
||||
```bash
|
||||
./anime_audio_encoder.py --no-downmix
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
* Processed files are moved to the `completed/` directory.
|
||||
* Original files are moved to the `original/` directory.
|
||||
* Per-file logs are saved in the `conv_logs/` directory.
|
||||
|
||||
## Notes
|
||||
|
||||
* The script is primarily designed for **Linux/macOS** environments.
|
||||
* The script will check if a file can be decoded by `ffmpeg` before processing and will skip corrupted or unsupported files.
|
||||
* The entire process, especially scene detection and AV1 encoding, can be very time-consuming and CPU-intensive.
|
||||
58
README_MkvOpusEnc.md
Normal file
58
README_MkvOpusEnc.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# MkvOpusEnc
|
||||
|
||||
## Overview
|
||||
|
||||
`MkvOpusEnc.py` is a cross-platform Python script designed for batch-processing the audio tracks within MKV files. It automatically scans the current directory for MKV files and processes them sequentially. The script intelligently converts various audio codecs to the highly efficient Opus format while preserving all other tracks (video, subtitles, etc.) and metadata.
|
||||
|
||||
## Features
|
||||
|
||||
* **Automated Batch Processing:** Automatically finds and processes all MKV files in its directory, one by one.
|
||||
* **Intelligent Codec Handling:**
|
||||
* Remuxes existing `AAC` and `Opus` tracks without re-encoding to preserve quality.
|
||||
* Re-encodes all other audio formats (DTS, AC3, TrueHD, FLAC, etc.) to Opus.
|
||||
* **Advanced Downmixing:** Includes an optional `--downmix` flag that converts multi-channel audio (5.1, 7.1) to stereo using a dialogue-boosting formula.
|
||||
* **Audio Normalization:** Uses `SoX` to normalize audio levels for a consistent listening experience.
|
||||
* **Metadata Preservation:** Carefully preserves audio track metadata such as titles, language tags, and delay/sync information.
|
||||
* **Detailed Logging:** Creates a separate, detailed log file for each processed MKV in the `conv_logs/` directory, capturing the full terminal output and conversion details for easy review.
|
||||
* **File Organization:** Automatically moves the original source files to an `original/` directory and the newly processed files to a `completed/` directory, keeping your workspace clean.
|
||||
* **Cross-Platform:** As a Python script using common command-line tools, it is designed to work on Windows, macOS, and Linux.
|
||||
|
||||
## Requirements
|
||||
|
||||
The following command-line tools must be installed and available in your system's PATH:
|
||||
|
||||
* `ffmpeg`
|
||||
* `ffprobe`
|
||||
* `mkvmerge`
|
||||
* `sox`
|
||||
* `opusenc`
|
||||
* `mediainfo`
|
||||
|
||||
## Usage
|
||||
|
||||
1. Place your `.mkv` files in the same directory as the script.
|
||||
2. Execute the script from your terminal:
|
||||
|
||||
```bash
|
||||
python MkvOpusEnc.py
|
||||
```
|
||||
|
||||
### Optional Arguments
|
||||
|
||||
* `--downmix`: By default, the script preserves the original audio channel layout. Use this flag to downmix multi-channel audio to stereo.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
python MkvOpusEnc.py --downmix
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
* Processed files are moved to the `completed/` directory.
|
||||
* Original files are moved to the `original/` directory.
|
||||
* Per-file logs are saved in the `conv_logs/` directory, containing detailed information about:
|
||||
* Original audio track properties (codec, bitrate, channels)
|
||||
* Track titles, languages, and delay information
|
||||
* Conversion details for each track, including target bitrates
|
||||
* Any errors or warnings encountered during processing
|
||||
58
README_TV Audio Encoder.md
Normal file
58
README_TV Audio Encoder.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# TV Audio Encoder
|
||||
|
||||
## Overview
|
||||
|
||||
`tv_audio_encoder.py` is a comprehensive batch-processing script for MKV files, specifically designed for encoding TV show episodes. It automates the entire pipeline, including VMAF-targeted video encoding with AV1 (via `alabamaEncoder`) and sophisticated audio conversion to Opus. To ensure transparency and aid in debugging, the script also preserves detailed, per-file logs of the entire conversion process.
|
||||
|
||||
## Features
|
||||
|
||||
* **Advanced Video Encoding:** Uses `alabamaEncoder` for a simplified yet powerful VMAF-targeted AV1 encoding workflow, aiming for consistent quality.
|
||||
* **Stable Workflow:** Creates a lossless UTVideo intermediate file from the source video, providing a stable and reliable input for the main encoding process.
|
||||
* **Detailed Logging:** Creates a separate, detailed log file for each processed MKV in the `conv_logs/` directory, capturing the full terminal output for easy review.
|
||||
* **Sophisticated Audio Processing:** Converts common audio formats to normalized Opus files. It provides an option to downmix surround sound to stereo or preserve the original channel layout.
|
||||
* **File Organization:** Keeps your workspace tidy by automatically moving original files to `original/` and completed encodes to `completed/`.
|
||||
* **Platform Specificity:** The script is designed for Linux systems, as `alabamaEncoder` is not supported on Windows.
|
||||
|
||||
## Requirements
|
||||
|
||||
The following command-line tools must be installed and available in your system's PATH:
|
||||
|
||||
* `ffmpeg`
|
||||
* `ffprobe`
|
||||
* `mkvmerge`
|
||||
* `mkvpropedit`
|
||||
* `sox`
|
||||
* `opusenc`
|
||||
* `mediainfo`
|
||||
* `alabamaEncoder`
|
||||
|
||||
## Usage
|
||||
|
||||
1. Place your `.mkv` files in the same directory as the script.
|
||||
2. Make the script executable by running `chmod +x tv_audio_encoder.py`.
|
||||
3. Execute the script from your terminal:
|
||||
|
||||
```bash
|
||||
./tv_audio_encoder.py
|
||||
```
|
||||
|
||||
### Optional Arguments
|
||||
|
||||
* `--no-downmix`: By default, the script downmixes surround sound audio (e.g., 5.1, 7.1) to stereo. Use this flag to preserve the original audio channel layout.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
./tv_audio_encoder.py --no-downmix
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
* Processed files are moved to the `completed/` directory.
|
||||
* Original files are moved to the `original/` directory.
|
||||
* Per-file logs are saved in the `conv_logs/` directory.
|
||||
|
||||
## Notes
|
||||
|
||||
* This script is intended for use on **Linux** only.
|
||||
* The entire process, especially the AV1 encoding, can be very time-consuming and CPU-intensive.
|
||||
124
README_cropdetect.md
Normal file
124
README_cropdetect.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Advanced Crop Detection Script
|
||||
|
||||
This Python script provides a robust and intelligent way to detect the correct crop values for video files. It goes far beyond a simple `ffmpeg-cropdetect` wrapper by using parallel processing and a series of smart heuristics to provide accurate and reliable recommendations, even for complex videos with mixed aspect ratios.
|
||||
|
||||
## Key Features
|
||||
|
||||
- **Parallel Processing**: Analyzes video segments in parallel to significantly speed up the detection process on multi-core systems.
|
||||
- **Smart Aspect Ratio Snapping**: Automatically "snaps" detected crop values to known cinematic standards (e.g., 1.85:1, 2.39:1, 16:9, 4:3), correcting for minor detection errors.
|
||||
- **Mixed Aspect Ratio Detection**: Intelligently identifies videos that switch aspect ratios (e.g., IMAX scenes in a widescreen movie) and warns the user against applying a single, destructive crop.
|
||||
- **Credits & Logo Filtering**: Automatically detects and ignores crop values that only appear in the first or last 5% of the video, preventing opening logos or closing credits from influencing the result.
|
||||
- **Luma Verification**: Performs a second analysis pass on frames with unidentified aspect ratios. If a frame is too dark, the detection is discarded as unreliable, preventing false positives from dark scenes.
|
||||
- **Sanity Checks**: Provides context-aware warnings, such as when it suggests cropping a 4:3 video into a widescreen format.
|
||||
- **"No Crop" Logic**: If a video is overwhelmingly detected as not needing a crop (>95% of samples), it will confidently recommend leaving it as is, ignoring insignificant variations.
|
||||
- **User-Friendly Output**: Uses color-coded text to make recommendations and warnings easy to read at a glance.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Python 3**: The script is written for Python 3.
|
||||
2. **FFmpeg**: Both `ffmpeg` and `ffprobe` must be installed and accessible in your system's `PATH`. The script will check for these on startup.
|
||||
|
||||
## Installation
|
||||
|
||||
No complex installation is required. Simply save the script as `cropdetect.py` and ensure it is executable.
|
||||
|
||||
## Usage
|
||||
|
||||
Run the script from your terminal, passing the path to the video file as an argument.
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```bash
|
||||
python cropdetect.py "path/to/your/video.mkv"
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
- `-j, --jobs`: Specify the number of parallel processes to use for analysis. By default, it uses half of your available CPU cores.
|
||||
```bash
|
||||
# Use 8 parallel jobs
|
||||
python cropdetect.py "path/to/video.mkv" --jobs 8
|
||||
```
|
||||
- `-i, --interval`: Set the time interval (in seconds) between video samples. A smaller interval is more thorough but slower. The default is 30 seconds.
|
||||
```bash
|
||||
# Analyze the video every 15 seconds
|
||||
python cropdetect.py "path/to/video.mkv" --interval 15
|
||||
```
|
||||
|
||||
## Example Output
|
||||
|
||||
### Confident Crop Recommendation
|
||||
|
||||
For a standard widescreen movie, the output will be clear and simple.
|
||||
|
||||
```
|
||||
--- Prerequisite Check ---
|
||||
All required tools found.
|
||||
|
||||
Video properties: 3840x2160, 7588.66s. Analyzing with up to 16 parallel jobs...
|
||||
|
||||
--- Starting Analysis ---
|
||||
Analyzing Segments: 252/252 completed...
|
||||
|
||||
--- Final Verdict ---
|
||||
--- Credits/Logo Detection ---
|
||||
Ignoring 55 crop value(s) that appear only in the first/last 5% of the video.
|
||||
|
||||
--- Luma Verification ---
|
||||
Verifying scenes: 97/97 completed...
|
||||
Ignoring 347 detections that occurred in very dark scenes.
|
||||
|
||||
Analysis complete.
|
||||
The video consistently uses the 'Widescreen (Flat)' aspect ratio.
|
||||
Recommended crop filter: -vf crop=3840:2080:0:40
|
||||
```
|
||||
|
||||
### Mixed Aspect Ratio Warning
|
||||
|
||||
For a movie with changing aspect ratios, the script will advise against cropping.
|
||||
|
||||
```
|
||||
--- Prerequisite Check ---
|
||||
All required tools found.
|
||||
|
||||
Video properties: 1920x1080, 3640.90s. Analyzing with up to 16 parallel jobs...
|
||||
|
||||
--- Starting Analysis ---
|
||||
Analyzing Segments: 121/121 completed...
|
||||
|
||||
--- Final Verdict ---
|
||||
--- Credits/Logo Detection ---
|
||||
Ignoring 15 crop value(s) that appear only in the first/last 5% of the video.
|
||||
|
||||
--- Luma Verification ---
|
||||
Verifying scenes: 121/121 completed...
|
||||
Ignoring 737 detections that occurred in very dark scenes.
|
||||
|
||||
--- WARNING: Potentially Mixed Aspect Ratios Detected! ---
|
||||
The dominant aspect ratio is 'Widescreen (Scope)' (crop=1920:808:0:136), found in 96.2% of samples.
|
||||
However, other significantly different aspect ratios were also detected, although less frequently.
|
||||
|
||||
Recommendation: Manually check the video before applying a single crop.
|
||||
You can review the next most common detections below:
|
||||
- 'Fullscreen (4:3)' (crop=1440:1080:240:0) was detected 69 time(s) (3.8%).
|
||||
```
|
||||
|
||||
### No Crop Needed
|
||||
|
||||
For a video that is already perfectly formatted (e.g., a 4:3 TV show), the script will recommend doing nothing.
|
||||
|
||||
```
|
||||
--- Prerequisite Check ---
|
||||
All required tools found.
|
||||
|
||||
Video properties: 768x576, 1770.78s. Analyzing with up to 16 parallel jobs...
|
||||
|
||||
--- Starting Analysis ---
|
||||
Analyzing Segments: 58/58 completed...
|
||||
|
||||
--- Final Verdict ---
|
||||
Analysis complete.
|
||||
The video is overwhelmingly 'Fullscreen (4:3)' and does not require cropping.
|
||||
Minor aspect ratio variations were detected but are considered insignificant due to their low frequency.
|
||||
Recommendation: No crop needed.
|
||||
```
|
||||
@@ -209,12 +209,24 @@ def is_ffmpeg_decodable(file_path):
|
||||
|
||||
def main(no_downmix=False):
|
||||
check_tools()
|
||||
|
||||
current_dir = Path(".")
|
||||
|
||||
# Check if there are any MKV files to process before creating directories
|
||||
files_to_process = sorted(
|
||||
f for f in current_dir.glob("*.mkv")
|
||||
if not (f.name.endswith(".ut.mkv") or f.name.startswith("temp-") or f.name.startswith("output-") or f.name.endswith(".cfr_temp.mkv"))
|
||||
)
|
||||
|
||||
if not files_to_process:
|
||||
print("No MKV files found to process. Exiting.")
|
||||
return # Exit without creating directories
|
||||
|
||||
# Only create directories when we actually have files to process
|
||||
DIR_COMPLETED.mkdir(exist_ok=True, parents=True)
|
||||
DIR_ORIGINAL.mkdir(exist_ok=True, parents=True)
|
||||
DIR_CONV_LOGS.mkdir(exist_ok=True, parents=True) # Create conv_logs directory
|
||||
|
||||
current_dir = Path(".")
|
||||
|
||||
while True:
|
||||
files_to_process = sorted(
|
||||
f for f in current_dir.glob("*.mkv")
|
||||
@@ -376,10 +388,15 @@ def main(no_downmix=False):
|
||||
# Find mediainfo track by StreamOrder
|
||||
audio_track_info = mediainfo_audio_tracks.get(stream_index)
|
||||
track_delay = 0
|
||||
delay_in_seconds = audio_track_info.get("Video_Delay") if audio_track_info else None
|
||||
if delay_in_seconds is not None:
|
||||
delay_raw = audio_track_info.get("Video_Delay") if audio_track_info else None
|
||||
if delay_raw is not None:
|
||||
try:
|
||||
track_delay = round(float(delay_in_seconds) * 1000)
|
||||
delay_val = float(delay_raw)
|
||||
# If the value is a float < 1, it's seconds, so convert to ms.
|
||||
if delay_val < 1:
|
||||
track_delay = int(round(delay_val * 1000))
|
||||
else:
|
||||
track_delay = int(round(delay_val))
|
||||
except Exception:
|
||||
track_delay = 0
|
||||
|
||||
|
||||
432
cropdetect.py
Normal file
432
cropdetect.py
Normal file
@@ -0,0 +1,432 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
from collections import Counter
|
||||
import shutil
|
||||
import multiprocessing
|
||||
import json
|
||||
|
||||
# ANSI color codes
|
||||
COLOR_GREEN = "\033[92m"
|
||||
COLOR_RED = "\033[91m"
|
||||
COLOR_YELLOW = "\033[93m"
|
||||
COLOR_RESET = "\033[0m"
|
||||
|
||||
def check_prerequisites():
    """Verify that ffmpeg and ffprobe are reachable on PATH; exit(1) otherwise."""
    print("--- Prerequisite Check ---")
    missing = [tool for tool in ('ffmpeg', 'ffprobe') if not shutil.which(tool)]
    for tool in missing:
        print(f"Error: '{tool}' command not found. Is it installed and in your PATH?")
    if missing:
        sys.exit(1)
    print("All required tools found.")
|
||||
|
||||
def analyze_segment(task_args):
    """Worker: run ffmpeg's cropdetect filter over a one-second slice.

    task_args is (seek_time, input_file, width, height); width/height are
    carried along by the task tuple but not needed here.
    Returns a list of (crop_string, seek_time) tuples, or [] on ffmpeg error.
    """
    seek_time, input_file, _width, _height = task_args

    cmd = [
        'ffmpeg', '-hide_banner',
        '-ss', str(seek_time),
        '-i', input_file, '-t', '1', '-vf', 'cropdetect',
        '-f', 'null', '-',
    ]
    proc = subprocess.run(cmd, capture_output=True, text=True, encoding='utf-8')
    if proc.returncode != 0:
        return []

    # cropdetect writes its suggestions to stderr as "crop=w:h:x:y".
    return [
        (f"crop={int(w)}:{int(h)}:{int(x)}:{int(y)}", seek_time)
        for w, h, x, y in re.findall(r'crop=(\d+):(\d+):(\d+):(\d+)', proc.stderr)
    ]
|
||||
|
||||
def get_frame_luma(input_file, seek_time):
    """Return the average luma (YAVG) of one second of video at seek_time.

    Returns None when ffmpeg fails or no YAVG value appears in its output.
    """
    cmd = [
        'ffmpeg', '-hide_banner',
        '-ss', str(seek_time),
        '-i', input_file,
        '-t', '1',
        '-vf', 'signalstats',
        '-f', 'null', '-',
    ]
    proc = subprocess.run(cmd, capture_output=True, text=True, encoding='utf-8')
    if proc.returncode != 0:
        return None  # analysis failed; caller treats None as "unknown"

    # signalstats logs per-frame stats on stderr; take the first YAVG seen.
    luma_match = re.search(r'YAVG:([0-9.]+)', proc.stderr)
    return float(luma_match.group(1)) if luma_match else None
|
||||
|
||||
def check_luma_for_group(task_args):
    """Worker: decide whether a crop group's sample frame is bright enough.

    task_args is (group_key, sample_ts, input_file, luma_threshold).
    Returns (group_key, is_bright); is_bright is True only when the luma was
    measurable and meets the threshold.
    """
    group_key, sample_ts, input_file, luma_threshold = task_args
    measured = get_frame_luma(input_file, sample_ts)
    return (group_key, measured is not None and measured >= luma_threshold)
|
||||
|
||||
# Catalogue of standard display aspect ratios. snap_to_known_ar() picks the
# closest entry (by absolute ratio difference) when normalising a detected
# crop rectangle to a conventional format.
KNOWN_ASPECT_RATIOS = [
    {"name": "HDTV (16:9)", "ratio": 16/9},
    {"name": "Widescreen (Scope)", "ratio": 2.39},
    {"name": "Widescreen (Flat)", "ratio": 1.85},
    {"name": "IMAX Digital (1.90:1)", "ratio": 1.90},
    {"name": "Fullscreen (4:3)", "ratio": 4/3},
    {"name": "IMAX 70mm (1.43:1)", "ratio": 1.43},
]
|
||||
|
||||
def snap_to_known_ar(w, h, x, y, video_w, video_h, tolerance=0.03):
    """Snap a detected crop rectangle onto the closest standard aspect ratio.

    Returns (crop_string, ar_name). ar_name is None whenever the rectangle is
    returned untouched: degenerate height, no catalogue ratio within
    `tolerance` (relative difference), or a crop that is neither clearly
    letterboxed nor pillarboxed.
    """
    unchanged = (f"crop={w}:{h}:{x}:{y}", None)
    if h == 0:
        return unchanged

    detected_ratio = w / h

    # Closest catalogue entry by absolute ratio difference (first wins ties,
    # matching the original scan order).
    best_match = min(KNOWN_ASPECT_RATIOS, key=lambda ar: abs(detected_ratio - ar['ratio']))
    smallest_diff = abs(detected_ratio - best_match['ratio'])
    if (smallest_diff / best_match['ratio']) >= tolerance:
        return unchanged

    # Letterboxed: the crop already spans (nearly) the full source width.
    if abs(w - video_w) < 16:
        new_h = round(video_w / best_match['ratio'])
        # Round height UP to a multiple of 8: cleaner dimensions and a less
        # aggressive crop.
        new_h += -new_h % 8
        new_y = round((video_h - new_h) / 2)
        new_y -= new_y % 2  # keep the y offset even for encoder compatibility
        return f"crop={video_w}:{new_h}:0:{new_y}", best_match['name']

    # Pillarboxed: the crop already spans (nearly) the full source height.
    if abs(h - video_h) < 16:
        new_w = round(video_h * best_match['ratio'])
        new_w += -new_w % 8  # round width UP to a multiple of 8
        new_x = round((video_w - new_w) / 2)
        new_x -= new_x % 2  # keep the x offset even
        return f"crop={new_w}:{video_h}:{new_x}:0", best_match['name']

    # Ambiguous shape: leave the rectangle alone rather than guess.
    return unchanged
|
||||
|
||||
def cluster_crop_values(crop_counts, tolerance=8):
    """Merge near-identical crop strings into clusters keyed by their most
    frequent member.

    Two crops join the same cluster when their top-left corners differ by at
    most `tolerance` pixels on each axis. Unparseable strings are dropped.
    Returns a list of {'center', 'count'} dicts sorted by descending count.
    """
    def _corner(crop_str):
        # -> (x, y) of the crop's top-left corner, or None when malformed.
        try:
            _, payload = crop_str.split('=')
            _w, _h, cx, cy = map(int, payload.split(':'))
            return cx, cy
        except (ValueError, IndexError):
            return None

    remaining = crop_counts.copy()
    clusters = []

    while remaining:
        # Most frequent remaining crop seeds the next cluster.
        center_str, _ = remaining.most_common(1)[0]
        center = _corner(center_str)
        if center is None:
            del remaining[center_str]  # discard unparseable strings
            continue

        members = []
        total = 0
        for crop_str, count in remaining.items():
            corner = _corner(crop_str)
            if corner is None:
                continue
            if abs(corner[0] - center[0]) <= tolerance and abs(corner[1] - center[1]) <= tolerance:
                total += count
                members.append(crop_str)

        if total > 0:
            clusters.append({'center': center_str, 'count': total})

        # Consume the clustered crops so they seed no further clusters.
        for crop_str in members:
            del remaining[crop_str]

    return sorted(clusters, key=lambda c: c['count'], reverse=True)
|
||||
|
||||
def parse_crop_string(crop_str):
    """Decode a 'crop=w:h:x:y' string into {'w','h','x','y'} ints.

    Returns None for anything that does not match the exact shape
    (wrong prefix structure, non-integer fields, or not exactly four fields).
    """
    try:
        _, payload = crop_str.split('=')
        w, h, x, y = (int(part) for part in payload.split(':'))
    except (ValueError, IndexError):
        return None
    return {'w': w, 'h': h, 'x': x, 'y': y}
|
||||
|
||||
def calculate_bounding_box(crop_keys):
    """Return one crop string whose rectangle encloses every crop in crop_keys.

    Malformed crop strings are skipped via parse_crop_string(). Returns None
    when the union spans at most 2 pixels on both axes (which also covers the
    case where nothing could be parsed, since the extremes stay at +/-inf).

    NOTE(review): the uniformity check below compares the union's full
    width/height against 2 px, so identical non-tiny crops still produce a
    bounding box rather than None — the original comment suggests the intent
    may have been to compare edge spread instead; confirm before changing.
    """
    # Track only the extreme edges; per-rectangle width/height extremes were
    # previously computed here as well but never used, so they are removed.
    min_x = min_y = float('inf')
    max_x = max_y = float('-inf')

    for key in crop_keys:
        parsed = parse_crop_string(key)
        if not parsed:
            continue  # ignore malformed entries

        w, h, x, y = parsed['w'], parsed['h'], parsed['x'], parsed['y']
        min_x = min(min_x, x)
        min_y = min(min_y, y)
        max_x = max(max_x, x + w)
        max_y = max(max_y, y + h)

    # Heuristic: a (near-)degenerate union means there is nothing to bound.
    if (max_x - min_x) <= 2 and (max_y - min_y) <= 2:
        return None

    # Span the entire bounding box as a single crop.
    return f"crop={max_x - min_x}:{max_y - min_y}:{min_x}:{min_y}"
|
||||
|
||||
def is_major_crop(crop_str, video_w, video_h, min_crop_size):
    """Return True when at least one frame edge loses >= min_crop_size pixels.

    Malformed crop strings count as not major (False).
    """
    parsed = parse_crop_string(crop_str)
    if not parsed:
        return False

    w, h, x, y = parsed['w'], parsed['h'], parsed['x'], parsed['y']

    # Pixels trimmed from each edge of the original frame.
    trims = (
        y,                   # top
        video_h - (y + h),   # bottom
        x,                   # left
        video_w - (x + w),   # right
    )
    return max(trims) >= min_crop_size
|
||||
|
||||
def analyze_video(input_file, duration, width, height, num_workers, significant_crop_threshold, min_crop, debug=False):
    """Detect black bars in *input_file* and print a crop recommendation.

    Args:
        input_file: path to the video file.
        duration: total length in seconds, used to space the sample points.
        width, height: source resolution in pixels.
        num_workers: size of the multiprocessing pool.
        significant_crop_threshold: minimum percentage of detections a
            cluster needs to count as "significant".
        min_crop: NOTE(review) — accepted but never referenced in this body;
            confirm whether filtering through is_major_crop() was intended.
        debug: when True, also print raw detections and cluster details.

    Prints its findings to stdout; returns None.
    """
    print(f"\n--- Analyzing Video: {os.path.basename(input_file)} ---")

    # Step 1: sample the video at evenly spaced seek points (4 tasks per
    # worker) and run cropdetect on each one-second slice in parallel.
    num_tasks = num_workers * 4
    segment_duration = max(1, duration // num_tasks)
    tasks = [(i * segment_duration, input_file, width, height) for i in range(num_tasks)]

    print(f"Analyzing {len(tasks)} segments across {num_workers} worker(s)...")

    crop_results = []
    with multiprocessing.Pool(processes=num_workers) as pool:
        total_tasks = len(tasks)
        # imap_unordered lets the progress line advance as soon as any
        # segment finishes, regardless of submission order.
        results_iterator = pool.imap_unordered(analyze_segment, tasks)

        for i, result in enumerate(results_iterator, 1):
            crop_results.append(result)
            progress_message = f"Analyzing Segments: {i}/{total_tasks} completed..."
            sys.stdout.write(f"\r{progress_message}")  # \r keeps one progress line
            sys.stdout.flush()
    print()

    # Flatten per-segment lists; each item is (crop_string, seek_time).
    all_crops_with_ts = [crop for sublist in crop_results for crop in sublist]
    all_crop_strings = [item[0] for item in all_crops_with_ts]
    if not all_crop_strings:
        print(f"\n{COLOR_GREEN}Analysis complete. No black bars detected.{COLOR_RESET}")
        return

    crop_counts = Counter(all_crop_strings)

    if debug:
        print("\n--- Debug: Most Common Raw Detections ---")
        for crop_str, count in crop_counts.most_common(10):
            print(f" - {crop_str} (Count: {count})")

    # Step 2: merge near-identical detections into clusters.
    clusters = cluster_crop_values(crop_counts)
    total_detections = sum(c['count'] for c in clusters)

    if debug:
        print("\n--- Debug: Detected Clusters ---")
        for cluster in clusters:
            percentage = (cluster['count'] / total_detections) * 100
            print(f" - Center: {cluster['center']}, Count: {cluster['count']} ({percentage:.1f}%)")

    # Step 3: keep only clusters above the significance threshold.
    significant_clusters = []
    for cluster in clusters:
        percentage = (cluster['count'] / total_detections) * 100
        if percentage >= significant_crop_threshold:
            significant_clusters.append(cluster)

    # Step 4: derive the final recommendation from the surviving clusters.
    print("\n--- Determining Final Crop Recommendation ---")

    # Annotate each significant cluster with its snapped aspect-ratio label
    # (None when its center is unparseable or snaps to nothing).
    for cluster in significant_clusters:
        parsed_crop = parse_crop_string(cluster['center'])
        if parsed_crop:
            _, ar_label = snap_to_known_ar(
                parsed_crop['w'], parsed_crop['h'], parsed_crop['x'], parsed_crop['y'], width, height
            )
            cluster['ar_label'] = ar_label
        else:
            cluster['ar_label'] = None

    if not significant_clusters:
        # Nothing met the threshold — refuse to recommend a crop.
        print(f"{COLOR_RED}No single crop value meets the {significant_crop_threshold}% significance threshold.{COLOR_RESET}")
        print("Recommendation: Do not crop. Try lowering the -sct threshold.")

    elif len(significant_clusters) == 1:
        # Single dominant aspect ratio: recommend its (snapped) crop.
        dominant_cluster = significant_clusters[0]
        parsed_crop = parse_crop_string(dominant_cluster['center'])
        snapped_crop, ar_label = snap_to_known_ar(
            parsed_crop['w'], parsed_crop['h'], parsed_crop['x'], parsed_crop['y'], width, height
        )

        print("A single dominant aspect ratio was found.")
        if ar_label:
            print(f"The detected crop snaps to the '{ar_label}' aspect ratio.")

        # A crop equal to the source dimensions is a no-op; say so instead.
        parsed_snapped = parse_crop_string(snapped_crop)
        if parsed_snapped and parsed_snapped['w'] == width and parsed_snapped['h'] == height:
            print(f"\n{COLOR_GREEN}The detected crop matches the source resolution. No crop is needed.{COLOR_RESET}")
        else:
            print(f"\n{COLOR_GREEN}Recommended crop filter: -vf {snapped_crop}{COLOR_RESET}")

    else:  # Multiple significant clusters: mixed aspect ratios (e.g. IMAX).
        print(f"{COLOR_YELLOW}Mixed aspect ratios detected (e.g., IMAX scenes).{COLOR_RESET}")
        print("Calculating a safe 'master' crop to contain all significant scenes.")

        # Build one bounding box that encloses every significant crop.
        crop_keys = [c['center'] for c in significant_clusters]
        bounding_box_crop = calculate_bounding_box(crop_keys)

        if bounding_box_crop:
            parsed_bb = parse_crop_string(bounding_box_crop)
            snapped_crop, ar_label = snap_to_known_ar(
                parsed_bb['w'], parsed_bb['h'], parsed_bb['x'], parsed_bb['y'], width, height
            )

            print("\n--- Detected Significant Ratios ---")
            for cluster in significant_clusters:
                percentage = (cluster['count'] / total_detections) * 100
                label = f"'{cluster['ar_label']}'" if cluster['ar_label'] else "Custom AR"
                print(f" - {label} ({cluster['center']}) was found in {percentage:.1f}% of samples.")

            print(f"\n{COLOR_GREEN}Analysis complete.{COLOR_RESET}")
            if ar_label:
                print(f"The calculated master crop snaps to the '{ar_label}' aspect ratio.")

            # Again, suppress the recommendation when the crop is a no-op.
            parsed_snapped = parse_crop_string(snapped_crop)
            if parsed_snapped and parsed_snapped['w'] == width and parsed_snapped['h'] == height:
                print(f"{COLOR_GREEN}The final calculated crop matches the source resolution. No crop is needed.{COLOR_RESET}")
            else:
                print(f"{COLOR_GREEN}Recommended safe crop filter: -vf {snapped_crop}{COLOR_RESET}")
        else:
            print(f"{COLOR_RED}Could not calculate a bounding box. Manual review is required.{COLOR_RESET}")
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, probe the file, run the analysis."""
    parser = argparse.ArgumentParser(
        description="Analyzes a video file to detect black bars and recommend crop values. "
        "Handles mixed aspect ratios by calculating a safe bounding box.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("input", help="Input video file")
    parser.add_argument("-n", "--num_workers", type=int, default=max(1, multiprocessing.cpu_count() // 2), help="Number of worker threads. Defaults to half of available cores.")
    parser.add_argument("-sct", "--significant_crop_threshold", type=float, default=5.0, help="Percentage a crop must be present to be considered 'significant'. Default is 5.0.")
    parser.add_argument("-mc", "--min_crop", type=int, default=10, help="Minimum pixels to crop on any side for it to be considered a 'major' crop. Default is 10.")
    parser.add_argument("--debug", action="store_true", help="Enable detailed debug logging.")

    args = parser.parse_args()

    input_file = args.input
    num_workers = args.num_workers
    significant_crop_threshold = args.significant_crop_threshold
    # NOTE(review): min_crop is parsed, printed, and forwarded, but
    # analyze_video() does not appear to use it — confirm intended behavior.
    min_crop = args.min_crop

    # Validate input file before spending any time probing it.
    if not os.path.isfile(input_file):
        print(f"{COLOR_RED}Error: Input file does not exist.{COLOR_RESET}")
        sys.exit(1)

    # Probe duration and resolution with ffprobe; both are required inputs
    # for the segment scheduling and aspect-ratio snapping.
    print("--- Probing video file for metadata ---")

    try:
        probe_duration_args = [
            'ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1',
            input_file
        ]
        duration_str = subprocess.check_output(probe_duration_args, stderr=subprocess.STDOUT, text=True)
        duration = int(float(duration_str))  # truncate fractional seconds
        print(f"Detected duration: {duration}s")

        probe_res_args = [
            'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=width,height', '-of', 'csv=s=x:p=0',
            input_file
        ]
        resolution_str = subprocess.check_output(probe_res_args, stderr=subprocess.STDOUT, text=True)
        width, height = map(int, resolution_str.strip().split('x'))
        print(f"Detected resolution: {width}x{height}")

    except Exception as e:
        # Any probing failure (missing ffprobe, unreadable file, bad output)
        # is fatal — the analysis cannot proceed without this metadata.
        print(f"{COLOR_RED}Error probing video file: {e}{COLOR_RESET}")
        sys.exit(1)

    # Echo the effective parameters for the log/user.
    print(f"\n--- Video Analysis Parameters ---")
    print(f"Input File: {os.path.basename(input_file)}")
    print(f"Duration: {duration}s")
    print(f"Resolution: {width}x{height}")
    print(f"Number of Workers: {num_workers}")
    print(f"Significance Threshold: {significant_crop_threshold}%")
    print(f"Minimum Crop Size: {min_crop}px")

    # Verify ffmpeg/ffprobe availability (exits on failure).
    check_prerequisites()

    # Run the actual crop analysis and print the recommendation.
    analyze_video(input_file, duration, width, height, num_workers, significant_crop_threshold, min_crop, args.debug)


if __name__ == "__main__":
    main()
|
||||
|
||||
@@ -8,6 +8,17 @@ import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
class Tee:
|
||||
def __init__(self, *files):
|
||||
self.files = files
|
||||
def write(self, obj):
|
||||
for f in self.files:
|
||||
f.write(obj)
|
||||
f.flush()
|
||||
def flush(self):
|
||||
for f in self.files:
|
||||
f.flush()
|
||||
|
||||
REQUIRED_TOOLS_MAP = {
|
||||
"ffmpeg": "extra/ffmpeg",
|
||||
"ffprobe": "extra/ffmpeg", # Part of ffmpeg package
|
||||
@@ -20,9 +31,10 @@ REQUIRED_TOOLS_MAP = {
|
||||
}
|
||||
DIR_COMPLETED = Path("completed")
|
||||
DIR_ORIGINAL = Path("original")
|
||||
DIR_LOGS = Path("conv_logs")
|
||||
|
||||
REMUX_CODECS = {"aac", "opus"} # Using a set for efficient lookups
|
||||
CONVERT_CODECS = {"dts", "ac3", "eac3", "flac", "wavpack", "alac"}
|
||||
# Removed CONVERT_CODECS, now all non-remux codecs will be converted
|
||||
|
||||
def check_tools():
|
||||
if sys.platform == "win32":
|
||||
@@ -149,144 +161,194 @@ def convert_video(source_file_base, source_file_full):
|
||||
|
||||
def main(no_downmix=False):
|
||||
check_tools()
|
||||
DIR_COMPLETED.mkdir(exist_ok=True, parents=True)
|
||||
DIR_ORIGINAL.mkdir(exist_ok=True, parents=True)
|
||||
|
||||
|
||||
current_dir = Path(".")
|
||||
|
||||
# Check if there are any MKV files to process before creating directories
|
||||
files_to_process = sorted(
|
||||
f for f in current_dir.glob("*.mkv")
|
||||
if not (f.name.endswith(".ut.mkv") or f.name.startswith("temp-") or f.name.startswith("output-"))
|
||||
)
|
||||
|
||||
if not files_to_process:
|
||||
print("No .mkv files found to process in the current directory.")
|
||||
return
|
||||
print("No MKV files found to process. Exiting.")
|
||||
return # Exit without creating directories
|
||||
|
||||
# Only create directories when we actually have files to process
|
||||
DIR_COMPLETED.mkdir(exist_ok=True, parents=True)
|
||||
DIR_ORIGINAL.mkdir(exist_ok=True, parents=True)
|
||||
DIR_LOGS.mkdir(exist_ok=True, parents=True)
|
||||
|
||||
for file_path in files_to_process:
|
||||
print("-" * shutil.get_terminal_size(fallback=(80, 24)).columns)
|
||||
print(f"Starting full processing for: {file_path.name}")
|
||||
date = datetime.now()
|
||||
input_file_abs = file_path.resolve()
|
||||
intermediate_output_file = current_dir / f"output-{file_path.name}"
|
||||
audio_temp_dir = None # Initialize to None
|
||||
created_ut_video_path = None
|
||||
created_encoded_video_path = None
|
||||
while True:
|
||||
files_to_process = sorted(
|
||||
f for f in current_dir.glob("*.mkv")
|
||||
if not (f.name.endswith(".ut.mkv") or f.name.startswith("temp-") or f.name.startswith("output-"))
|
||||
)
|
||||
|
||||
if not files_to_process:
|
||||
print("No more .mkv files found to process in the current directory. The script will now exit.")
|
||||
break
|
||||
|
||||
file_path = files_to_process[0]
|
||||
|
||||
# Setup logging
|
||||
log_file_path = DIR_LOGS / f"{file_path.name}.log"
|
||||
log_file = open(log_file_path, 'w', encoding='utf-8')
|
||||
original_stdout = sys.stdout
|
||||
original_stderr = sys.stderr
|
||||
sys.stdout = Tee(original_stdout, log_file)
|
||||
sys.stderr = Tee(original_stderr, log_file)
|
||||
|
||||
try:
|
||||
audio_temp_dir = tempfile.mkdtemp(prefix="tv_audio_") # UUID is not strictly needed for uniqueness
|
||||
print(f"Audio temporary directory created at: {audio_temp_dir}")
|
||||
print(f"Analyzing file: {input_file_abs}")
|
||||
print("-" * shutil.get_terminal_size(fallback=(80, 24)).columns)
|
||||
print(f"Starting full processing for: {file_path.name}")
|
||||
date = datetime.now()
|
||||
input_file_abs = file_path.resolve()
|
||||
intermediate_output_file = current_dir / f"output-{file_path.name}"
|
||||
audio_temp_dir = None # Initialize to None
|
||||
created_ut_video_path = None
|
||||
created_encoded_video_path = None
|
||||
|
||||
ffprobe_info_json = run_cmd([
|
||||
"ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", "-show_format", str(input_file_abs)
|
||||
], capture_output=True)
|
||||
ffprobe_info = json.loads(ffprobe_info_json)
|
||||
try:
|
||||
audio_temp_dir = tempfile.mkdtemp(prefix="tv_audio_") # UUID is not strictly needed for uniqueness
|
||||
print(f"Audio temporary directory created at: {audio_temp_dir}")
|
||||
print(f"Analyzing file: {input_file_abs}")
|
||||
|
||||
mkvmerge_info_json = run_cmd([
|
||||
"mkvmerge", "-J", str(input_file_abs)
|
||||
], capture_output=True)
|
||||
mkv_info = json.loads(mkvmerge_info_json)
|
||||
ffprobe_info_json = run_cmd([
|
||||
"ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", "-show_format", str(input_file_abs)
|
||||
], capture_output=True)
|
||||
ffprobe_info = json.loads(ffprobe_info_json)
|
||||
|
||||
mediainfo_json = run_cmd([
|
||||
"mediainfo", "--Output=JSON", "-f", str(input_file_abs)
|
||||
], capture_output=True)
|
||||
media_info = json.loads(mediainfo_json)
|
||||
mkvmerge_info_json = run_cmd([
|
||||
"mkvmerge", "-J", str(input_file_abs)
|
||||
], capture_output=True)
|
||||
mkv_info = json.loads(mkvmerge_info_json)
|
||||
|
||||
created_ut_video_path, created_encoded_video_path = convert_video(file_path.stem, str(input_file_abs))
|
||||
mediainfo_json = run_cmd([
|
||||
"mediainfo", "--Output=JSON", "-f", str(input_file_abs)
|
||||
], capture_output=True)
|
||||
media_info = json.loads(mediainfo_json)
|
||||
|
||||
print("--- Starting Audio Processing ---")
|
||||
processed_audio_files = []
|
||||
audio_tracks_to_remux = []
|
||||
audio_streams = [s for s in ffprobe_info.get("streams", []) if s.get("codec_type") == "audio"]
|
||||
created_ut_video_path, created_encoded_video_path = convert_video(file_path.stem, str(input_file_abs))
|
||||
|
||||
for stream in audio_streams:
|
||||
stream_index = stream["index"]
|
||||
codec = stream.get("codec_name")
|
||||
channels = stream.get("channels", 2)
|
||||
language = stream.get("tags", {}).get("language", "und")
|
||||
mkv_track = mkv_info.get("tracks", [])[stream_index] if stream_index < len(mkv_info.get("tracks", [])) else {}
|
||||
track_id = mkv_track.get("id", -1)
|
||||
track_title = mkv_track.get("properties", {}).get("track_name", "")
|
||||
track_delay = 0
|
||||
print("--- Starting Audio Processing ---")
|
||||
processed_audio_files = []
|
||||
audio_tracks_to_remux = []
|
||||
audio_streams = [s for s in ffprobe_info.get("streams", []) if s.get("codec_type") == "audio"]
|
||||
|
||||
# Build mkvmerge audio track list
|
||||
mkv_audio_tracks_list = [t for t in mkv_info.get("tracks", []) if t.get("type") == "audio"]
|
||||
|
||||
# Build mediainfo track mapping by StreamOrder
|
||||
media_tracks_data = media_info.get("media", {}).get("track", [])
|
||||
audio_track_info = next((t for t in media_tracks_data if t.get("@type") == "Audio" and int(t.get("StreamOrder", -1)) == stream_index), None)
|
||||
delay_in_seconds = audio_track_info.get("Video_Delay") if audio_track_info else None
|
||||
if delay_in_seconds is not None:
|
||||
try:
|
||||
track_delay = round(float(delay_in_seconds) * 1000)
|
||||
except Exception:
|
||||
track_delay = 0
|
||||
mediainfo_audio_tracks = {int(t.get("StreamOrder", -1)): t for t in media_tracks_data if t.get("@type") == "Audio"}
|
||||
|
||||
print(f"Processing Audio Stream #{stream_index} (TID: {track_id}, Codec: {codec}, Channels: {channels})")
|
||||
if codec in REMUX_CODECS:
|
||||
audio_tracks_to_remux.append(str(track_id))
|
||||
elif codec in CONVERT_CODECS:
|
||||
opus_file = convert_audio_track(
|
||||
stream_index, channels, language, audio_temp_dir, str(input_file_abs), not no_downmix
|
||||
)
|
||||
processed_audio_files.append({
|
||||
"Path": opus_file,
|
||||
"Language": language,
|
||||
"Title": track_title,
|
||||
"Delay": track_delay
|
||||
})
|
||||
for audio_idx, stream in enumerate(audio_streams):
|
||||
stream_index = stream["index"]
|
||||
codec = stream.get("codec_name")
|
||||
channels = stream.get("channels", 2)
|
||||
language = stream.get("tags", {}).get("language", "und")
|
||||
|
||||
# More robustly find the mkvmerge track by matching ffprobe's stream index
|
||||
# to mkvmerge's 'stream_id' property.
|
||||
mkv_track = next((t for t in mkv_info.get("tracks", []) if t.get("properties", {}).get("stream_id") == stream_index), None)
|
||||
if not mkv_track:
|
||||
# Fallback to the less reliable index-based method if stream_id isn't found
|
||||
mkv_track = mkv_audio_tracks_list[audio_idx] if audio_idx < len(mkv_audio_tracks_list) else {}
|
||||
|
||||
track_id = mkv_track.get("id", -1)
|
||||
track_title = mkv_track.get("properties", {}).get("track_name", "")
|
||||
track_delay = 0
|
||||
audio_track_info = mediainfo_audio_tracks.get(stream_index)
|
||||
delay_raw = audio_track_info.get("Video_Delay") if audio_track_info else None
|
||||
if delay_raw is not None:
|
||||
try:
|
||||
delay_val = float(delay_raw)
|
||||
if delay_val < 1:
|
||||
track_delay = int(round(delay_val * 1000))
|
||||
else:
|
||||
track_delay = int(round(delay_val))
|
||||
except Exception:
|
||||
track_delay = 0
|
||||
|
||||
print(f"Processing Audio Stream #{stream_index} (TID: {track_id}, Codec: {codec}, Channels: {channels})")
|
||||
if codec in REMUX_CODECS:
|
||||
audio_tracks_to_remux.append(str(track_id))
|
||||
else:
|
||||
opus_file = convert_audio_track(
|
||||
stream_index, channels, language, audio_temp_dir, str(input_file_abs), not no_downmix
|
||||
)
|
||||
processed_audio_files.append({
|
||||
"Path": opus_file,
|
||||
"Language": language,
|
||||
"Title": track_title,
|
||||
"Delay": track_delay
|
||||
})
|
||||
|
||||
print("--- Finished Audio Processing ---")
|
||||
|
||||
# Final mux
|
||||
print("Assembling final file with mkvmerge...")
|
||||
mkvmerge_args = ["mkvmerge", "-o", str(intermediate_output_file), str(created_encoded_video_path)]
|
||||
|
||||
for file_info in processed_audio_files:
|
||||
mkvmerge_args.extend(["--language", f"0:{file_info['Language']}"])
|
||||
if file_info['Title']: # Only add track name if it exists
|
||||
mkvmerge_args.extend(["--track-name", f"0:{file_info['Title']}"])
|
||||
if file_info['Delay']:
|
||||
mkvmerge_args.extend(["--sync", f"0:{file_info['Delay']}"])
|
||||
mkvmerge_args.append(str(file_info["Path"]))
|
||||
|
||||
source_copy_args = ["--no-video"]
|
||||
|
||||
if audio_tracks_to_remux:
|
||||
source_copy_args += ["--audio-tracks", ",".join(audio_tracks_to_remux)]
|
||||
else:
|
||||
print(f"Warning: Unsupported codec '{codec}'. Remuxing as is.", file=sys.stderr)
|
||||
audio_tracks_to_remux.append(str(track_id))
|
||||
source_copy_args += ["--no-audio"]
|
||||
mkvmerge_args += source_copy_args + [str(input_file_abs)]
|
||||
run_cmd(mkvmerge_args)
|
||||
|
||||
print("--- Finished Audio Processing ---")
|
||||
# Move files
|
||||
print("Moving files to final destinations...")
|
||||
shutil.move(str(file_path), DIR_ORIGINAL / file_path.name)
|
||||
shutil.move(str(intermediate_output_file), DIR_COMPLETED / file_path.name)
|
||||
|
||||
# Final mux
|
||||
print("Assembling final file with mkvmerge...")
|
||||
mkvmerge_args = ["mkvmerge", "-o", str(intermediate_output_file), str(created_encoded_video_path)]
|
||||
for file_info in processed_audio_files:
|
||||
sync_switch = ["--sync", f"0:{file_info['Delay']}"] if file_info["Delay"] else []
|
||||
mkvmerge_args += [
|
||||
"--language", f"0:{file_info['Language']}",
|
||||
"--track-name", f"0:{file_info['Title']}"
|
||||
] + sync_switch + [str(file_info["Path"])]
|
||||
except Exception as e:
|
||||
print(f"An error occurred while processing '{file_path.name}': {e}", file=sys.stderr)
|
||||
finally:
|
||||
print("--- Starting Cleanup ---")
|
||||
if audio_temp_dir and Path(audio_temp_dir).exists():
|
||||
print(" - Cleaning up disposable audio temporary directory...")
|
||||
shutil.rmtree(audio_temp_dir, ignore_errors=True)
|
||||
|
||||
source_copy_args = ["--no-video"]
|
||||
if audio_tracks_to_remux:
|
||||
source_copy_args += ["--audio-tracks", ",".join(audio_tracks_to_remux)]
|
||||
else:
|
||||
source_copy_args += ["--no-audio"]
|
||||
mkvmerge_args += source_copy_args + [str(input_file_abs)]
|
||||
run_cmd(mkvmerge_args)
|
||||
if intermediate_output_file.exists():
|
||||
print(" - Cleaning up intermediate output file...")
|
||||
intermediate_output_file.unlink()
|
||||
|
||||
# Move files
|
||||
print("Moving files to final destinations...")
|
||||
shutil.move(str(file_path), DIR_ORIGINAL / file_path.name)
|
||||
shutil.move(str(intermediate_output_file), DIR_COMPLETED / file_path.name)
|
||||
print(" - Cleaning up persistent video temporary files...")
|
||||
if created_ut_video_path and created_ut_video_path.exists():
|
||||
print(f" - Deleting UT video file: {created_ut_video_path}")
|
||||
created_ut_video_path.unlink()
|
||||
if created_encoded_video_path and created_encoded_video_path.exists():
|
||||
print(f" - Deleting encoded video temp file: {created_encoded_video_path}")
|
||||
created_encoded_video_path.unlink()
|
||||
|
||||
except Exception as e:
|
||||
print(f"An error occurred while processing '{file_path.name}': {e}", file=sys.stderr)
|
||||
alabama_dirs = list(current_dir.glob('.alabamatemp-*'))
|
||||
if alabama_dirs:
|
||||
print(" - Cleaning up AlabamaEncoder temporary directories...")
|
||||
for temp_dir_alabama in alabama_dirs:
|
||||
if temp_dir_alabama.is_dir():
|
||||
shutil.rmtree(temp_dir_alabama, ignore_errors=True)
|
||||
print("--- Finished Cleanup ---")
|
||||
|
||||
runtime = datetime.now() - date
|
||||
runtime_str = str(runtime).split('.')[0] # Format to remove milliseconds
|
||||
print(f"Total runtime for {file_path.name}: {runtime_str}")
|
||||
finally:
|
||||
print("--- Starting Cleanup ---")
|
||||
print(" - Cleaning up disposable audio temporary directory...")
|
||||
if audio_temp_dir and Path(audio_temp_dir).exists():
|
||||
shutil.rmtree(audio_temp_dir, ignore_errors=True)
|
||||
|
||||
print(" - Cleaning up intermediate output file (if any)...")
|
||||
intermediate_output_file.unlink(missing_ok=True)
|
||||
|
||||
print(" - Cleaning up persistent video temporary files...")
|
||||
if created_ut_video_path and created_ut_video_path.exists():
|
||||
print(f" Deleting UT video file: {created_ut_video_path}")
|
||||
created_ut_video_path.unlink(missing_ok=True)
|
||||
if created_encoded_video_path and created_encoded_video_path.exists():
|
||||
print(f" Deleting encoded video temp file: {created_encoded_video_path}")
|
||||
created_encoded_video_path.unlink(missing_ok=True)
|
||||
|
||||
print(" - Cleaning up AlabamaEncoder temporary directories...")
|
||||
for temp_dir_alabama in current_dir.glob('.alabamatemp-*'):
|
||||
if temp_dir_alabama.is_dir():
|
||||
shutil.rmtree(temp_dir_alabama, ignore_errors=True)
|
||||
print("--- Finished Cleanup ---")
|
||||
|
||||
runtime = datetime.now() - date
|
||||
runtime_str = str(runtime).split('.')[0] # Format to remove milliseconds
|
||||
print(f"Total runtime for {file_path.name}: {runtime_str}")
|
||||
# Restore stdout/stderr and close log file
|
||||
sys.stdout = original_stdout
|
||||
sys.stderr = original_stderr
|
||||
log_file.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
|
||||
Reference in New Issue
Block a user