From 932983ee5eeb65e32d46bf6070a5d1842820d92a Mon Sep 17 00:00:00 2001
From: trojaner1404
Date: Wed, 14 Aug 2024 00:12:43 +0200
Subject: [PATCH] Revert "Added asynchron Requests for Jellyfin Updates"

This reverts commit 55408e2d

---
 main.py                  |  56 +++++++--------
 src/getIDs.py            |  77 ++++++++++-----------
 src/mediux_downloader.py |   7 +-
 src/updateCover.py       | 146 ++++++++++++++++++++-------------------
 src/webhook.py           |  19 +++--
 5 files changed, 149 insertions(+), 156 deletions(-)

diff --git a/main.py b/main.py
index a1c6a2d..5f19a2a 100644
--- a/main.py
+++ b/main.py
@@ -6,7 +6,6 @@
 from src.constants import *
 import tempfile
 import errno
-import asyncio
 
 
 try:
@@ -38,6 +37,7 @@ def clean_log_files():
         os.remove(log_file)
         Path(log_file).touch()
 
+
 def acquire_lock(lock_file):
     try:
         if os.path.exists(lock_file):
@@ -69,7 +69,7 @@ def pid_exists(pid):
     return True
 
 
-async def main():
+def main():
     """Main function for processing covers and updating Jellyfin."""
     lock_file = os.path.join(tempfile.gettempdir(), 'jellyfin_cover_manager.lock')
     lock = acquire_lock(lock_file)
@@ -81,17 +81,17 @@ async def main():
     try:
         clean_log_files()
         organize_covers()
-        await start_get_and_save_series_and_movie()
+        start_get_and_save_series_and_movie()
 
         try:
             clean_json_names(OUTPUT_FILENAME)
         except json.JSONDecodeError as json_error:
             log(f"JSON decoding error: {str(json_error)}. Creating new files...", success=False)
-            await delete_corrupted_files()
+            delete_corrupted_files()
             return
 
         missing_folders.clear()
-        await assign_images_and_update_jellyfin(OUTPUT_FILENAME)
+        assign_images_and_update_jellyfin(OUTPUT_FILENAME)
 
         if missing_folders:
             if os.path.exists(MISSING_FOLDER):
@@ -108,7 +108,7 @@ async def main():
     finally:
         release_lock(lock)
 
-async def delete_corrupted_files():
+def delete_corrupted_files():
     """Delete existing files and recreate them with fresh data."""
     files_to_recreate = [RAW_FILENAME, OUTPUT_FILENAME, ID_CACHE_FILENAME]
 
@@ -118,14 +118,14 @@ async def delete_corrupted_files():
                 os.remove(file)
                 log(f"Deleted existing file: {file}", success=True)
 
-        await start_get_and_save_series_and_movie()
+        start_get_and_save_series_and_movie()
         clean_json_names(OUTPUT_FILENAME)
 
         log("Successfully recreated and populated new files", success=True)
     except Exception as e:
         log(f"Error recreating files: {str(e)}", success=False)
 
-async def check_raw_cover():
+def check_raw_cover():
     """Check Raw Cover directory every 10 seconds for new files."""
     while not stop_thread.is_set():
         try:
@@ -133,63 +133,59 @@ async def check_raw_cover():
             for file in Path(RAW_COVER_DIR).iterdir():
                 if file.suffix.lower() in ['.filepart']:
                     while file.exists():
                         print(f"Waiting for {file.name} to finish transferring...")
-                        await asyncio.sleep(1)
+                        time.sleep(1)
                     continue
 
                 if file.suffix.lower() in ['.zip', '.png', '.jpg', '.jpeg', '.webp']:
                     initial_size = file.stat().st_size
-                    await asyncio.sleep(1)
+                    time.sleep(1)
                     if file.stat().st_size == initial_size:
                         log(f"Found new file: {file.name}")
-                        await main()
+                        main()
                         break
 
             if os.path.getsize(MEDIUX_FILE) != 0:
                 log("mediux.txt is not empty. Running mediux_downloader.")
                 log("waiting for additional links")
-                await asyncio.sleep(10)
-                await mediux_downloader()
+                time.sleep(10)
+                mediux_downloader()
         except Exception as e:
             error_message = f"Error checking raw cover: {str(e)}"
             log(error_message, success=False)
 
-        await asyncio.sleep(5)
+        time.sleep(5)
 
     print("Checker thread stopped.")
 
-async def run_program(run_main_immediately=False):
+def run_program(run_main_immediately=False):
     """Main program entry point."""
     setup_directories()
-    try:
-        if os.path.getsize(MEDIUX_FILE) != 0:
-            await mediux_downloader()
-    except FileNotFoundError:
-        if not os.path.exists(MEDIUX_FILE):
-            with open(MEDIUX_FILE, 'w') as f:
-                pass
+    if os.path.getsize(MEDIUX_FILE) != 0:
+        mediux_downloader()
+
     if run_main_immediately:
-        await main()
+        main()
 
-    checker_task = asyncio.create_task(check_raw_cover())
+    checker_thread = threading.Thread(target=check_raw_cover)
+    checker_thread.start()
 
     try:
         while not stop_thread.is_set():
-            await start_get_and_save_series_and_movie()
-            await asyncio.sleep(30)
+            start_get_and_save_series_and_movie()
+            time.sleep(30)
     except KeyboardInterrupt:
         print("Main program is closing...")
         stop_thread.set()
-        await checker_task
-        print("Checker task has been terminated.")
+        checker_thread.join()
+        print("Checker thread has been terminated.")
 
 
 if __name__ == '__main__':
     try:
-        clean_log_files()
         parser = argparse.ArgumentParser(description="Jellyfin Cover Manager")
         parser.add_argument("--main", action="store_true", help="Run the main function immediately after start")
         args = parser.parse_args()
-        asyncio.run(run_program(run_main_immediately=args.main))
+        run_program(run_main_immediately=args.main)
     except Exception as e:
         print(f"Unhandled exception in main script: {e}")
         traceback.print_exc()
\ No newline at end of file
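The restored watcher uses the stock Event-plus-Thread shutdown pattern in place of an asyncio task. A minimal, self-contained sketch of that pattern (the names mirror the code above; the loop body is illustrative):

    import threading
    import time

    stop_thread = threading.Event()

    def check_raw_cover():
        # Poll until the main thread signals shutdown.
        while not stop_thread.is_set():
            # ... scan RAW_COVER_DIR and process new files ...
            time.sleep(5)
        print("Checker thread stopped.")

    checker_thread = threading.Thread(target=check_raw_cover)
    checker_thread.start()
    try:
        while not stop_thread.is_set():
            time.sleep(30)
    except KeyboardInterrupt:
        stop_thread.set()      # ask the worker to leave its loop
        checker_thread.join()  # wait for it to finish
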
diff --git a/src/getIDs.py b/src/getIDs.py
index 05074ae..074771f 100644
--- a/src/getIDs.py
+++ b/src/getIDs.py
@@ -5,8 +5,6 @@
 import time
 from typing import List, Dict, Set, Tuple, Optional
 from requests.exceptions import RequestException
-import asyncio
-import aiohttp
 
 from src.config import JELLYFIN_URL, API_KEY, TMDB_API_KEY, USE_TMDB
 from src.utils import log, ensure_dir
@@ -14,14 +12,13 @@ from src.constants import RAW_FILENAME, OUTPUT_FILENAME, ID_CACHE_FILENAME, MISSING_FOLDER
 
 
-async def start_get_and_save_series_and_movie():
-    media_list = await get_and_save_series_and_movies()
+def start_get_and_save_series_and_movie():
+    media_list = get_and_save_series_and_movies()
     if media_list:
         new_ids, has_processing_tags, items_with_tags, items_with_unknown_years = process_media_list(media_list)
         old_ids = load_cached_ids()
         unknown_years = any(item.get('Year') == 'Unknown' and item['Type'] in ['Series', 'Movie'] for item in media_list)
-
         if has_processing_tags or unknown_years:
             log("IMDB or TVDB tags detected or unknown years found. Waiting 30 seconds before refreshing...")
             if has_processing_tags:
@@ -32,17 +29,17 @@ async def start_get_and_save_series_and_movie():
                 log("Items with unknown years:")
                 for item in items_with_unknown_years:
                     log(f" - {item}")
-            await asyncio.sleep(30)
+            time.sleep(30)
             if os.path.exists(ID_CACHE_FILENAME):
                 os.remove(ID_CACHE_FILENAME)
-            return await start_get_and_save_series_and_movie()  # Restart the process
+            return start_get_and_save_series_and_movie()  # Restart the process
 
         if new_ids != old_ids:
             log("Changes in media items detected. Running main function...")
-            clean_json_names(RAW_FILENAME)
+            clean_json_names(RAW_FILENAME)  # Clean the raw file first
             new_sorted_data = sort_series_and_movies(RAW_FILENAME)
             if new_sorted_data:
-                await save_if_different(OUTPUT_FILENAME, new_sorted_data)
+                save_if_different(OUTPUT_FILENAME, new_sorted_data)
             save_cached_ids(new_ids)
         else:
             log("No changes detected in media items.")
@@ -51,7 +48,7 @@ async def start_get_and_save_series_and_movie():
         log("Failed to retrieve series and movies data.", success=False)
 
 
-async def get_and_save_series_and_movies(use_local_file: bool = False) -> Optional[List[Dict]]:
+def get_and_save_series_and_movies(use_local_file: bool = False) -> Optional[List[Dict]]:
     # Useful for Debugging
     use_local_file = False
 
@@ -80,35 +77,35 @@ async def get_and_save_series_and_movies(use_local_file: bool = False) -> Optional[List[Dict]]:
     attempt = 0
     retry_delay = 5
 
-    async with aiohttp.ClientSession() as session:
-        while True:
-            attempt += 1
-            try:
-                async with session.get(url, headers=headers, params=params) as response:
-                    if response.status == 401:
-                        log("Invalid API Key. Please check your API key and try again.", success=False)
-                        await asyncio.sleep(retry_delay)
-                        continue
-
-                    response.raise_for_status()
-                    data = await response.json()
-                    items = data.get('Items')
-                    if not items:
-                        log("No items found in the response", success=False)
-                        time.sleep(retry_delay)
-                        continue
-
-                    media_list = [create_media_info(item) for item in items]
-
-                    with open(RAW_FILENAME, 'w', encoding='utf-8') as f:
-                        json.dump(media_list, f, ensure_ascii=False, indent=4)
-
-                    return media_list
-
-            except RequestException as e:
-                log(f"Request failed (Attempt {attempt}): {e}", success=False)
-                log(f"Retrying in {retry_delay} seconds (Attempt {attempt})...")
+    while True:
+        attempt += 1
+        try:
+            response = requests.get(url, headers=headers, params=params)
+
+            if response.status_code == 401:
+                log("Invalid API Key. Please check your API key and try again.", success=False)
+                time.sleep(retry_delay)
+                continue
+
+            response.raise_for_status()
+
+            items = response.json().get('Items')
+            if not items:
+                log("No items found in the response", success=False)
+                time.sleep(retry_delay)
+                continue
+
+            media_list = [create_media_info(item) for item in items]
+
+            with open(RAW_FILENAME, 'w', encoding='utf-8') as f:
+                json.dump(media_list, f, ensure_ascii=False, indent=4)
+
+            return media_list
+
+        except RequestException as e:
+            log(f"Request failed (Attempt {attempt}): {e}", success=False)
+            log(f"Retrying in {retry_delay} seconds (Attempt {attempt})...")
+            time.sleep(retry_delay)
 
     return None
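The synchronous fetch restored above is an unbounded retry loop around requests.get. Stripped of the Jellyfin specifics, the shape is (URL and headers are placeholders):

    import time
    import requests
    from requests.exceptions import RequestException

    def fetch_items(url, headers, retry_delay=5):
        attempt = 0
        while True:
            attempt += 1
            try:
                response = requests.get(url, headers=headers)
                if response.status_code == 401:
                    time.sleep(retry_delay)  # bad credentials: wait and retry
                    continue
                response.raise_for_status()
                items = response.json().get('Items')
                if items:
                    return items
            except RequestException as e:
                print(f"Request failed (attempt {attempt}): {e}")
            time.sleep(retry_delay)  # shared back-off before the next attempt
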
diff --git a/src/mediux_downloader.py b/src/mediux_downloader.py
index 016cbac..00b4813 100644
--- a/src/mediux_downloader.py
+++ b/src/mediux_downloader.py
@@ -24,10 +24,7 @@
 
 
 def mediux_downloader():
-    if asyncio.get_event_loop().is_running():
-        return asyncio.create_task(async_mediux_downloader())
-    else:
-        asyncio.run(async_mediux_downloader())
+    asyncio.run(async_mediux_downloader())
 
 
 async def async_mediux_downloader():
@@ -38,7 +35,7 @@ async def async_mediux_downloader():
     for index, download_url in enumerate(download_urls):
         if not download_url.startswith("https://mediux.pro/sets"):
             log("Please select a set link instead of a collection link.")
-            log(f"Invialid Link: {download_url}", success=False)
+            print("Invalid Link:", download_url)
             sys.exit(1)
 
         log(f'Downloading set information for URL {index + 1}')
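The deleted branch worked around the fact that asyncio.run() raises a RuntimeError when an event loop is already running. With every caller reverted to plain synchronous code there is no running loop, so the unconditional asyncio.run() is safe:

    import asyncio

    async def job():
        await asyncio.sleep(0)

    asyncio.run(job())  # fine: called from synchronous code

    async def outer():
        # Inside a coroutine a loop IS running, so this would raise
        # RuntimeError: asyncio.run() cannot be called from a running event loop
        asyncio.run(job())
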
diff --git a/src/updateCover.py b/src/updateCover.py
index 8ce3593..f91a7f2 100644
--- a/src/updateCover.py
+++ b/src/updateCover.py
@@ -1,11 +1,13 @@
 # src/update_cover.py
+import os
 import json
+import time
+import requests
+import subprocess
 from base64 import b64encode
 from typing import List, Dict, Tuple, Optional
 from pathlib import Path
-import asyncio
-import aiohttp
-import os
+from difflib import SequenceMatcher
 
 from src.config import *
 from src.utils import log, get_content_type
@@ -16,7 +18,7 @@
 used_folders: List[Path] = []
 
 
-async def get_english_title(title, year, media_type='movie'):
+def get_english_title(title, year, media_type='movie'):
     url = f"https://api.themoviedb.org/3/search/{media_type}"
     params = {
         "api_key": TMDB_API_KEY,
@@ -24,19 +26,22 @@ async def get_english_title(title, year, media_type='movie'):
         "year": year,
         "language": "en-US"
     }
-    async with aiohttp.ClientSession() as session:
-        async with session.get(url, params=params) as response:
-            if response.status == 200:
-                results = await response.json()
-                results = results.get("results", [])
-                if results:
-                    sorted_results = sorted(results, key=lambda x: x.get('popularity', 0), reverse=True)
-                    for result in sorted_results[:3]:
-                        result_title = result['title'] if media_type == 'movie' else result['name']
-                        result_year = result['release_date'][:4] if media_type == 'movie' else result['first_air_date'][:4]
-                        if result_year == str(year) and all(ord(c) < 128 for c in result_title):
-                            return result_title
-    return None
+    response = requests.get(url, params=params)
+    if response.status_code == 200:
+        results = response.json().get("results", [])
+        if results:
+            # Sort results by popularity (assuming more popular results are more likely to be correct)
+            sorted_results = sorted(results, key=lambda x: x.get('popularity', 0), reverse=True)
+
+            for result in sorted_results[:3]:  # Check top 3 results
+                result_title = result['title'] if media_type == 'movie' else result['name']
+                result_year = result['release_date'][:4] if media_type == 'movie' else result['first_air_date'][:4]
+
+                # Check if the result is in English and matches the year
+                if result_year == str(year) and all(ord(c) < 128 for c in result_title):
+                    return result_title
+
+    return None  # Return None if no suitable English title is found
 
 def clean_json_names(json_filename: str):
     json_path = Path(json_filename)
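The restored get_english_title is a plain TMDB search request. Trimmed to a standalone call it looks like this (the endpoint and parameters are the ones visible in the hunk; api_key is a placeholder):

    import requests

    def search_tmdb(title, year, api_key, media_type='movie'):
        response = requests.get(
            f"https://api.themoviedb.org/3/search/{media_type}",
            params={"api_key": api_key, "query": title,
                    "year": year, "language": "en-US"})
        response.raise_for_status()
        # Most popular match first, mirroring the sort above.
        return sorted(response.json().get("results", []),
                      key=lambda r: r.get('popularity', 0), reverse=True)
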
@@ -61,7 +66,7 @@ def clean_name(name: str) -> str:
         name = name.replace(char, '')
     return name
 
-async def assign_images_and_update_jellyfin(json_filename: str):
+def assign_images_and_update_jellyfin(json_filename: str):
     json_path = Path(json_filename)
 
     if not json_path.exists():
@@ -71,55 +76,59 @@ async def assign_images_and_update_jellyfin(json_filename: str):
     with json_path.open('r', encoding='utf-8') as f:
         json_data = json.load(f)
 
-    tasks = [process_item(item) for item in json_data]
-    await asyncio.gather(*tasks)
+    for item in json_data:
+        process_item(item)
 
     with json_path.open('w', encoding='utf-8') as f:
         json.dump(json_data, f, indent=4)
 
     log("Updated all posters and added English titles where applicable")
     if USE_HA:
-        await webhook(HA_WEBHOOK_URL, HA_WEBHOOK_ID)
+        webhook(HA_WEBHOOK_URL, HA_WEBHOOK_ID)
 
     save_missing_folders()
-    if os.path.getsize(MEDIUX_FILE) != 0:
-        with open(MEDIUX_FILE, 'w') as file:
-            log("Reset mediux.txt")
-
+    with open(MEDIUX_FILE, 'w') as file:
+        log("Reset mediux.txt")
 
-async def process_item(item: Dict):
+def process_item(item: Dict):
     clean_json_names(OUTPUT_FILENAME)
 
+    # Check if EnglishTitle is missing or invalid
     if 'EnglishTitle' not in item or not all(ord(c) < 128 for c in item['EnglishTitle']):
         if USE_TMDB:
             media_type = 'tv' if 'Seasons' in item else 'movie'
-            english_title = await get_english_title(item.get('OriginalTitle', item.get('Name')), item.get('Year'), media_type)
+            english_title = get_english_title(item.get('OriginalTitle', item.get('Name')), item.get('Year'), media_type)
 
             if english_title:
                 item['EnglishTitle'] = english_title
-                await update_sorted_series_item(item)
+                # Save the updated item immediately
+                update_sorted_series_item(item)
         else:
             log(f"TMDB lookup disabled. Skipping English title retrieval for {item.get('Name')}", success=False)
 
+    # Process posters and seasons
     item_dir = get_item_directory(item)
     if not item_dir:
         return item
 
     main_poster_path = find_main_poster(item_dir)
     if main_poster_path:
-        await update_jellyfin(item['Id'], main_poster_path, f"{clean_name(item.get('Name'))} ({item.get('Year')})", 'Primary')
+        update_jellyfin(item['Id'], main_poster_path, f"{clean_name(item.get('Name'))} ({item.get('Year')})", 'Primary')
     else:
         log(f"Main Cover not Found for item: {clean_name(item.get('Name'))} ({item.get('Year')})", success=False)
         missing_folders.append(f"Main Cover not Found: {item_dir / 'poster'}")
 
-    if 'Seasons' in item:
-        backdrop_path = find_backdrop(item_dir)
-        if backdrop_path:
-            await update_jellyfin(item['Id'], backdrop_path, f"{clean_name(item.get('Name'))} ({item.get('Year')})", 'Backdrop')
+    backdrop_path = find_backdrop(item_dir)
+    if backdrop_path:
+        update_jellyfin(item['Id'], backdrop_path, f"{clean_name(item.get('Name'))} ({item.get('Year')})", 'Backdrop')
+    else:
+        log(f"Backdrop not Found for item: {clean_name(item.get('Name'))} ({item.get('Year')})", success=False)
 
-        await process_seasons(item, item_dir)
+
+    if 'Seasons' in item:
+        process_seasons(item, item_dir)
 
     return item
 
-async def update_sorted_series_item(updated_item):
+def update_sorted_series_item(updated_item):
     sorted_series_path = Path(OUTPUT_FILENAME)
     if not sorted_series_path.exists():
         log(f"The file {OUTPUT_FILENAME} could not be found.", success=False)
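Both get_english_title and process_item decide whether a title is "English" with an ASCII-only check, all(ord(c) < 128 for c in title). Worth keeping in mind what that heuristic does and does not accept:

    def looks_english(title):
        # ASCII-only: accented Latin characters and non-Latin scripts fail.
        return all(ord(c) < 128 for c in title)

    looks_english("Spirited Away")   # True
    looks_english("Amélie")          # False (é is non-ASCII)
    looks_english("千と千尋の神隠し")  # False
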
@@ -136,25 +145,6 @@ async def update_sorted_series_item(updated_item):
     with sorted_series_path.open('w', encoding='utf-8') as f:
         json.dump(sorted_series, f, indent=4, ensure_ascii=False)
 
-async def process_seasons(item: Dict, item_dir: Path):
-    tasks = []
-    for season_name, season_data in item.get('Seasons', {}).items():
-        if 'Id' in season_data:
-            season_number = season_name.split(" ")[-1]
-            season_image_filename = f'Season{season_number.zfill(2)}'
-            season_image_path = find_season_image(item_dir, season_image_filename)
-
-            if season_image_path:
-                tasks.append(update_jellyfin(season_data['Id'], season_image_path, f"{clean_name(item.get('Name'))} ({item.get('Year')}) - {season_name}", 'Primary'))
-            else:
-                log(f"Season image not found for item - {clean_name(item.get('Name'))} ({item.get('Year')}) - {season_name}", success=False)
-                missing_folders.append(f"Season Cover not Found: {item_dir / season_image_filename}")
-
-            tasks.extend(await process_episodes(item, season_data, item_dir, season_number))
-
-    await asyncio.gather(*tasks)
-
-
 def get_item_directory(item: Dict) -> Optional[Path]:
     item_type = item.get('Type', 'Series' if 'Seasons' in item else 'Movie')
 
@@ -203,16 +193,30 @@ def find_main_poster(item_dir: Path) -> Path:
             return poster_path
     return None
 
+def process_seasons(item: Dict, item_dir: Path):
+    for season_name, season_data in item.get('Seasons', {}).items():
+        if 'Id' in season_data:
+            season_number = season_name.split(" ")[-1]
+            season_image_filename = f'Season{season_number.zfill(2)}'
+            season_image_path = find_season_image(item_dir, season_image_filename)
 
-async def process_episodes(item: Dict, season_data: Dict, item_dir: Path, season_number: str):
-    tasks = []
+            if season_image_path:
+                update_jellyfin(season_data['Id'], season_image_path, f"{clean_name(item.get('Name'))} ({item.get('Year')}) - {season_name}", 'Primary')
+            else:
+                log(f"Season image not found for item - {clean_name(item.get('Name'))} ({item.get('Year')}) - {season_name}", success=False)
+                missing_folders.append(f"Season Cover not Found: {item_dir / season_image_filename}")
+
+            # Process episodes
+            process_episodes(item, season_data, item_dir, season_number)
+
+def process_episodes(item: Dict, season_data: Dict, item_dir: Path, season_number: str):
     for episode_number, episode_id in season_data.get('Episodes', {}).items():
         if not episode_id:
             log(f"Skipping episode due to missing Id: S{season_number}E{episode_number} in {item.get('Name', 'Unknown Series')}", success=False)
             continue
 
         try:
-            int(episode_number)
+            int(episode_number)  # This will raise ValueError if episode_number is not a valid integer
         except ValueError:
             log(f"Skipping episode due to invalid episode number: S{season_number}E{episode_number} in {item.get('Name', 'Unknown Series')}", success=False)
             continue
@@ -221,9 +225,10 @@ async def process_episodes(item: Dict, season_data: Dict, item_dir: Path, season_number: str):
         episode_image_path = find_episode_image(item_dir, episode_image_filename)
 
         if episode_image_path:
-            tasks.append(update_jellyfin(episode_id, episode_image_path, f"{clean_name(item.get('Name', 'Unknown'))} ({item.get('Year', 'Unknown')}) - S{season_number}E{episode_number}", 'Primary'))
-
-    return tasks
+            try:
+                update_jellyfin(episode_id, episode_image_path, f"{clean_name(item.get('Name', 'Unknown'))} ({item.get('Year', 'Unknown')}) - S{season_number}E{episode_number}", 'Primary')
+            except Exception as e:
+                log(f"Error updating image for episode S{season_number}E{episode_number} in {item.get('Name', 'Unknown Series')}: {str(e)}", success=False)
 
 def find_backdrop(item_dir: Path) -> Optional[Path]:
     for ext in ['png', 'jpg', 'jpeg', 'webp']:
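The re-added process_seasons derives the expected artwork filename from the season name, so "Season 1" maps to Season01 via zfill:

    season_number = "Season 1".split(" ")[-1]   # "1"
    print(f"Season{season_number.zfill(2)}")    # "Season01"
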
@@ -256,7 +261,7 @@ def find_season_image(item_dir: Path, season_image_filename: str) -> Path:
             return season_image_path
     return None
 
-async def update_jellyfin(id: str, image_path: Path, item_name: str, image_type: str = 'Primary'):
+def update_jellyfin(id: str, image_path: Path, item_name: str, image_type: str = 'Primary'):
     endpoint = f'/Items/{id}/Images/{image_type}/0'
     url = f"{JELLYFIN_URL}{endpoint}"
     headers = {
@@ -272,16 +277,15 @@ async def update_jellyfin(id: str, image_path: Path, item_name: str, image_type: str = 'Primary'):
         image_data = file.read()
         image_base64 = b64encode(image_data)
 
-    async with aiohttp.ClientSession() as session:
-        try:
-            async with session.post(url, headers=headers, data=image_base64) as response:
-                response.raise_for_status()
-                log(f'Updated {image_type} image for {clean_name(item_name)} successfully.')
-        except aiohttp.ClientError as e:
-            status_code = e.status if hasattr(e, 'status') else "N/A"
-            response_text = await e.text() if hasattr(e, 'text') else "N/A"
-            log(f'Error updating {image_type} image for {clean_name(item_name)}. Status Code: {status_code}', success=False)
-            log(f'Response: {response_text}', success=False)
+    try:
+        response = requests.post(url, headers=headers, data=image_base64)
+        response.raise_for_status()
+        log(f'Updated {image_type} image for {clean_name(item_name)} successfully.')
+    except requests.RequestException as e:
+        status_code = e.response.status_code if e.response is not None else "N/A"
+        response_text = e.response.text if e.response is not None else "N/A"
+        log(f'Error updating {image_type} image for {clean_name(item_name)}. Status Code: {status_code}', success=False)
+        log(f'Response: {response_text}', success=False)
 
 if __name__ == "__main__":
     # This block can be used for testing the module independently
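The upload above POSTs a base64-encoded image body to Jellyfin's item-image endpoint. The headers dict is elided by the hunk; a standalone sketch of such a call, where the X-Emby-Token auth header and the hard-coded content type are assumptions rather than code from this repository:

    from base64 import b64encode
    import requests

    def upload_poster(base_url, api_key, item_id, image_file):
        with open(image_file, 'rb') as f:
            payload = b64encode(f.read())
        headers = {
            'X-Emby-Token': api_key,       # assumed auth header
            'Content-Type': 'image/jpeg',  # must match the actual file type
        }
        response = requests.post(f"{base_url}/Items/{item_id}/Images/Primary/0",
                                 headers=headers, data=payload)
        response.raise_for_status()
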
diff --git a/src/webhook.py b/src/webhook.py
index 99c193b..70a4116 100644
--- a/src/webhook.py
+++ b/src/webhook.py
@@ -1,16 +1,15 @@
+import requests
 from src.utils import *
-import aiohttp
 
-async def webhook(HA_WEBHOOK_URL, HA_WEBHOOK_ID):
+def webhook(HA_WEBHOOK_URL, HA_WEBHOOK_ID):
     try:
         webhook_url = f"{HA_WEBHOOK_URL}/api/webhook/{HA_WEBHOOK_ID}"
-        async with aiohttp.ClientSession() as session:
-            async with session.post(webhook_url) as response:
-                if response.status == 200:
-                    log("Webhook sent successfully!")
-                else:
-                    print(f"Statuscode: {response.status}")
+        response = requests.post(webhook_url)
+        response.raise_for_status()
+        log("Webhook sent successfully!")
+        if response.status_code != 200:
+            print(f"Status code: {response.status_code}")
 
-    except aiohttp.ClientError as e:
-        print(f"Error while sending webhook: {e}")
\ No newline at end of file
+    except requests.exceptions.RequestException as e:
+        print(f"Error while sending webhook: {e}")
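One quirk in the restored webhook: raise_for_status() already raises for every 4xx/5xx answer, so the status_code != 200 branch can only fire for other 2xx/3xx codes. A tighter equivalent, shown as a sketch rather than a change to the patch:

    import requests

    def webhook(base_url, webhook_id):
        response = requests.post(f"{base_url}/api/webhook/{webhook_id}")
        response.raise_for_status()  # raises on any 4xx/5xx status
        print("Webhook sent successfully!")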