Commit 0b7f1e4
Parent: 37d2ac3
unstable update

Files changed:
- LoadBalancer.py +39 -33
- tvdb.py +18 -5
- tvdbApiClient.py +15 -13
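In short, this commit moves metadata prefetching from a synchronous loop at the bottom of LoadBalancer.py to an asyncio-based pipeline: fetch_and_cache_json and fetch_and_cache_seasons become coroutines, JSON caching goes through a new async save_to_json built on aiofiles, and the module-level download_progress dict is removed.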
LoadBalancer.py
CHANGED

@@ -3,6 +3,7 @@ from indexer import indexer
 import re
 from tvdb import fetch_and_cache_json
 from threading import Event, Thread
+import asyncio
 import time
 import logging
 from utils import convert_to_gb

@@ -10,8 +11,6 @@ from api import InstancesAPI
 
 CACHE_DIR = os.getenv("CACHE_DIR")
 
-download_progress = {}
-
 class LoadBalancer:
     def __init__(self, cache_dir, token, repo, polling_interval=4, max_retries=3, initial_delay=1):
         self.version = "0.0.0.1 Alpha"

@@ -33,8 +32,8 @@ class LoadBalancer:
         if not os.path.exists(self.CACHE_DIR):
             os.makedirs(self.CACHE_DIR)
 
-        #
-        self.file_structure=indexer()
+        # Initialize file structure and start prefetching
+        self.file_structure = indexer()
         self.start_prefetching()
 
         # Start polling and file checking in separate threads

@@ -42,6 +41,42 @@ class LoadBalancer:
         polling_thread.daemon = True
         polling_thread.start()
 
+    def start_prefetching(self):
+        """Start the metadata prefetching in a separate thread."""
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        loop.run_in_executor(None, self.prefetch_metadata)
+
+    async def prefetch_metadata(self):
+        """Prefetch metadata for all items in the file structure."""
+        tasks = []
+        for item in self.file_structure:
+            if 'contents' in item:
+                for sub_item in item['contents']:
+                    original_title = sub_item['path'].split('/')[-1]
+                    media_type = 'series' if item['path'].startswith('tv') else 'movie'
+                    title = original_title
+                    year = None
+
+                    # Extract year from the title if available
+                    match = re.search(r'\((\d{4})\)', original_title)
+                    if match:
+                        year_str = match.group(1)
+                        if year_str.isdigit() and len(year_str) == 4:
+                            title = original_title[:match.start()].strip()
+                            year = int(year_str)
+                    else:
+                        parts = original_title.rsplit(' ', 1)
+                        if len(parts) > 1 and parts[-1].isdigit() and len(parts[-1]) == 4:
+                            title = parts[0].strip()
+                            year = int(parts[-1])
+
+                    # Schedule the fetch and cache task
+                    tasks.append(fetch_and_cache_json(original_title, title, media_type, year))
+
+        # Run all tasks concurrently
+        await asyncio.gather(*tasks)
+
     def register_instance(self, instance_url):
         if instance_url not in self.instances:
             self.instances.append(instance_url)

@@ -113,10 +148,6 @@ class LoadBalancer:
         logging.info("Stopping polling.")
         self.stop_event.set()
 
-    def start_prefetching(self):
-        """Start the metadata prefetching in a separate thread."""
-        self.prefetch_metadata()
-
     def update_instances_health(self, instance, cache_size):
         self.instances_health[instance] = {"used":cache_size["cache_size"],
                                            "total": "50 GB"}

@@ -241,31 +272,6 @@ class LoadBalancer:
         """Generate a film ID based on the title."""
         return title.replace(" ", "_").lower()
 
-    def prefetch_metadata(self):
-        """Prefetch metadata for all items in the file structure."""
-        for item in self.file_structure:
-            if 'contents' in item:
-                for sub_item in item['contents']:
-                    original_title = sub_item['path'].split('/')[-1]
-                    media_type = 'series' if item['path'].startswith('tv') else 'movie'
-                    title = original_title
-                    year = None
-
-                    # Extract year from the title if available
-                    match = re.search(r'\((\d{4})\)', original_title)
-                    if match:
-                        year_str = match.group(1)
-                        if year_str.isdigit() and len(year_str) == 4:
-                            title = original_title[:match.start()].strip()
-                            year = int(year_str)
-                    else:
-                        parts = original_title.rsplit(' ', 1)
-                        if len(parts) > 1 and parts[-1].isdigit() and len(parts[-1]) == 4:
-                            title = parts[0].strip()
-                            year = int(parts[-1])
-
-                    fetch_and_cache_json(original_title, title, media_type, year)
-
     def get_all_tv_shows(self):
         """Get all TV shows from the indexed cache structure JSON file."""
         tv_shows = {}
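A note on the new start_prefetching: loop.run_in_executor(None, self.prefetch_metadata) submits the coroutine function to a worker thread, where calling it only creates a coroutine object that nothing ever awaits (CPython emits a "coroutine ... was never awaited" RuntimeWarning), so the prefetch most likely never executes. Below is a minimal sketch of one way to actually drive the coroutine from a background thread, reusing the names in this commit; it is a method-body sketch, not a drop-in patch.

import asyncio
from threading import Thread

def start_prefetching(self):
    """Run prefetch_metadata to completion on a daemon thread."""
    # asyncio.run spins up (and later closes) a fresh event loop on the
    # worker thread, so no manual new_event_loop bookkeeping is needed.
    Thread(target=asyncio.run, args=(self.prefetch_metadata(),), daemon=True).start()
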
tvdb.py
CHANGED

@@ -5,7 +5,9 @@ import urllib.parse
 from datetime import datetime, timedelta
 from dotenv import load_dotenv
 import json
-
+import asyncio
+import aiofiles
+from tvdbApiClient import fetch_and_cache_seasons, save_to_json
 
 load_dotenv()
 THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")

@@ -37,7 +39,7 @@ def get_thetvdb_token():
     authenticate_thetvdb()
     return THETVDB_TOKEN
 
-def fetch_and_cache_json(original_title, title, media_type, year=None):
+async def fetch_and_cache_json(original_title, title, media_type, year=None):
     if year:
         search_url = f"{THETVDB_API_URL}/search?query={urllib.parse.quote(title)}&type={media_type}&year={year}"
     else:

@@ -74,7 +76,7 @@ def fetch_and_cache_json(original_title, title, media_type, year=None):
         extended_url = f"{THETVDB_API_URL}/movies/{tvdb_id}/extended?meta=translations"
     elif media_type == 'series':
         extended_url = f"{THETVDB_API_URL}/series/{tvdb_id}/extended?meta=translations"
-        fetch_and_cache_seasons(tvdb_id)
+        await fetch_and_cache_seasons(tvdb_id)  # Using await for the async function
     else:
         print(f"Unsupported media type: {media_type}")
         return

@@ -86,8 +88,19 @@ def fetch_and_cache_json(original_title, title, media_type, year=None):
 
         # Cache the extended JSON response
         json_cache_path = os.path.join(CACHE_DIR, f"{urllib.parse.quote(original_title)}.json")
-        with open(json_cache_path, 'w', encoding='utf-8') as f:
-            json.dump(extended_data, f)
+        await save_to_json(extended_data, json_cache_path)
 
     except requests.RequestException as e:
         print(f"Error fetching data: {e}")
+
+def main():
+    # Replace with your series ID and other parameters
+    original_title = "The Listner (2009)"
+    title = "The Listner"
+    media_type = "series"  # or "movie"
+    year = 2009
+
+    asyncio.run(fetch_and_cache_json(original_title, title, media_type, year))
+
+if __name__ == "__main__":
+    main()
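Two observations on tvdb.py: aiofiles is imported but not used directly here (file writes go through save_to_json from tvdbApiClient), and fetch_and_cache_json still issues blocking requests calls inside a coroutine (note the requests.RequestException handler), so gathering many of these coroutines, as LoadBalancer.prefetch_metadata now does, will not actually overlap the HTTP work. A hedged sketch of one way to keep the async signature while moving the blocking call off the event loop, assuming Python 3.9+; fetch_json is a hypothetical helper, not part of this commit:

import asyncio
import requests

async def fetch_json(url, headers=None):
    # Hypothetical helper: requests.get runs in a worker thread via
    # asyncio.to_thread (Python 3.9+), so the event loop is not blocked.
    response = await asyncio.to_thread(requests.get, url, headers=headers)
    response.raise_for_status()
    return response.json()
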
tvdbApiClient.py
CHANGED

@@ -3,10 +3,11 @@ import os
 import logging
 from pathlib import Path
 import tvdb_v4_official
+import aiofiles
 
 THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
 CACHE_DIR = os.getenv("CACHE_DIR")
-SAVE_DIR = os.path.join(CACHE_DIR,"metadata")
+SAVE_DIR = os.path.join(CACHE_DIR, "metadata")
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Initialize TVDB client

@@ -41,17 +42,17 @@ def filter_episode_data(episode):
         "year": episode.get("year")
     }
 
-def save_to_json(data, path):
-    """Save data to a JSON file."""
+async def save_to_json(data, path):
+    """Save data to a JSON file asynchronously."""
     try:
-        with open(path, 'w', encoding='utf-8') as f:
-            json.dump(data, f, indent=4, ensure_ascii=False)
+        async with aiofiles.open(path, 'w', encoding='utf-8') as f:
+            await f.write(json.dumps(data, indent=4, ensure_ascii=False))
         logging.info(f"Data saved to {path}")
     except IOError as e:
         logging.error(f"Error saving data to {path}: {e}")
 
-def fetch_and_cache_seasons(series_id):
-    """Fetch and cache episodes for a given series ID."""
+async def fetch_and_cache_seasons(series_id):
+    """Fetch and cache episodes for a given series ID asynchronously."""
     series_info = get_series_info(series_id)
     if not series_info:
         logging.error("Series info could not be fetched.")

@@ -78,20 +79,21 @@
         logging.error(f"Error fetching season info for {season_key}: {e}")
 
     # Create folder for the series
-    series_folder = Path(SAVE_DIR) / series_id
+    series_folder = Path(SAVE_DIR) / str(series_id)
     series_folder.mkdir(parents=True, exist_ok=True)
 
     # Save episodes for each season in separate JSON files
     for season_key, episodes in sorted(all_seasons.items()):
         episodes_sorted = sorted(episodes, key=lambda e: e.get('number'))
         season_file = series_folder / f"{season_key}.json"
-        save_to_json(episodes_sorted, season_file)
+        await save_to_json(episodes_sorted, season_file)
 
-def main(series_id):
-    """Main function to fetch and cache episodes."""
-    fetch_and_cache_seasons(series_id)
+async def main(series_id):
+    """Main function to fetch and cache episodes asynchronously."""
+    await fetch_and_cache_seasons(series_id)
 
 if __name__ == "__main__":
+    import asyncio
     # Replace with your series ID
     SERIES_ID = "315103"
-    main(SERIES_ID)
+    asyncio.run(main(SERIES_ID))
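A similar caveat applies to the converted fetch_and_cache_seasons: after this change only the JSON writes are asynchronous, while get_series_info and the tvdb_v4_official season lookups still block the event loop from inside a coroutine. One possible follow-up, sketched under the same Python 3.9+ assumption (get_series_info_async is a hypothetical name, and the import assumes get_series_info stays a module-level function in tvdbApiClient):

import asyncio
from tvdbApiClient import get_series_info

async def get_series_info_async(series_id):
    # Hypothetical wrapper: the blocking tvdb_v4_official lookup runs in
    # a worker thread instead of on the event loop (Python 3.9+).
    return await asyncio.to_thread(get_series_info, series_id)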