Spaces:
Sleeping
Sleeping
Commit
·
fb99609
1
Parent(s):
96e3d15
0.0.0.3 Alpha
Browse files- LoadBalancer.py +1 -1
- app.py +94 -23
- services.py +88 -16
- tvdb.py +16 -2
- tvdbApiClient.py +2 -9
- utils.py +19 -1
LoadBalancer.py
CHANGED
|
@@ -14,7 +14,7 @@ CACHE_DIR = os.getenv("CACHE_DIR")
|
|
| 14 |
|
| 15 |
class LoadBalancer:
|
| 16 |
def __init__(self, cache_dir, token, repo, polling_interval=4, max_retries=3, initial_delay=1):
|
| 17 |
-
self.version = "0.0.0.
|
| 18 |
self.instances = []
|
| 19 |
self.instances_health = {}
|
| 20 |
self.polling_interval = polling_interval
|
|
|
|
| 14 |
|
| 15 |
class LoadBalancer:
|
| 16 |
def __init__(self, cache_dir, token, repo, polling_interval=4, max_retries=3, initial_delay=1):
|
| 17 |
+
self.version = "0.0.0.3 Alpha"
|
| 18 |
self.instances = []
|
| 19 |
self.instances_health = {}
|
| 20 |
self.polling_interval = polling_interval
|
app.py
CHANGED
|
@@ -1,11 +1,12 @@
|
|
| 1 |
-
from fastapi import FastAPI,HTTPException, Request
|
| 2 |
from fastapi.responses import JSONResponse
|
|
|
|
| 3 |
from LoadBalancer import LoadBalancer
|
| 4 |
import logging
|
| 5 |
import os
|
| 6 |
import urllib.parse
|
| 7 |
from utils import read_json_file, is_valid_url
|
| 8 |
-
from tvdb import recent_list
|
| 9 |
|
| 10 |
CACHE_DIR = os.getenv("CACHE_DIR")
|
| 11 |
TOKEN = os.getenv("TOKEN")
|
|
@@ -48,19 +49,19 @@ async def register_instance(request: Request):
|
|
| 48 |
async def get_file_structure():
|
| 49 |
return load_balancer.file_structure
|
| 50 |
|
| 51 |
-
@app.get("/api/get/
|
| 52 |
-
async def
|
| 53 |
return load_balancer.FILM_STORE
|
| 54 |
|
| 55 |
-
@app.get("/api/get/
|
| 56 |
-
async def
|
| 57 |
return load_balancer.TV_STORE
|
| 58 |
|
| 59 |
-
@app.get("/api/get/
|
| 60 |
-
async def
|
| 61 |
return load_balancer.get_all_films()
|
| 62 |
|
| 63 |
-
@app.get("/api/get/
|
| 64 |
async def get_all_tvshows_api():
|
| 65 |
return load_balancer.get_all_tv_shows()
|
| 66 |
|
|
@@ -76,17 +77,43 @@ async def get_recent_items(limit: int = 5):
|
|
| 76 |
|
| 77 |
# Return combined results
|
| 78 |
return JSONResponse(content={
|
| 79 |
-
'
|
| 80 |
'series': limited_series
|
| 81 |
})
|
| 82 |
|
| 83 |
-
@app.get("/api/get/
|
| 84 |
-
async def
|
| 85 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
if not title:
|
| 87 |
raise HTTPException(status_code=400, detail="No title provided")
|
| 88 |
|
| 89 |
-
|
|
|
|
| 90 |
|
| 91 |
if os.path.exists(json_cache_path):
|
| 92 |
data = await read_json_file(json_cache_path)
|
|
@@ -94,17 +121,39 @@ async def get_film_metadata_api(title: str):
|
|
| 94 |
|
| 95 |
raise HTTPException(status_code=404, detail="Metadata not found")
|
| 96 |
|
| 97 |
-
@app.get("/api/get/
|
| 98 |
-
async def
|
| 99 |
-
"""Endpoint to get the
|
| 100 |
if not title:
|
| 101 |
raise HTTPException(status_code=400, detail="No title provided")
|
| 102 |
|
| 103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
|
| 105 |
if os.path.exists(json_cache_path):
|
| 106 |
data = await read_json_file(json_cache_path)
|
| 107 |
-
|
| 108 |
# Add the file structure to the metadata
|
| 109 |
tv_structure_data = load_balancer.get_tv_structure(title)
|
| 110 |
if tv_structure_data:
|
|
@@ -114,8 +163,30 @@ async def get_tv_metadata_api(title: str):
|
|
| 114 |
|
| 115 |
raise HTTPException(status_code=404, detail="Metadata not found")
|
| 116 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
|
| 118 |
-
@app.get("/api/get/
|
| 119 |
async def get_season_metadata_api(series_id: int, season: str):
|
| 120 |
"""Endpoint to get the TV show season metadata by id and season."""
|
| 121 |
if not season:
|
|
@@ -139,13 +210,13 @@ async def get_instances():
|
|
| 139 |
async def get_instances_health():
|
| 140 |
return load_balancer.instances_health
|
| 141 |
|
| 142 |
-
@app.get("/api/get/
|
| 143 |
async def get_movie_api(title: str):
|
| 144 |
"""Endpoint to get the movie by title."""
|
| 145 |
if not title:
|
| 146 |
raise HTTPException(status_code=400, detail="Title parameter is required")
|
| 147 |
|
| 148 |
-
# Check if the
|
| 149 |
if title in load_balancer.FILM_STORE:
|
| 150 |
url = load_balancer.FILM_STORE[title]
|
| 151 |
return JSONResponse(content={"url": url})
|
|
@@ -160,7 +231,7 @@ async def get_movie_api(title: str):
|
|
| 160 |
if response:
|
| 161 |
return JSONResponse(content=response)
|
| 162 |
|
| 163 |
-
@app.get("/api/get/
|
| 164 |
async def get_tv_show_api(title: str, season: str, episode: str):
|
| 165 |
"""Endpoint to get the TV show by title, season, and episode."""
|
| 166 |
if not title or not season or not episode:
|
|
|
|
| 1 |
+
from fastapi import FastAPI,HTTPException, Request, Query
|
| 2 |
from fastapi.responses import JSONResponse
|
| 3 |
+
from typing import Optional
|
| 4 |
from LoadBalancer import LoadBalancer
|
| 5 |
import logging
|
| 6 |
import os
|
| 7 |
import urllib.parse
|
| 8 |
from utils import read_json_file, is_valid_url
|
| 9 |
+
from tvdb import recent_list, genre_list
|
| 10 |
|
| 11 |
CACHE_DIR = os.getenv("CACHE_DIR")
|
| 12 |
TOKEN = os.getenv("TOKEN")
|
|
|
|
| 49 |
async def get_file_structure():
|
| 50 |
return load_balancer.file_structure
|
| 51 |
|
| 52 |
@app.get("/api/get/movie/store")
async def get_movie_store():
    """Return the raw film store mapping (title -> cached URL)."""
    return load_balancer.FILM_STORE


@app.get("/api/get/series/store")
async def get_series_store():
    """Return the raw TV store mapping (title -> cached episode URLs)."""
    return load_balancer.TV_STORE


@app.get("/api/get/movie/all")
async def get_all_movies_api():
    """Return every film known to the load balancer."""
    return load_balancer.get_all_films()


@app.get("/api/get/series/all")
async def get_all_tvshows_api():
    """Return every TV show known to the load balancer."""
    return load_balancer.get_all_tv_shows()
|
| 67 |
|
|
|
|
| 77 |
|
| 78 |
# Return combined results
|
| 79 |
return JSONResponse(content={
|
| 80 |
+
'movies': limited_films,
|
| 81 |
'series': limited_series
|
| 82 |
})
|
| 83 |
|
| 84 |
@app.get("/api/get/genre")
async def get_genre_items(genre: str, media_type: Optional[str] = None, limit: int = 5):
    """
    Get recent items from a specified genre with an optional media type filter
    and a limit on the number of results.

    :param genre: The genre to filter by (e.g., 'Comedy').
    :param media_type: Optional. Filter by media type ('movie' or 'series').
    :param limit: The maximum number of items to return.
    :return: A JSON response containing the filtered items.
    """
    # Sorted entries for the genre (already filtered by media type if given),
    # truncated to the requested number of items.
    matching = genre_list.get_sorted_entries(genre, media_type=media_type)[:limit]

    # Split the limited slice by media type for the response payload.
    payload = {
        'movies': [entry for entry in matching if entry[4] == 'movie'],
        'series': [entry for entry in matching if entry[4] == 'series'],
    }
    return JSONResponse(content=payload)
|
| 108 |
+
|
| 109 |
+
@app.get("/api/get/movie/metadata/{title}")
|
| 110 |
+
async def get_movie_metadata_api(title: str):
|
| 111 |
+
"""Endpoint to get the movie metadata by title."""
|
| 112 |
if not title:
|
| 113 |
raise HTTPException(status_code=400, detail="No title provided")
|
| 114 |
|
| 115 |
+
full_dir_path = os.path.join(CACHE_DIR, 'movie')
|
| 116 |
+
json_cache_path = os.path.join(full_dir_path,f"{urllib.parse.quote(title)}.json")
|
| 117 |
|
| 118 |
if os.path.exists(json_cache_path):
|
| 119 |
data = await read_json_file(json_cache_path)
|
|
|
|
| 121 |
|
| 122 |
raise HTTPException(status_code=404, detail="Metadata not found")
|
| 123 |
|
| 124 |
@app.get("/api/get/movie/card/{title}")
async def get_movie_card_api(title: str):
    """Endpoint to get a compact movie card (English title, year, image) by title.

    :param title: The movie title used as the cache key.
    :raises HTTPException: 400 if no title, 404 if no cached metadata exists.
    """
    if not title:
        raise HTTPException(status_code=400, detail="No title provided")

    full_dir_path = os.path.join(CACHE_DIR, 'movie')
    json_cache_path = os.path.join(full_dir_path, f"{urllib.parse.quote(title)}.json")

    if os.path.exists(json_cache_path):
        data = await read_json_file(json_cache_path)
        # Use .get so a partially-populated cache entry yields nulls in the
        # card instead of raising KeyError (a 500) — consistent with the
        # defensive .get access already used for translations below.
        image = data['data'].get('image')
        year = data['data'].get('year')
        eng_title = None
        if data['data'].get('translations') and data['data']['translations'].get('nameTranslations'):
            for name in data['data']['translations']['nameTranslations']:
                if name['language'] == 'eng':
                    eng_title = name.get('name')
                    break
        return JSONResponse(content={'title': eng_title, 'year': year, 'image': image})

    raise HTTPException(status_code=404, detail="Card not found")
|
| 146 |
+
|
| 147 |
+
@app.get("/api/get/series/metadata/{title}")
|
| 148 |
+
async def get_series_metadata_api(title: str):
|
| 149 |
+
"""Endpoint to get the TV show metadata by title."""
|
| 150 |
+
if not title:
|
| 151 |
+
raise HTTPException(status_code=400, detail="No title provided")
|
| 152 |
+
full_dir_path = os.path.join(CACHE_DIR, 'series')
|
| 153 |
+
json_cache_path = os.path.join(full_dir_path,f"{urllib.parse.quote(title)}.json")
|
| 154 |
|
| 155 |
if os.path.exists(json_cache_path):
|
| 156 |
data = await read_json_file(json_cache_path)
|
|
|
|
| 157 |
# Add the file structure to the metadata
|
| 158 |
tv_structure_data = load_balancer.get_tv_structure(title)
|
| 159 |
if tv_structure_data:
|
|
|
|
| 163 |
|
| 164 |
raise HTTPException(status_code=404, detail="Metadata not found")
|
| 165 |
|
| 166 |
@app.get("/api/get/series/card/{title}")
async def get_series_card_api(title: str):
    """Endpoint to get a compact TV show card (English title, year, image) by title.

    :param title: The series title used as the cache key.
    :raises HTTPException: 400 if no title, 404 if no cached metadata exists.
    """
    if not title:
        raise HTTPException(status_code=400, detail="No title provided")

    full_dir_path = os.path.join(CACHE_DIR, 'series')
    json_cache_path = os.path.join(full_dir_path, f"{urllib.parse.quote(title)}.json")

    if os.path.exists(json_cache_path):
        data = await read_json_file(json_cache_path)
        # Use .get so a partially-populated cache entry yields nulls in the
        # card instead of raising KeyError (a 500) — consistent with the
        # defensive .get access already used for translations below.
        image = data['data'].get('image')
        year = data['data'].get('year')
        eng_title = None
        if data['data'].get('translations') and data['data']['translations'].get('nameTranslations'):
            for name in data['data']['translations']['nameTranslations']:
                if name['language'] == 'eng':
                    eng_title = name.get('name')
                    break
        return JSONResponse(content={'title': eng_title, 'year': year, 'image': image})

    raise HTTPException(status_code=404, detail="Card not found")
|
| 187 |
+
|
| 188 |
|
| 189 |
+
@app.get("/api/get/series/metadata/{series_id}/{season}")
|
| 190 |
async def get_season_metadata_api(series_id: int, season: str):
|
| 191 |
"""Endpoint to get the TV show season metadata by id and season."""
|
| 192 |
if not season:
|
|
|
|
| 210 |
async def get_instances_health():
|
| 211 |
return load_balancer.instances_health
|
| 212 |
|
| 213 |
+
@app.get("/api/get/movie/{title}")
|
| 214 |
async def get_movie_api(title: str):
|
| 215 |
"""Endpoint to get the movie by title."""
|
| 216 |
if not title:
|
| 217 |
raise HTTPException(status_code=400, detail="Title parameter is required")
|
| 218 |
|
| 219 |
+
# Check if the movie is already cached
|
| 220 |
if title in load_balancer.FILM_STORE:
|
| 221 |
url = load_balancer.FILM_STORE[title]
|
| 222 |
return JSONResponse(content={"url": url})
|
|
|
|
| 231 |
if response:
|
| 232 |
return JSONResponse(content=response)
|
| 233 |
|
| 234 |
+
@app.get("/api/get/series/{title}/{season}/{episode}")
|
| 235 |
async def get_tv_show_api(title: str, season: str, episode: str):
|
| 236 |
"""Endpoint to get the TV show by title, season, and episode."""
|
| 237 |
if not title or not season or not episode:
|
services.py
CHANGED
|
@@ -44,22 +44,94 @@ class RecentList:
|
|
| 44 |
# Get sorted series with details
|
| 45 |
return [(title, -year, self.series[title][1], self.series[title][2]) for year, title in self.sorted_series]
|
| 46 |
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
|
|
|
| 50 |
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
media_list.add_entry("Film B", 2021, "Description C", "http://link-to-image-c.com", 'film')
|
| 55 |
-
media_list.add_entry("Film A", 2024, "Updated Description A", "http://updated-link-to-image-a.com", 'film') # Updating the year of "Film A"
|
| 56 |
-
media_list.add_entry("Series B", 2021, "Description D", "http://link-to-image-d.com", 'series')
|
| 57 |
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
# Get sorted series with details
|
| 45 |
return [(title, -year, self.series[title][1], self.series[title][2]) for year, title in self.sorted_series]
|
| 46 |
|
| 47 |
class GenreList:
    """Maintain per-genre collections of media entries, kept sorted by year (newest first)."""

    def __init__(self):
        # genre name -> {'entries': {title: (year, description, image_link, media_type)},
        #                'sorted_entries': [(-year, title), ...] kept sorted via bisect}
        self.genres = {}

    def add_entry(self, genres, title, year, description, image_link, media_type):
        """
        Add an entry to multiple genres.

        :param genres: A list of genre dictionaries, each containing 'id', 'name', and 'slug'.
            May be None or empty (e.g. when no genres could be extracted), in which
            case the call is a no-op.
        :param title: The title of the media.
        :param year: The release year of the media.
        :param description: A brief description of the media.
        :param image_link: A URL to an image representing the media.
        :param media_type: The type of media ('movie' or 'series').
        """
        # Guard: callers pass None when the source metadata had no genres
        # (see fetch_and_cache_json in tvdb.py) — iterating None would raise.
        if not genres:
            return

        for genre in genres:
            genre_name = genre['name']
            if genre_name not in self.genres:
                # Initialize the genre with an empty dictionary and sorted list
                self.genres[genre_name] = {'entries': {}, 'sorted_entries': []}

            # Update or add the entry in the specified genre
            self._update_genre(self.genres[genre_name]['entries'],
                               self.genres[genre_name]['sorted_entries'],
                               title, year, description, image_link, media_type)

    def _update_genre(self, dictionary, sorted_list, title, year, description, image_link, media_type):
        """Insert or replace *title* in one genre's entry dict and sorted index."""
        try:
            # Convert year to integer
            year = int(year)
        except ValueError:
            raise ValueError(f"Invalid year: {year}. Year must be an integer.")

        if title in dictionary:
            # Remove the old entry from the sorted list if it exists
            old_year = dictionary[title][0]  # Get the old year
            try:
                sorted_list.remove((-old_year, title))
            except ValueError:
                pass  # Ignore if the old entry does not exist in the sorted list

        # Update or add the new entry in the genre dictionary
        dictionary[title] = (year, description, image_link, media_type)

        # Insert the new year and title into the sorted list;
        # years are negated so ascending sort order means newest-first.
        bisect.insort(sorted_list, (-year, title))

    def get_sorted_entries(self, genre_name, media_type=None):
        """
        Get sorted entries for a specified genre and optional media type.

        :param genre_name: The name of the genre to retrieve entries from.
        :param media_type: Optional. Filter by media type ('movie' or 'series').
        :return: A list of tuples (title, year, description, image_link, media_type),
            newest first; empty list for an unknown genre.
        """
        if genre_name in self.genres:
            entries = [
                (title, -year, self.genres[genre_name]['entries'][title][1],
                 self.genres[genre_name]['entries'][title][2], self.genres[genre_name]['entries'][title][3])
                for year, title in self.genres[genre_name]['sorted_entries']
            ]
            if media_type:
                entries = [entry for entry in entries if entry[4] == media_type]
            return entries
        else:
            return []

    def remove_genre(self, genre_name):
        """Remove a genre entirely from the list."""
        if genre_name in self.genres:
            del self.genres[genre_name]

    def remove_entry_from_genre(self, genre_name, title):
        """Remove a specific title from a specific genre."""
        if genre_name in self.genres and title in self.genres[genre_name]['entries']:
            old_year = self.genres[genre_name]['entries'][title][0]
            del self.genres[genre_name]['entries'][title]
            self.genres[genre_name]['sorted_entries'].remove((-old_year, title))
|
| 124 |
+
|
| 125 |
+
# Example usage:
|
| 126 |
+
# genre_list = GenreList()
|
| 127 |
+
# genres = [
|
| 128 |
+
# {"id": 15, "name": "Comedy", "slug": "comedy"},
|
| 129 |
+
# {"id": 17, "name": "Animation", "slug": "animation"},
|
| 130 |
+
# {"id": 27, "name": "Anime", "slug": "anime"}
|
| 131 |
+
# ]
|
| 132 |
+
# genre_list.add_entry(genres, 'Movie Title', 2023, 'Description here', 'image_link_here', 'movie')
|
| 133 |
+
# genre_list.add_entry(genres, 'Series Title', 2022, 'Series Description', 'series_image_link_here', 'series')
|
| 134 |
+
# sorted_comedy_movies = genre_list.get_sorted_entries('Comedy', media_type='movie')
|
| 135 |
+
# sorted_comedy_series = genre_list.get_sorted_entries('Comedy')
|
| 136 |
+
# print(sorted_comedy_movies)
|
| 137 |
+
# print(sorted_comedy_series)
|
tvdb.py
CHANGED
|
@@ -8,7 +8,7 @@ import json
|
|
| 8 |
import asyncio
|
| 9 |
import aiofiles
|
| 10 |
from tvdbApiClient import fetch_and_cache_seasons, save_to_json
|
| 11 |
-
from services import RecentList
|
| 12 |
|
| 13 |
load_dotenv()
|
| 14 |
THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
|
|
@@ -17,6 +17,7 @@ CACHE_DIR = os.getenv("CACHE_DIR")
|
|
| 17 |
TOKEN_EXPIRY = None
|
| 18 |
THETVDB_TOKEN = None
|
| 19 |
recent_list = RecentList()
|
|
|
|
| 20 |
|
| 21 |
def authenticate_thetvdb():
|
| 22 |
global THETVDB_TOKEN, TOKEN_EXPIRY
|
|
@@ -59,6 +60,7 @@ def clean_data(data):
|
|
| 59 |
'originalLanguage': None,
|
| 60 |
'translations': {},
|
| 61 |
'artworks': [],
|
|
|
|
| 62 |
'characters': [],
|
| 63 |
'spoken_languages': [],
|
| 64 |
'translations': {}
|
|
@@ -126,6 +128,11 @@ async def fetch_and_cache_json(original_title, title, media_type, year=None):
|
|
| 126 |
|
| 127 |
cleaned_data = clean_data(extended_data)
|
| 128 |
print(f"cleaning.. {original_title}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
description = None
|
| 131 |
if cleaned_data['data'].get('translations') and cleaned_data['data']['translations'].get('overviewTranslations'):
|
|
@@ -145,11 +152,18 @@ async def fetch_and_cache_json(original_title, title, media_type, year=None):
|
|
| 145 |
|
| 146 |
if media_type == 'movie':
|
| 147 |
recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'film')
|
|
|
|
| 148 |
elif media_type == 'series':
|
| 149 |
recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'series')
|
|
|
|
| 150 |
print(f"adding.. {original_title}")
|
| 151 |
|
| 152 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
await save_to_json(cleaned_data, json_cache_path)
|
| 154 |
print(f"Data saved to JSON at: {json_cache_path}")
|
| 155 |
else:
|
|
|
|
| 8 |
import asyncio
|
| 9 |
import aiofiles
|
| 10 |
from tvdbApiClient import fetch_and_cache_seasons, save_to_json
|
| 11 |
+
from services import RecentList, GenreList
|
| 12 |
|
| 13 |
load_dotenv()
|
| 14 |
THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
|
|
|
|
| 17 |
TOKEN_EXPIRY = None
|
| 18 |
THETVDB_TOKEN = None
|
| 19 |
recent_list = RecentList()
|
| 20 |
+
genre_list = GenreList()
|
| 21 |
|
| 22 |
def authenticate_thetvdb():
|
| 23 |
global THETVDB_TOKEN, TOKEN_EXPIRY
|
|
|
|
| 60 |
'originalLanguage': None,
|
| 61 |
'translations': {},
|
| 62 |
'artworks': [],
|
| 63 |
+
'genres':[],
|
| 64 |
'characters': [],
|
| 65 |
'spoken_languages': [],
|
| 66 |
'translations': {}
|
|
|
|
| 128 |
|
| 129 |
cleaned_data = clean_data(extended_data)
|
| 130 |
print(f"cleaning.. {original_title}")
|
| 131 |
+
|
| 132 |
+
genres = None
|
| 133 |
+
if cleaned_data['data'].get('genres'):
|
| 134 |
+
genres = cleaned_data['data'].get('genres')
|
| 135 |
+
print(f"genres extracted: {genres}")
|
| 136 |
|
| 137 |
description = None
|
| 138 |
if cleaned_data['data'].get('translations') and cleaned_data['data']['translations'].get('overviewTranslations'):
|
|
|
|
| 152 |
|
| 153 |
if media_type == 'movie':
|
| 154 |
recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'film')
|
| 155 |
+
genre_list.add_entry(genres, original_title, cleaned_data['data']['year'], description, image_link, 'movie')
|
| 156 |
elif media_type == 'series':
|
| 157 |
recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'series')
|
| 158 |
+
genre_list.add_entry(genres, original_title, cleaned_data['data']['year'], description, image_link, 'series')
|
| 159 |
print(f"adding.. {original_title}")
|
| 160 |
|
| 161 |
+
# Create the full directory path if it doesn't exist
|
| 162 |
+
full_dir_path = os.path.join(CACHE_DIR, media_type)
|
| 163 |
+
os.makedirs(full_dir_path, exist_ok=True)
|
| 164 |
+
|
| 165 |
+
# Now create the JSON cache path
|
| 166 |
+
json_cache_path = os.path.join(full_dir_path, f"{urllib.parse.quote(original_title)}.json")
|
| 167 |
await save_to_json(cleaned_data, json_cache_path)
|
| 168 |
print(f"Data saved to JSON at: {json_cache_path}")
|
| 169 |
else:
|
tvdbApiClient.py
CHANGED
|
@@ -3,7 +3,7 @@ import os
|
|
| 3 |
import logging
|
| 4 |
from pathlib import Path
|
| 5 |
import tvdb_v4_official
|
| 6 |
-
import
|
| 7 |
|
| 8 |
THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
|
| 9 |
CACHE_DIR = os.getenv("CACHE_DIR")
|
|
@@ -42,14 +42,7 @@ def filter_episode_data(episode):
|
|
| 42 |
"year": episode.get("year")
|
| 43 |
}
|
| 44 |
|
| 45 |
-
|
| 46 |
-
"""Save data to a JSON file asynchronously."""
|
| 47 |
-
try:
|
| 48 |
-
async with aiofiles.open(path, 'w', encoding='utf-8') as f:
|
| 49 |
-
await f.write(json.dumps(data, indent=4, ensure_ascii=False))
|
| 50 |
-
logging.info(f"Data saved to {path}")
|
| 51 |
-
except IOError as e:
|
| 52 |
-
logging.error(f"Error saving data to {path}: {e}")
|
| 53 |
|
| 54 |
async def fetch_and_cache_seasons(series_id):
|
| 55 |
"""Fetch and cache episodes for a given series ID asynchronously."""
|
|
|
|
| 3 |
import logging
|
| 4 |
from pathlib import Path
|
| 5 |
import tvdb_v4_official
|
| 6 |
+
from utils import save_to_json
|
| 7 |
|
| 8 |
THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
|
| 9 |
CACHE_DIR = os.getenv("CACHE_DIR")
|
|
|
|
| 42 |
"year": episode.get("year")
|
| 43 |
}
|
| 44 |
|
| 45 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
async def fetch_and_cache_seasons(series_id):
|
| 48 |
"""Fetch and cache episodes for a given series ID asynchronously."""
|
utils.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
import re
|
| 2 |
import aiofiles
|
| 3 |
import json
|
|
|
|
| 4 |
|
| 5 |
def is_valid_url(url):
|
| 6 |
"""
|
|
@@ -72,4 +73,21 @@ async def read_json_file(file_path: str):
|
|
| 72 |
data = await f.read()
|
| 73 |
return json.loads(data)
|
| 74 |
except Exception as e:
|
| 75 |
-
raise e
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import re
|
| 2 |
import aiofiles
|
| 3 |
import json
|
| 4 |
+
import logging
|
| 5 |
|
| 6 |
def is_valid_url(url):
|
| 7 |
"""
|
|
|
|
| 73 |
data = await f.read()
|
| 74 |
return json.loads(data)
|
| 75 |
except Exception as e:
|
| 76 |
+
raise e
|
| 77 |
+
|
| 78 |
+
async def read_json_file(file_path):
    """Read *file_path* asynchronously and return its parsed JSON content.

    NOTE(review): this redefines (and shadows) the read_json_file declared
    earlier in this module; the two copies should be consolidated.

    :param file_path: Path to the JSON file.
    :return: The deserialized JSON data.
    """
    # No try/except here: the original `except Exception as e: raise e`
    # was a no-op catch-and-rethrow, so exceptions propagate unchanged.
    async with aiofiles.open(file_path, 'r', encoding='utf-8') as f:
        data = await f.read()
    return json.loads(data)
|
| 85 |
+
|
| 86 |
+
async def save_to_json(data, path):
    """Save data to a JSON file asynchronously.

    Best-effort: I/O failures are logged and swallowed rather than raised;
    serialization errors (non-JSON-serializable *data*) still propagate.

    :param data: JSON-serializable object to persist.
    :param path: Destination file path.
    """
    try:
        async with aiofiles.open(path, 'w', encoding='utf-8') as f:
            await f.write(json.dumps(data, indent=4, ensure_ascii=False))
        # Lazy %-style args: the message is only formatted if the level is enabled.
        logging.info("Data saved to %s", path)
    except IOError as e:
        logging.error("Error saving data to %s: %s", path, e)
|