ChandimaPrabath committed on
Commit
65e40ba
·
1 Parent(s): b9c407e

music adaptation

Browse files
Files changed (7) hide show
  1. LoadBalancer.py +38 -191
  2. TODO.md +0 -4
  3. api.py +6 -29
  4. app.py +23 -221
  5. services.py +0 -181
  6. tvdb.py +0 -193
  7. tvdbApiClient.py +0 -95
LoadBalancer.py CHANGED
@@ -1,8 +1,5 @@
1
  import os
2
  from indexer import indexer
3
- import re
4
- import urllib.parse
5
- from tvdb import fetch_and_cache_json
6
  from threading import Event, Thread
7
  import asyncio
8
  import time
@@ -14,7 +11,7 @@ CACHE_DIR = os.getenv("CACHE_DIR")
14
 
15
  class LoadBalancer:
16
  def __init__(self, cache_dir, token, repo, polling_interval=4, max_retries=3, initial_delay=1):
17
- self.version = "0.0.0.4 Alpha"
18
  self.instances = []
19
  self.instances_health = {}
20
  self.polling_interval = polling_interval
@@ -25,10 +22,8 @@ class LoadBalancer:
25
  self.CACHE_DIR = cache_dir
26
  self.TOKEN = token
27
  self.REPO = repo
28
- self.FILM_STORE = {}
29
- self.TV_STORE = {}
30
  self.file_structure = None
31
- self.previous_file_structure = None # To keep track of previous content
32
 
33
  # Ensure CACHE_DIR exists
34
  if not os.path.exists(self.CACHE_DIR):
@@ -36,7 +31,6 @@ class LoadBalancer:
36
 
37
  # Initialize file structure and start prefetching
38
  self.file_structure = indexer()
39
- self.start_prefetching()
40
 
41
  # Start polling and file checking in separate threads
42
  polling_thread = Thread(target=self.start_polling)
@@ -53,84 +47,33 @@ class LoadBalancer:
53
  await self.start_prefetching() # Start prefetching
54
  await asyncio.sleep(300) # Sleep for 5 minutes
55
 
56
- def start_prefetching(self):
57
- """Start the metadata prefetching in the FastAPI event loop."""
58
- return asyncio.create_task(self.prefetch_metadata())
59
-
60
- async def prefetch_metadata(self):
61
- """Prefetch metadata for all items in the file structure."""
62
- tasks = []
63
- for item in self.file_structure:
64
- if 'contents' in item:
65
- for sub_item in item['contents']:
66
- original_title = sub_item['path'].split('/')[-1]
67
- media_type = 'series' if item['path'].startswith('tv') else 'movie'
68
- title = original_title
69
- year = None
70
-
71
- # Extract year from the title if available
72
- match = re.search(r'\((\d{4})\)', original_title)
73
- if match:
74
- year_str = match.group(1)
75
- if year_str.isdigit() and len(year_str) == 4:
76
- title = original_title[:match.start()].strip()
77
- year = int(year_str)
78
- else:
79
- parts = original_title.rsplit(' ', 1)
80
- if len(parts) > 1 and parts[-1].isdigit() and len(parts[-1]) == 4:
81
- title = parts[0].strip()
82
- year = int(parts[-1])
83
-
84
- # Schedule the fetch and cache task
85
- json_cache_path = os.path.join(self.CACHE_DIR, f"{urllib.parse.quote(original_title)}.json")
86
- if not os.path.exists(json_cache_path):
87
- tasks.append(fetch_and_cache_json(original_title, title, media_type, year))
88
- logging.info(f"Skipping.. {original_title} metadata already cached")
89
-
90
- # Run all tasks concurrently
91
- await asyncio.gather(*tasks)
92
-
93
  def get_reports(self):
94
  reports = self.instances_api.fetch_reports()
95
- temp_film_store = {}
96
- temp_tv_store = {}
97
 
98
  for instance_url in self.instances[:]:
99
  if instance_url in reports:
100
  report = reports[instance_url]
101
  logging.info(f"Report from {instance_url}: {report}")
102
- self.process_report(instance_url, report, temp_film_store, temp_tv_store)
103
  else:
104
  logging.error(f"Failed to get report from {instance_url}. Removing instance.")
105
  self.remove_instance(instance_url)
106
 
107
- self.FILM_STORE = temp_film_store
108
- self.TV_STORE = temp_tv_store
109
 
110
- def process_report(self, instance_url, report, temp_film_store, temp_tv_store):
111
- film_store = report.get('film_store', {})
112
- tv_store = report.get('tv_store', {})
113
  cache_size = report.get('cache_size')
114
 
115
  logging.info(f"Processing report from {instance_url}")
116
 
117
- # Update temporary film store
118
- for title, path in film_store.items():
119
- url = f"{instance_url}/api/get/film/{title.replace(' ', '%20')}"
120
- temp_film_store[title] = url
121
-
122
- # Update temporary TV store
123
- for title, seasons in tv_store.items():
124
- if title not in temp_tv_store:
125
- temp_tv_store[title] = {}
126
- for season, episodes in seasons.items():
127
- if season not in temp_tv_store[title]:
128
- temp_tv_store[title][season] = {}
129
- for episode, path in episodes.items():
130
- url = f"{instance_url}/api/get/tv/{title.replace(' ', '%20')}/{season.replace(' ', '%20')}/{episode.replace(' ', '%20')}"
131
- temp_tv_store[title][season][episode] = url
132
 
133
- logging.info("Film and TV Stores processed successfully.")
134
  self.update_instances_health(instance=instance_url, cache_size=cache_size)
135
 
136
  def start_polling(self):
@@ -160,156 +103,60 @@ class LoadBalancer:
160
  logging.info(f"Instance {instance_url} not found for removal.")
161
 
162
  def update_instances_health(self, instance, cache_size):
163
- self.instances_health[instance] = {"used":cache_size["cache_size"],
164
- "total": "50 GB"}
165
  logging.info(f"Updated instance {instance} with cache size {cache_size}")
166
 
167
- def download_film_to_best_instance(self, title):
168
  """
169
- Downloads a film to the first instance that has more free space on the self.instance_health list variable.
170
- The instance_health looks like this:
171
- {
172
- "https://unicone-studio-instance1.hf.space": {
173
- "total": "50 GB",
174
- "used": "3.33 GB"
175
- }
176
- }
177
- Args:
178
- title (str): The title of the film.
179
  """
180
  best_instance = None
181
  max_free_space = -1
182
-
183
- # Calculate free space for each instance
184
- for instance_url, space_info in self.instances_health.items():
185
- total_space = convert_to_gb(space_info['total'])
186
- used_space = convert_to_gb(space_info['used'])
187
- free_space = total_space - used_space
188
-
189
- if free_space > max_free_space:
190
- max_free_space = free_space
191
- best_instance = instance_url
192
-
193
- if best_instance:
194
- result = self.instances_api.download_film(best_instance, title)
195
- film_id = result["film_id"]
196
- status = result["status"]
197
- progress_url = f'{best_instance}/api/get/progress/{film_id}'
198
- response = {
199
- "film_id":film_id,
200
- "status":status,
201
- "progress_url":progress_url
202
- }
203
-
204
- return response
205
- else:
206
- logging.error("No suitable instance found for downloading the film.")
207
- return {"error": "No suitable instance found for downloading the film."}
208
 
209
- def download_episode_to_best_instance(self, title, season, episode):
210
- """
211
- Downloads a episode to the first instance that has more free space on the self.instance_health list variable.
212
- The instance_health looks like this:
213
- {
214
- "https://unicone-studio-instance1.hf.space": {
215
- "total": "50 GB",
216
- "used": "3.33 GB"
217
- }
218
- }
219
- Args:
220
- title (str): The title of the Tv show.
221
- season (str): The season of the Tv show.
222
- episode (str): The title of the Tv show.
223
- """
224
- best_instance = None
225
- max_free_space = -1
226
-
227
- # Calculate free space for each instance
228
  for instance_url, space_info in self.instances_health.items():
229
  total_space = convert_to_gb(space_info['total'])
230
  used_space = convert_to_gb(space_info['used'])
231
  free_space = total_space - used_space
232
-
233
  if free_space > max_free_space:
234
  max_free_space = free_space
235
  best_instance = instance_url
236
-
237
  if best_instance:
238
- result = self.instances_api.download_episode(best_instance, title, season, episode)
239
- episode_id = result["episode_id"]
240
  status = result["status"]
241
- progress_url = f'{best_instance}/api/get/progress/{episode_id}'
242
  response = {
243
- "episode_id":episode_id,
244
- "status":status,
245
- "progress_url":progress_url
246
  }
247
 
248
  return response
249
  else:
250
- logging.error("No suitable instance found for downloading the film.")
251
- return {"error": "No suitable instance found for downloading the film."}
252
 
253
- def find_movie_path(self, title):
254
- """Find the path of the movie in the JSON data based on the title."""
255
  for directory in self.file_structure:
256
- if directory['type'] == 'directory' and directory['path'] == 'films':
257
  for sub_directory in directory['contents']:
258
- if sub_directory['type'] == 'directory':
259
- for item in sub_directory['contents']:
260
- if item['type'] == 'file' and title.lower() in item['path'].lower():
261
- return item['path']
262
- return None
263
-
264
- def find_tv_path(self, title):
265
- """Find the path of the TV show in the JSON data based on the title."""
266
- for directory in self.file_structure:
267
- if directory['type'] == 'directory' and directory['path'] == 'tv':
268
- for sub_directory in directory['contents']:
269
- if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
270
  return sub_directory['path']
271
  return None
272
 
273
- def get_tv_structure(self, title):
274
- """Find the path of the TV show in the JSON data based on the title."""
275
- for directory in self.file_structure:
276
- if directory['type'] == 'directory' and directory['path'] == 'tv':
277
- for sub_directory in directory['contents']:
278
- if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
279
- return sub_directory
280
- return None
281
-
282
- def get_film_id(self, title):
283
- """Generate a film ID based on the title."""
284
  return title.replace(" ", "_").lower()
285
 
286
- def get_all_tv_shows(self):
287
- """Get all TV shows from the indexed cache structure JSON file."""
288
- tv_shows = {}
289
- for directory in self.file_structure:
290
- if directory['type'] == 'directory' and directory['path'] == 'tv':
291
- for sub_directory in directory['contents']:
292
- if sub_directory['type'] == 'directory':
293
- show_title = sub_directory['path'].split('/')[-1]
294
- tv_shows[show_title] = []
295
- for season_directory in sub_directory['contents']:
296
- if season_directory['type'] == 'directory':
297
- season = season_directory['path'].split('/')[-1]
298
- for episode in season_directory['contents']:
299
- if episode['type'] == 'file':
300
- tv_shows[show_title].append({
301
- "season": season,
302
- "episode": episode['path'].split('/')[-1],
303
- "path": episode['path']
304
- })
305
- return tv_shows
306
-
307
- def get_all_films(self):
308
- """Get all films from the indexed cache structure JSON file."""
309
- films = []
310
  for directory in self.file_structure:
311
- if directory['type'] == 'directory' and directory['path'] == 'films':
312
  for sub_directory in directory['contents']:
313
- if sub_directory['type'] == 'directory':
314
- films.append(sub_directory['path'])
315
- return films
 
1
  import os
2
  from indexer import indexer
 
 
 
3
  from threading import Event, Thread
4
  import asyncio
5
  import time
 
11
 
12
  class LoadBalancer:
13
  def __init__(self, cache_dir, token, repo, polling_interval=4, max_retries=3, initial_delay=1):
14
+ self.version = "0.0.1 Alpha"
15
  self.instances = []
16
  self.instances_health = {}
17
  self.polling_interval = polling_interval
 
22
  self.CACHE_DIR = cache_dir
23
  self.TOKEN = token
24
  self.REPO = repo
25
+ self.MUSIC_STORE = {}
 
26
  self.file_structure = None
 
27
 
28
  # Ensure CACHE_DIR exists
29
  if not os.path.exists(self.CACHE_DIR):
 
31
 
32
  # Initialize file structure and start prefetching
33
  self.file_structure = indexer()
 
34
 
35
  # Start polling and file checking in separate threads
36
  polling_thread = Thread(target=self.start_polling)
 
47
  await self.start_prefetching() # Start prefetching
48
  await asyncio.sleep(300) # Sleep for 5 minutes
49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  def get_reports(self):
51
  reports = self.instances_api.fetch_reports()
52
+ temp_music_store = {}
 
53
 
54
  for instance_url in self.instances[:]:
55
  if instance_url in reports:
56
  report = reports[instance_url]
57
  logging.info(f"Report from {instance_url}: {report}")
58
+ self.process_report(instance_url, report, temp_music_store)
59
  else:
60
  logging.error(f"Failed to get report from {instance_url}. Removing instance.")
61
  self.remove_instance(instance_url)
62
 
63
+ self.MUSIC_STORE = temp_music_store
 
64
 
65
+ def process_report(self, instance_url, report, temp_music_store):
66
+ music_store = report.get('music_store', {})
 
67
  cache_size = report.get('cache_size')
68
 
69
  logging.info(f"Processing report from {instance_url}")
70
 
71
+ # Update temporary music store
72
+ for title, path in music_store.items():
73
+ url = f"{instance_url}/api/get/music/{title.replace(' ', '%20')}"
74
+ temp_music_store[title] = url
 
 
 
 
 
 
 
 
 
 
 
75
 
76
+ logging.info("Music Store processed successfully.")
77
  self.update_instances_health(instance=instance_url, cache_size=cache_size)
78
 
79
  def start_polling(self):
 
103
  logging.info(f"Instance {instance_url} not found for removal.")
104
 
105
  def update_instances_health(self, instance, cache_size):
106
+ self.instances_health[instance] = {"used": cache_size["cache_size"], "total": "50 GB"}
 
107
  logging.info(f"Updated instance {instance} with cache size {cache_size}")
108
 
109
+ def download_music_to_best_instance(self, file_name):
110
  """
111
+ Downloads a music file to the first instance that has more free space on the self.instance_health list variable.
 
 
 
 
 
 
 
 
 
112
  """
113
  best_instance = None
114
  max_free_space = -1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  for instance_url, space_info in self.instances_health.items():
117
  total_space = convert_to_gb(space_info['total'])
118
  used_space = convert_to_gb(space_info['used'])
119
  free_space = total_space - used_space
120
+
121
  if free_space > max_free_space:
122
  max_free_space = free_space
123
  best_instance = instance_url
124
+
125
  if best_instance:
126
+ result = self.instances_api.download_music(best_instance, file_name)
127
+ music_id = result["music_id"]
128
  status = result["status"]
129
+ progress_url = f'{best_instance}/api/get/progress/{music_id}'
130
  response = {
131
+ "music_id": music_id,
132
+ "status": status,
133
+ "progress_url": progress_url
134
  }
135
 
136
  return response
137
  else:
138
+ logging.error("No suitable instance found for downloading the music.")
139
+ return {"error": "No suitable instance found for downloading the music."}
140
 
141
+ def find_music_path(self, title):
142
+ """Find the path of the music in the indexed data based on the title."""
143
  for directory in self.file_structure:
144
+ if directory['type'] == 'directory':
145
  for sub_directory in directory['contents']:
146
+ if sub_directory['type'] == 'file' and title.lower() in sub_directory['path'].lower():
 
 
 
 
 
 
 
 
 
 
 
147
  return sub_directory['path']
148
  return None
149
 
150
+ def get_music_id(self, title):
151
+ """Generate a unique music ID based on the title."""
 
 
 
 
 
 
 
 
 
152
  return title.replace(" ", "_").lower()
153
 
154
+ def get_all_music(self):
155
+ """Get all music files from the indexed file structure."""
156
+ music_files = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  for directory in self.file_structure:
158
+ if directory['type'] == 'directory':
159
  for sub_directory in directory['contents']:
160
+ if sub_directory['type'] == 'file':
161
+ music_files.append(sub_directory['path'])
162
+ return music_files
TODO.md DELETED
@@ -1,4 +0,0 @@
1
- create endpoints for
2
- * /api/get/film/{title} `Endpoint to get the movie by title.`
3
-
4
- * /api/get/tv/{title}/{season}/{episode} `Endpoint to get the episode by title, season and episode.`
 
 
 
 
 
api.py CHANGED
@@ -16,43 +16,20 @@ class InstancesAPI:
16
  logging.error(f"Error contacting instance {instance_url}: {e}")
17
  return reports
18
 
19
- def download_film(self, instance_url, title):
20
  """
21
- Download a film to an instance.
22
 
23
- If the download started, it returns a JSON like this:
24
  example:
25
- {"film_id": "my_spy_2020",
26
  "status": "Download started"}
27
 
28
- If the film has already been downloaded, it will return the video file.
29
  """
30
  data = {}
31
  try:
32
- response = requests.get(f"{instance_url}/api/get/film/{title}")
33
- response.raise_for_status()
34
- data = response.json()
35
-
36
- except requests.exceptions.RequestException as e:
37
- logging.error(f"Error contacting instance {instance_url}: {e}")
38
- data = {"error": str(e)}
39
-
40
- return data
41
-
42
- def download_episode(self, instance_url, title, season, episode):
43
- """
44
- Download a film to an instance.
45
-
46
- If the download started, it returns a JSON like this:
47
- example:
48
- {"film_id": "my_spy_2020",
49
- "status": "Download started"}
50
-
51
- If the film has already been downloaded, it will return the video file.
52
- """
53
- data = {}
54
- try:
55
- response = requests.get(f"{instance_url}/api/get/tv/{title}/{season}/{episode}")
56
  response.raise_for_status()
57
  data = response.json()
58
 
 
16
  logging.error(f"Error contacting instance {instance_url}: {e}")
17
  return reports
18
 
19
+ def download_music(self, instance_url, title):
20
  """
21
+ Download a music file to an instance.
22
 
23
+ If the download starts, it returns a JSON like this:
24
  example:
25
+ {"music_id": "song_title_2024",
26
  "status": "Download started"}
27
 
28
+ If the music file has already been downloaded, it will return the audio file.
29
  """
30
  data = {}
31
  try:
32
+ response = requests.get(f"{instance_url}/api/get/music/{title}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  response.raise_for_status()
34
  data = response.json()
35
 
app.py CHANGED
@@ -1,12 +1,10 @@
1
- from fastapi import FastAPI,HTTPException, Request, Query
2
  from fastapi.responses import JSONResponse
3
- from typing import Optional, List
4
- from LoadBalancer import LoadBalancer
5
  import logging
6
  import os
7
- import urllib.parse
8
- from utils import read_json_file, is_valid_url
9
- from tvdb import recent_list, genre_list
10
 
11
  CACHE_DIR = os.getenv("CACHE_DIR")
12
  TOKEN = os.getenv("TOKEN")
@@ -17,7 +15,6 @@ app = FastAPI()
17
  @app.on_event("startup")
18
  async def startup_event():
19
  global load_balancer
20
-
21
  load_balancer = LoadBalancer(cache_dir=CACHE_DIR, token=TOKEN, repo=REPO)
22
 
23
  @app.get("/")
@@ -47,172 +44,15 @@ async def register_instance(request: Request):
47
 
48
  @app.get("/api/get/file_structure")
49
  async def get_file_structure():
50
- return load_balancer.file_structure
51
-
52
- @app.get("/api/get/movie/store")
53
- async def get_movie_store():
54
- return load_balancer.FILM_STORE
55
-
56
- @app.get("/api/get/series/store")
57
- async def get_series_store():
58
- return load_balancer.TV_STORE
59
-
60
- @app.get("/api/get/movie/all")
61
- async def get_all_movies_api():
62
- return load_balancer.get_all_films()
63
-
64
- @app.get("/api/get/series/all")
65
- async def get_all_tvshows_api():
66
- return load_balancer.get_all_tv_shows()
67
-
68
- @app.get("/api/get/recent")
69
- async def get_recent_items(limit: int = 5):
70
- # Get sorted entries
71
- recent_films = recent_list.get_sorted_entries('film')
72
- recent_series = recent_list.get_sorted_entries('series')
73
-
74
- # Slice the lists to only return the desired number of items
75
- limited_films = recent_films[:limit]
76
- limited_series = recent_series[:limit]
77
-
78
- # Return combined results
79
- return JSONResponse(content={
80
- 'movies': limited_films,
81
- 'series': limited_series
82
- })
83
-
84
- @app.get("/api/get/genre")
85
- async def get_genre_items(genre: List[str] = Query(...), media_type: Optional[str] = None, limit: int = 5):
86
- """
87
- Get recent items from specified genres with an optional media type filter and a limit on the number of results.
88
-
89
- :param genre: The genres to filter by (e.g., 'Comedy').
90
- :param media_type: Optional. Filter by media type ('movie' or 'series').
91
- :param limit: The maximum number of items to return for each media type.
92
- :return: A JSON response containing the filtered items.
93
- """
94
- # Get sorted entries based on genres and media type
95
- entries = genre_list.get_entries_by_multiple_genres(genre, media_type=media_type)
96
-
97
- # Separate entries by media type
98
- movies = [entry for entry in entries if entry[4] == 'movie']
99
- series = [entry for entry in entries if entry[4] == 'series']
100
-
101
- # Limit the number of items for each media type
102
- limited_movies = movies[:limit]
103
- limited_series = series[:limit]
104
-
105
- # Organize the results by media type
106
- results = {
107
- 'movies': limited_movies,
108
- 'series': limited_series
109
- }
110
-
111
- # Return the results in a JSON response
112
- return JSONResponse(content=results)
113
-
114
- @app.get("/api/get/movie/metadata/{title}")
115
- async def get_movie_metadata_api(title: str):
116
- """Endpoint to get the movie metadata by title."""
117
- if not title:
118
- raise HTTPException(status_code=400, detail="No title provided")
119
-
120
- full_dir_path = os.path.join(CACHE_DIR, 'movie')
121
- json_cache_path = os.path.join(full_dir_path,f"{urllib.parse.quote(title)}.json")
122
-
123
- if os.path.exists(json_cache_path):
124
- data = await read_json_file(json_cache_path)
125
- return JSONResponse(content=data)
126
-
127
- raise HTTPException(status_code=404, detail="Metadata not found")
128
-
129
- @app.get("/api/get/movie/card/{title}")
130
- async def get_movie_card_api(title: str):
131
- """Endpoint to get the movie metadata by title."""
132
- if not title:
133
- raise HTTPException(status_code=400, detail="No title provided")
134
-
135
- full_dir_path = os.path.join(CACHE_DIR, 'movie')
136
- json_cache_path = os.path.join(full_dir_path,f"{urllib.parse.quote(title)}.json")
137
-
138
- if os.path.exists(json_cache_path):
139
- data = await read_json_file(json_cache_path)
140
- image = data['data']['image']
141
- eng_title = None
142
- banner = None
143
- if data['data'].get('translations') and data['data']['translations'].get('nameTranslations'):
144
- for name in data['data']['translations']['nameTranslations']:
145
- if name['language'] == 'eng':
146
- eng_title = name.get('name')
147
- break
148
-
149
- if data['data'].get('artworks'):
150
- for artwork in data['data']['artworks']:
151
- if artwork['type'] == 15:
152
- banner = artwork
153
- break
154
- year = data['data']['year']
155
- return JSONResponse(content={'title':eng_title or title, 'year': year, 'image': image, 'banner':banner})
156
-
157
- raise HTTPException(status_code=404, detail="Card not found")
158
 
159
- @app.get("/api/get/series/metadata/{title}")
160
- async def get_series_metadata_api(title: str):
161
- """Endpoint to get the TV show metadata by title."""
162
- if not title:
163
- raise HTTPException(status_code=400, detail="No title provided")
164
- full_dir_path = os.path.join(CACHE_DIR, 'series')
165
- json_cache_path = os.path.join(full_dir_path,f"{urllib.parse.quote(title)}.json")
166
-
167
- if os.path.exists(json_cache_path):
168
- data = await read_json_file(json_cache_path)
169
- # Add the file structure to the metadata
170
- tv_structure_data = load_balancer.get_tv_structure(title)
171
- if tv_structure_data:
172
- data['file_structure'] = tv_structure_data
173
-
174
- return JSONResponse(content=data)
175
-
176
- raise HTTPException(status_code=404, detail="Metadata not found")
177
 
178
- @app.get("/api/get/series/card/{title}")
179
- async def get_series_card_api(title: str):
180
- """Endpoint to get the TV show metadata by title."""
181
- if not title:
182
- raise HTTPException(status_code=400, detail="No title provided")
183
- full_dir_path = os.path.join(CACHE_DIR, 'series')
184
- json_cache_path = os.path.join(full_dir_path,f"{urllib.parse.quote(title)}.json")
185
-
186
- if os.path.exists(json_cache_path):
187
- data = await read_json_file(json_cache_path)
188
- image = data['data']['image']
189
- eng_title = None
190
- if data['data'].get('translations') and data['data']['translations'].get('nameTranslations'):
191
- for name in data['data']['translations']['nameTranslations']:
192
- if name['language'] == 'eng':
193
- eng_title = name.get('name')
194
- break
195
- year = data['data']['year']
196
- return JSONResponse(content={'title':eng_title or title, 'year': year, 'image': image})
197
-
198
- raise HTTPException(status_code=404, detail="Card not found")
199
-
200
-
201
- @app.get("/api/get/series/metadata/{series_id}/{season}")
202
- async def get_season_metadata_api(series_id: int, season: str):
203
- """Endpoint to get the TV show season metadata by id and season."""
204
- if not season:
205
- raise HTTPException(status_code=400, detail="Season must be provided and cannot be empty")
206
-
207
- # Convert series_id to string before joining the path
208
- json_cache_path = os.path.join(CACHE_DIR, "metadata", str(series_id), f"{season}.json")
209
- print(json_cache_path)
210
-
211
- if os.path.exists(json_cache_path):
212
- data = await read_json_file(json_cache_path)
213
- return JSONResponse(content=data)
214
-
215
- raise HTTPException(status_code=404, detail="Metadata not found")
216
 
217
  @app.get('/api/get/instances')
218
  async def get_instances():
@@ -222,61 +62,23 @@ async def get_instances():
222
  async def get_instances_health():
223
  return load_balancer.instances_health
224
 
225
- @app.get("/api/get/movie/{title}")
226
- async def get_movie_api(title: str):
227
- """Endpoint to get the movie by title."""
228
- if not title:
229
- raise HTTPException(status_code=400, detail="Title parameter is required")
230
 
231
- # Check if the movie is already cached
232
- if title in load_balancer.FILM_STORE:
233
- url = load_balancer.FILM_STORE[title]
234
  return JSONResponse(content={"url": url})
235
 
236
- movie_path = load_balancer.find_movie_path(title)
237
 
238
- if not movie_path:
239
- raise HTTPException(status_code=404, detail="Movie not found")
240
 
241
  # Start the download in an instance
242
- response = load_balancer.download_film_to_best_instance(title=title)
243
  if response:
244
  return JSONResponse(content=response)
245
-
246
- @app.get("/api/get/series/{title}/{season}/{episode}")
247
- async def get_tv_show_api(title: str, season: str, episode: str):
248
- """Endpoint to get the TV show by title, season, and episode."""
249
- if not title or not season or not episode:
250
- raise HTTPException(status_code=400, detail="Title, season, and episode parameters are required")
251
-
252
- # Check if the episode is already cached
253
- if title in load_balancer.TV_STORE and season in load_balancer.TV_STORE[title]:
254
- for ep in load_balancer.TV_STORE[title][season]:
255
- if episode in ep:
256
- url = load_balancer.TV_STORE[title][season][ep]
257
- return JSONResponse(content={"url": url})
258
-
259
- tv_path = load_balancer.find_tv_path(title)
260
-
261
- if not tv_path:
262
- raise HTTPException(status_code=404, detail="TV show not found")
263
-
264
- episode_path = None
265
- for directory in load_balancer.file_structure:
266
- if directory['type'] == 'directory' and directory['path'] == 'tv':
267
- for sub_directory in directory['contents']:
268
- if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
269
- for season_dir in sub_directory['contents']:
270
- if season_dir['type'] == 'directory' and season in season_dir['path']:
271
- for episode_file in season_dir['contents']:
272
- if episode_file['type'] == 'file' and episode in episode_file['path']:
273
- episode_path = episode_file['path']
274
- break
275
-
276
- if not episode_path:
277
- raise HTTPException(status_code=404, detail="Episode not found")
278
-
279
- # Start the download in an instance
280
- response = load_balancer.download_episode_to_best_instance(title=title, season=season, episode=episode)
281
- if response:
282
- return JSONResponse(content=response)
 
1
+ from fastapi import FastAPI, HTTPException, Request
2
  from fastapi.responses import JSONResponse
 
 
3
  import logging
4
  import os
5
+ from typing import List
6
+ from LoadBalancer import LoadBalancer
7
+ from utils import is_valid_url
8
 
9
  CACHE_DIR = os.getenv("CACHE_DIR")
10
  TOKEN = os.getenv("TOKEN")
 
15
  @app.on_event("startup")
16
  async def startup_event():
17
  global load_balancer
 
18
  load_balancer = LoadBalancer(cache_dir=CACHE_DIR, token=TOKEN, repo=REPO)
19
 
20
  @app.get("/")
 
44
 
45
  @app.get("/api/get/file_structure")
46
  async def get_file_structure():
47
+ return load_balancer.file_structure
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
+ @app.get("/api/get/music/store")
50
+ async def get_music_store():
51
+ return load_balancer.MUSIC_STORE
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
+ @app.get("/api/get/music/all")
54
+ async def get_all_music_api():
55
+ return load_balancer.get_all_music()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
  @app.get('/api/get/instances')
58
  async def get_instances():
 
62
  async def get_instances_health():
63
  return load_balancer.instances_health
64
 
65
+ @app.get("/api/get/music/{file_name}")
66
+ async def get_music_api(file_name: str):
67
+ """Endpoint to get the music file by title."""
68
+ if not file_name:
69
+ raise HTTPException(status_code=400, detail="file_name parameter is required")
70
 
71
+ # Check if the music file is already cached
72
+ if file_name in load_balancer.MUSIC_STORE:
73
+ url = load_balancer.MUSIC_STORE[file_name]
74
  return JSONResponse(content={"url": url})
75
 
76
+ music_path = load_balancer.find_music_path(file_name)
77
 
78
+ if not music_path:
79
+ raise HTTPException(status_code=404, detail="Music file not found")
80
 
81
  # Start the download in an instance
82
+ response = load_balancer.download_music_to_best_instance(file_name=file_name)
83
  if response:
84
  return JSONResponse(content=response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
services.py DELETED
@@ -1,181 +0,0 @@
1
- import bisect
2
-
3
class RecentList:
    """Keeps films and series ordered by release year, newest first.

    Entries are stored per title; re-adding an existing title replaces it.
    Ordering is maintained with sorted lists of ``(-year, title)`` tuples,
    so ascending tuple order yields newest-first iteration.
    """

    def __init__(self):
        # title -> (year, description, image_link)
        self.films = {}
        self.series = {}
        # Sorted lists of (-year, title) tuples, one per media type.
        self.sorted_films = []
        self.sorted_series = []

    def add_entry(self, title, year, description, image_link, media_type):
        """Add or update an entry.

        :param title: display title used as the unique key.
        :param year: release year (int or int-convertible string).
        :param description: short overview text (may be None).
        :param image_link: URL of an image (may be None).
        :param media_type: 'film' or 'series'; any other value is ignored.
        :raises ValueError: if *year* cannot be converted to int.
        """
        if media_type == 'film':
            self._update_entry(self.films, self.sorted_films, title, year, description, image_link)
        elif media_type == 'series':
            self._update_entry(self.series, self.sorted_series, title, year, description, image_link)

    def _update_entry(self, dictionary, sorted_list, title, year, description, image_link):
        """Insert or replace *title*, keeping *sorted_list* in (-year, title) order."""
        try:
            year = int(year)
        except ValueError:
            raise ValueError(f"Invalid year: {year}. Year must be an integer.")

        if title in dictionary:
            # Drop the stale sorted-list entry before re-inserting.
            old_year = dictionary[title][0]
            try:
                sorted_list.remove((-old_year, title))
            except ValueError:
                pass  # Stale entry already absent; nothing to remove.

        dictionary[title] = (year, description, image_link)
        bisect.insort(sorted_list, (-year, title))

    def get_sorted_entries(self, media_type):
        """Return [(title, year, description, image_link), ...] newest first.

        Fix: previously an unknown *media_type* fell off the end of the
        if/elif chain and returned None; now it returns [] so callers can
        always iterate the result.
        """
        if media_type == 'film':
            return [(title, -year, self.films[title][1], self.films[title][2])
                    for year, title in self.sorted_films]
        if media_type == 'series':
            return [(title, -year, self.series[title][1], self.series[title][2])
                    for year, title in self.sorted_series]
        return []  # Unknown media type: empty list instead of None.
46
-
47
- import bisect
48
-
49
class GenreList:
    """Groups media entries by genre, each genre ordered newest first.

    Internal layout: ``genres[name] = {'entries': {title: (year, description,
    image_link, media_type)}, 'sorted_entries': [(-year, title), ...]}``.
    """

    def __init__(self):
        # Genre name -> bucket of entries plus a (-year, title) sorted index.
        self.genres = {}

    def add_entry(self, genres, title, year, description, image_link, media_type):
        """Register *title* under every genre in *genres*.

        :param genres: iterable of genre dicts carrying at least a 'name' key.
        :param title: display title used as the unique key.
        :param year: release year (int or int-convertible string).
        :param description: short overview text (may be None).
        :param image_link: URL of an image (may be None).
        :param media_type: media kind tag stored with the entry.
        :raises ValueError: if *year* cannot be converted to int.
        """
        for genre in genres:
            bucket = self.genres.setdefault(genre['name'], {'entries': {}, 'sorted_entries': []})
            self._update_genre(bucket['entries'], bucket['sorted_entries'],
                               title, year, description, image_link, media_type)

    def _update_genre(self, dictionary, sorted_list, title, year, description, image_link, media_type):
        """Insert or replace *title*, keeping *sorted_list* in (-year, title) order."""
        try:
            year = int(year)
        except ValueError:
            raise ValueError(f"Invalid year: {year}. Year must be an integer.")

        previous = dictionary.get(title)
        if previous is not None:
            # Remove the stale sorted-index entry before re-inserting.
            try:
                sorted_list.remove((-previous[0], title))
            except ValueError:
                pass  # Stale entry already absent.

        dictionary[title] = (year, description, image_link, media_type)
        bisect.insort(sorted_list, (-year, title))

    def get_sorted_entries(self, genre_name, media_type=None):
        """Return entries of one genre, newest first.

        :param genre_name: genre to read; unknown genres yield [].
        :param media_type: optional media-kind filter.
        :return: list of (title, year, description, image_link, media_type).
        """
        bucket = self.genres.get(genre_name)
        if bucket is None:
            return []
        rows = []
        for neg_year, title in bucket['sorted_entries']:
            _, description, image_link, kind = bucket['entries'][title]
            if media_type is None or kind == media_type:
                rows.append((title, -neg_year, description, image_link, kind))
        return rows

    def get_entries_by_multiple_genres(self, genre_names, media_type=None):
        """Return entries present in *every* listed genre, newest first.

        Entry details are taken from the first listed genre. Returns [] when
        the list is empty or any genre after the first is unknown.

        :param genre_names: list of genre names to intersect.
        :param media_type: optional media-kind filter.
        :return: list of (title, year, description, image_link, media_type).
        """
        if not genre_names:
            return []

        first = genre_names[0]
        common = set(self.genres[first]['entries']) if first in self.genres else set()

        for name in genre_names[1:]:
            if name not in self.genres:
                return []
            common &= set(self.genres[name]['entries'])

        results = []
        if common:
            details = self.genres[first]['entries']
            for title in common:
                year, description, image_link, kind = details[title]
                if media_type is None or kind == media_type:
                    results.append((title, year, description, image_link, kind))
            results.sort(key=lambda row: -row[1])  # Newest first.
        return results

    def remove_genre(self, genre_name):
        """Delete an entire genre; no-op when unknown."""
        self.genres.pop(genre_name, None)

    def remove_entry_from_genre(self, genre_name, title):
        """Delete *title* from *genre_name*; no-op when either is unknown."""
        bucket = self.genres.get(genre_name)
        if bucket is None or title not in bucket['entries']:
            return
        year = bucket['entries'].pop(title)[0]
        bucket['sorted_entries'].remove((-year, title))
162
-
163
-
164
- # Example usage:
165
- # genre_list = GenreList()
166
- # genres = [
167
- # {"id": 15, "name": "Comedy", "slug": "comedy"},
168
- # {"id": 17, "name": "Animation", "slug": "animation"},
169
- # {"id": 27, "name": "Anime", "slug": "anime"}
170
- # ]
171
- # genres2 = [
172
- # {"id": 15, "name": "Comedy", "slug": "comedy"},
173
- # {"id": 17, "name": "Animation", "slug": "animation"},
174
- # {"id": 27, "name": "Anime", "slug": "anime"}
175
- # ]
176
- # genre_list.add_entry(genres, 'Movie Title', 2023, 'Description here', 'image_link_here', 'movie')
177
- # genre_list.add_entry(genres2, 'Series Title', 2022, 'Series Description', 'series_image_link_here', 'movie')
178
-
179
- # # Fetch entries that belong to both 'Comedy' and 'Animation'
180
- # sorted_entries = genre_list.get_entries_by_multiple_genres(['Comedy', 'Animation'], media_type='movie')
181
- # print(sorted_entries) # This should return only 'Movie Title' which is in both 'Comedy' and 'Animation'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tvdb.py DELETED
@@ -1,193 +0,0 @@
1
- # tvdb.py
2
- import os
3
- import requests
4
- import urllib.parse
5
- from datetime import datetime, timedelta
6
- from dotenv import load_dotenv
7
- import json
8
- import asyncio
9
- import aiofiles
10
- from tvdbApiClient import fetch_and_cache_seasons, save_to_json
11
- from services import RecentList, GenreList
12
-
13
- load_dotenv()
14
- THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
15
- THETVDB_API_URL = os.getenv("THETVDB_API_URL")
16
- CACHE_DIR = os.getenv("CACHE_DIR")
17
- TOKEN_EXPIRY = None
18
- THETVDB_TOKEN = None
19
- recent_list = RecentList()
20
- genre_list = GenreList()
21
-
22
def authenticate_thetvdb():
    """Log in to TheTVDB API and cache the bearer token module-wide.

    On success sets THETVDB_TOKEN and a TOKEN_EXPIRY 30 days out; on any
    request failure both globals are reset to None.
    """
    global THETVDB_TOKEN, TOKEN_EXPIRY
    auth_url = f"{THETVDB_API_URL}/login"
    payload = {"apikey": THETVDB_API_KEY}
    try:
        print("Authenticating with TheTVDB API...")
        response = requests.post(auth_url, json=payload)
        response.raise_for_status()
        THETVDB_TOKEN = response.json()['data']['token']
        TOKEN_EXPIRY = datetime.now() + timedelta(days=30)
        print("Authentication successful.")
    except requests.RequestException as e:
        print(f"Authentication failed: {e}")
        THETVDB_TOKEN = None
        TOKEN_EXPIRY = None
40
-
41
def get_thetvdb_token():
    """Return a valid TheTVDB bearer token, re-authenticating when needed.

    Re-authenticates when no token is cached, the expiry is unknown, or the
    cached token has expired. May return None if authentication fails.
    """
    global THETVDB_TOKEN, TOKEN_EXPIRY
    # Fix: guard TOKEN_EXPIRY being None — the original compared
    # datetime.now() >= None, raising TypeError when a token was set but
    # the expiry had been cleared (e.g. after a failed re-auth).
    if not THETVDB_TOKEN or TOKEN_EXPIRY is None or datetime.now() >= TOKEN_EXPIRY:
        authenticate_thetvdb()
    return THETVDB_TOKEN
46
-
47
def clean_data(data):
    """Reduce a TheTVDB extended payload to the subset of fields we cache.

    :param data: raw API response dict; only its 'data' mapping is inspected.
    :return: dict with the same top-level key(s), restricted to whitelisted
        fields that are actually present in the input.
    """
    print("Cleaning data...")
    # Whitelist of fields to copy through; the values are placeholders only.
    # Fix: the original dict literal declared 'translations' twice — the
    # duplicate (dead) key has been removed.
    fields_to_keep = {
        "data": {
            'id': None,
            'name': None,
            'image': None,
            'score': None,
            'runtime': None,
            'releases': None,
            'year': None,
            'contentRatings': None,
            'originalCountry': None,
            'originalLanguage': None,
            'translations': {},
            'artworks': [],
            'genres': [],
            'characters': [],
            'spoken_languages': [],
        }
    }
    cleaned_data = {}

    for key in fields_to_keep:  # Only the keys are needed, not the values.
        if key in data:
            cleaned_data[key] = {
                field: data[key][field]
                for field in fields_to_keep[key]
                if field in data[key]
            }

    print("Data cleaned successfully.")
    return cleaned_data
80
-
81
async def fetch_and_cache_json(original_title, title, media_type, year=None):
    """Search TheTVDB for *title*, fetch its extended record, and cache it.

    Side effects: updates the module-level recent_list and genre_list, may
    trigger fetch_and_cache_seasons for series, and writes a cleaned JSON
    file under CACHE_DIR/<media_type>/.

    :param original_title: display title used for cache keys and file names.
    :param title: search query sent to the API.
    :param media_type: initial search type hint ('movie' or 'series').
    :param year: optional release year to narrow the search.
    """
    print(f"Fetching data for: {original_title}")
    if year:
        search_url = f"{THETVDB_API_URL}/search?query={urllib.parse.quote(title)}&type={media_type}&year={year}"
    else:
        search_url = f"{THETVDB_API_URL}/search?query={urllib.parse.quote(title)}&type={media_type}"

    token = get_thetvdb_token()
    if not token:
        print("Authentication token not available.")
        return

    headers = {
        "Authorization": f"Bearer {token}",
        "accept": "application/json",
    }

    try:
        print(f"Sending search request to: {search_url}")
        response = requests.get(search_url, headers=headers)
        print(f"Search response status code: {response.status_code}")
        response.raise_for_status()
        data = response.json()

        if 'data' in data and data['data']:
            first_result = data['data'][0]
            tvdb_id = first_result.get('tvdb_id')
            # NOTE(review): this reassigns the *media_type* parameter to the
            # type reported by the search result; all later branching uses
            # the API's value, not the caller's.
            media_type = first_result.get('type')
            print(f"Found TVDB ID: {tvdb_id} with media type: {media_type}")

            if not tvdb_id:
                print("TVDB ID not found in the search results")
                return

            if media_type == 'movie':
                extended_url = f"{THETVDB_API_URL}/movies/{tvdb_id}/extended?meta=translations"
            elif media_type == 'series':
                extended_url = f"{THETVDB_API_URL}/series/{tvdb_id}/extended?meta=translations"
                # Series also get their per-season episode cache populated.
                await fetch_and_cache_seasons(tvdb_id)
            else:
                print(f"Unsupported media type: {media_type}")
                return

            response = requests.get(extended_url, headers=headers)
            print(f"Extended data response status code: {response.status_code}")
            response.raise_for_status()
            extended_data = response.json()

            cleaned_data = clean_data(extended_data)
            print(f"cleaning.. {original_title}")

            # Genres may stay None when the record carries none.
            # NOTE(review): genre_list.add_entry iterates its first argument,
            # so a None here raises and is swallowed by the broad except
            # below — confirm whether that is intended.
            genres = None
            if cleaned_data['data'].get('genres'):
                genres = cleaned_data['data'].get('genres')
            print(f"genres extracted: {genres}")

            # Prefer the English overview translation, if present.
            description = None
            if cleaned_data['data'].get('translations') and cleaned_data['data']['translations'].get('overviewTranslations'):
                for overview in cleaned_data['data']['translations']['overviewTranslations']:
                    if overview['language'] == 'eng':
                        description = overview.get('overview')
                        break
            print(f"Description extracted: {description}")

            # First artwork of type 15 or 3 wins; semantics of these type
            # codes are defined by TheTVDB artwork-type table.
            image_link = None
            if cleaned_data['data'].get('artworks'):
                for artwork in cleaned_data['data']['artworks']:
                    if artwork['type'] in [15, 3]:
                        image_link = artwork.get('thumbnail')
                        break
            print(f"Image link extracted: {image_link}")

            if media_type == 'movie':
                recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'film')
                genre_list.add_entry(genres, original_title, cleaned_data['data']['year'], description, image_link, 'movie')
            elif media_type == 'series':
                recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'series')
                genre_list.add_entry(genres, original_title, cleaned_data['data']['year'], description, image_link, 'series')
            print(f"adding.. {original_title}")

            # Create the full directory path if it doesn't exist
            full_dir_path = os.path.join(CACHE_DIR, media_type)
            os.makedirs(full_dir_path, exist_ok=True)

            # Now create the JSON cache path
            json_cache_path = os.path.join(full_dir_path, f"{urllib.parse.quote(original_title)}.json")
            await save_to_json(cleaned_data, json_cache_path)
            print(f"Data saved to JSON at: {json_cache_path}")
        else:
            print(f"No data found for {original_title} in search results.")

    except requests.RequestException as e:
        print(f"Error fetching data: {e}")
    except Exception as e:
        # Broad catch keeps one bad title from aborting a batch run.
        print(f"An unexpected error occurred: {e}")
176
-
177
def main():
    """Demo driver: fetch and cache metadata for a fixed batch of titles."""
    entries = [
        {"original_title": "Funky Monkey (2004)", "title": "Funky Monkey", "media_type": "movie", "year": 2004},
        {"original_title": "My Spy (2020)", "title": "My Spy", "media_type": "movie", "year": 2020},
        {"original_title": "My Spy (2024)", "title": "My Spy", "media_type": "movie", "year": 2024},
        {"original_title": "Yaariayan (2014)", "title": "Yaariayan", "media_type": "movie", "year": 2014},
        {"original_title": "Yaariyan 2 (2023)", "title": "Yaariyan 2", "media_type": "movie", "year": 2023},
    ]

    asyncio.run(process_entries(entries))
187
-
188
async def process_entries(entries):
    """Sequentially fetch and cache metadata for each entry dict.

    Each entry must carry 'original_title', 'title', 'media_type' and 'year'.
    """
    for entry in entries:
        await fetch_and_cache_json(entry['original_title'], entry['title'], entry['media_type'], entry['year'])
191
-
192
if __name__ == "__main__":
    # Script entry point: run the demo batch defined in main().
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tvdbApiClient.py DELETED
@@ -1,95 +0,0 @@
1
- import json
2
- import os
3
- import logging
4
- from pathlib import Path
5
- import tvdb_v4_official
6
- from utils import save_to_json
7
-
8
- THETVDB_API_KEY = os.getenv("THETVDB_API_KEY")
9
- CACHE_DIR = os.getenv("CACHE_DIR")
10
- SAVE_DIR = os.path.join(CACHE_DIR, "metadata")
11
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
12
-
13
- # Initialize TVDB client
14
- tvdb = tvdb_v4_official.TVDB(THETVDB_API_KEY)
15
-
16
def get_series_info(series_id):
    """Fetch extended series information (with episode meta) from TVDB.

    :param series_id: TVDB series identifier.
    :return: the series payload dict, or None when the lookup fails.
    """
    try:
        series = tvdb.get_series_extended(series_id, meta="episodes")
    except Exception as e:
        logging.error(f"Error fetching series info: {e}")
        return None
    logging.info("Series info fetched successfully.")
    return series
25
-
26
def filter_episode_data(episode):
    """Project an episode record onto the subset of fields we persist.

    Missing fields come back as None, mirroring dict.get.
    """
    wanted = (
        "id", "seriesId", "name", "aired", "runtime", "overview",
        "image", "imageType", "isMovie", "number", "absoluteNumber",
        "seasonNumber", "finaleType", "year",
    )
    return {field: episode.get(field) for field in wanted}
44
-
45
-
46
-
47
async def fetch_and_cache_seasons(series_id):
    """Fetch and cache episodes for a given series ID asynchronously.

    Writes one JSON file per season under SAVE_DIR/<series_id>/, using
    filter_episode_data to trim each episode record.
    """
    series_info = get_series_info(series_id)
    if not series_info:
        logging.error("Series info could not be fetched.")
        return

    # Fetch all episodes for the series
    try:
        all_episodes = tvdb.get_series_episodes(series_id, lang="eng").get('episodes', [])
    except Exception as e:
        logging.error(f"Error fetching episodes for series ID {series_id}: {e}")
        return

    # Organize episodes by season
    all_seasons = {}
    for episode in all_episodes:
        season_number = episode.get('seasonNumber')

        # Season 0 is TheTVDB's convention for specials.
        if season_number == 0:
            season_key = "Specials"
        else:
            season_key = f"Season {season_number}"

        if season_key not in all_seasons:
            all_seasons[season_key] = []

        filtered_data = seasons_filtered = filter_episode_data(episode) if False else filter_episode_data(episode)
        all_seasons[season_key].append(filtered_data)

    # Create folder for the series
    series_folder = Path(SAVE_DIR) / str(series_id)
    series_folder.mkdir(parents=True, exist_ok=True)

    # Save episodes for each season in separate JSON files
    # NOTE(review): sorted() on the string keys is lexicographic, so
    # "Season 10" sorts before "Season 2" — confirm this ordering is fine
    # for the file-writing loop (files are independent, so it likely is).
    for season_key, episodes in sorted(all_seasons.items()):
        # NOTE(review): e.get('number') may be None for some records, which
        # would make sorted() raise TypeError on Python 3 — confirm the API
        # always supplies 'number'.
        episodes_sorted = sorted(episodes, key=lambda e: e.get('number'))
        season_file = series_folder / f"{season_key}.json"
        await save_to_json(episodes_sorted, season_file)
86
-
87
async def main(series_id):
    """Main function to fetch and cache episodes asynchronously.

    :param series_id: TVDB series identifier to cache.
    """
    await fetch_and_cache_seasons(series_id)
90
-
91
if __name__ == "__main__":
    # Script entry point: cache one hard-coded series for manual testing.
    import asyncio
    # Replace with your series ID
    SERIES_ID = "315103"
    asyncio.run(main(SERIES_ID))