ChandimaPrabath committed on
Commit
7bb63b8
·
1 Parent(s): 6b7b0ee
Files changed (2) hide show
  1. app.py +2 -2
  2. tvdb.py +61 -45
app.py CHANGED
@@ -76,8 +76,8 @@ async def get_recent_items(limit: int = 5):
76
 
77
  # Return combined results
78
  return JSONResponse(content={
79
- 'recent_films': limited_films,
80
- 'recent_series': limited_series
81
  })
82
 
83
  @app.get("/api/get/film/metadata/{title}")
 
76
 
77
  # Return combined results
78
  return JSONResponse(content={
79
+ 'films': limited_films,
80
+ 'series': limited_series
81
  })
82
 
83
  @app.get("/api/get/film/metadata/{title}")
tvdb.py CHANGED
@@ -25,11 +25,13 @@ def authenticate_thetvdb():
25
  "apikey": THETVDB_API_KEY
26
  }
27
  try:
 
28
  response = requests.post(auth_url, json=auth_data)
29
  response.raise_for_status()
30
  response_data = response.json()
31
  THETVDB_TOKEN = response_data['data']['token']
32
  TOKEN_EXPIRY = datetime.now() + timedelta(days=30)
 
33
  except requests.RequestException as e:
34
  print(f"Authentication failed: {e}")
35
  THETVDB_TOKEN = None
@@ -42,26 +44,26 @@ def get_thetvdb_token():
42
  return THETVDB_TOKEN
43
 
44
  def clean_data(data):
 
45
  fields_to_keep = {
46
- "data": {
47
- 'id': None,
48
- 'name': None,
49
- 'image': None,
50
- 'score': None,
51
- 'runtime': None,
52
- 'releases': None,
53
- 'year': None,
54
- 'contentRatings': None,
55
- 'originalCountry': None,
56
- 'originalLanguage': None,
57
- 'translations': {},
58
- 'artworks': [],
59
- 'characters': [],
60
- 'spoken_languages': [],
61
- 'translations': {}
 
62
  }
63
- }
64
- """Clean up the data to retain only necessary fields."""
65
  cleaned_data = {}
66
 
67
  for key, value in fields_to_keep.items():
@@ -71,9 +73,11 @@ def clean_data(data):
71
  if field in data[key]:
72
  cleaned_data[key][field] = data[key][field]
73
 
 
74
  return cleaned_data
75
 
76
  async def fetch_and_cache_json(original_title, title, media_type, year=None):
 
77
  if year:
78
  search_url = f"{THETVDB_API_URL}/search?query={urllib.parse.quote(title)}&type={media_type}&year={year}"
79
  else:
@@ -81,7 +85,7 @@ async def fetch_and_cache_json(original_title, title, media_type, year=None):
81
 
82
  token = get_thetvdb_token()
83
  if not token:
84
- print("Authentication failed")
85
  return
86
 
87
  headers = {
@@ -90,22 +94,22 @@ async def fetch_and_cache_json(original_title, title, media_type, year=None):
90
  }
91
 
92
  try:
93
- # Fetch initial search results
94
  response = requests.get(search_url, headers=headers)
 
95
  response.raise_for_status()
96
  data = response.json()
97
 
98
  if 'data' in data and data['data']:
99
- # Extract the TVDB ID and type from the first result
100
  first_result = data['data'][0]
101
  tvdb_id = first_result.get('tvdb_id')
102
  media_type = first_result.get('type')
 
103
 
104
  if not tvdb_id:
105
  print("TVDB ID not found in the search results")
106
  return
107
 
108
- # Determine the correct extended URL based on media type
109
  if media_type == 'movie':
110
  extended_url = f"{THETVDB_API_URL}/movies/{tvdb_id}/extended?meta=translations"
111
  elif media_type == 'series':
@@ -114,50 +118,62 @@ async def fetch_and_cache_json(original_title, title, media_type, year=None):
114
  else:
115
  print(f"Unsupported media type: {media_type}")
116
  return
117
-
118
- # Request the extended information using the TVDB ID
119
  response = requests.get(extended_url, headers=headers)
 
120
  response.raise_for_status()
121
  extended_data = response.json()
 
122
  cleaned_data = clean_data(extended_data)
123
-
124
- # Extract the English description
125
  description = None
126
- if cleaned_data['data']['translations'] and cleaned_data['data']['translations']['overviewTranslations']:
127
  for overview in cleaned_data['data']['translations']['overviewTranslations']:
128
- if overview['language'] == 'eng' and overview['isPrimary']:
129
- description = overview['overview']
130
  break
131
-
132
- # Extract the artwork type 15 link
133
  image_link = None
134
- if cleaned_data['data']['artworks']:
135
  for artwork in cleaned_data['data']['artworks']:
136
- if artwork['type'] == 15:
137
- image_link = artwork['image']
138
  break
139
-
140
- # Add the entry to the RecentList with description and image_link
141
  if media_type == 'movie':
142
  recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'film')
143
  elif media_type == 'series':
144
  recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'series')
145
-
146
- # Cache the extended JSON response
147
  json_cache_path = os.path.join(CACHE_DIR, f"{urllib.parse.quote(original_title)}.json")
148
  await save_to_json(cleaned_data, json_cache_path)
149
-
 
 
 
150
  except requests.RequestException as e:
151
  print(f"Error fetching data: {e}")
 
 
152
 
153
  def main():
154
- # Replace with your series ID and other parameters
155
- original_title = "The Listner (2009)"
156
- title = "The Listner"
157
- media_type = "series" # or "movie"
158
- year = 2009
 
 
 
 
159
 
160
- asyncio.run(fetch_and_cache_json(original_title, title, media_type, year))
 
 
161
 
162
  if __name__ == "__main__":
163
  main()
 
25
  "apikey": THETVDB_API_KEY
26
  }
27
  try:
28
+ print("Authenticating with TheTVDB API...")
29
  response = requests.post(auth_url, json=auth_data)
30
  response.raise_for_status()
31
  response_data = response.json()
32
  THETVDB_TOKEN = response_data['data']['token']
33
  TOKEN_EXPIRY = datetime.now() + timedelta(days=30)
34
+ print("Authentication successful.")
35
  except requests.RequestException as e:
36
  print(f"Authentication failed: {e}")
37
  THETVDB_TOKEN = None
 
44
  return THETVDB_TOKEN
45
 
46
  def clean_data(data):
47
+ print("Cleaning data...")
48
  fields_to_keep = {
49
+ "data": {
50
+ 'id': None,
51
+ 'name': None,
52
+ 'image': None,
53
+ 'score': None,
54
+ 'runtime': None,
55
+ 'releases': None,
56
+ 'year': None,
57
+ 'contentRatings': None,
58
+ 'originalCountry': None,
59
+ 'originalLanguage': None,
60
+ 'translations': {},
61
+ 'artworks': [],
62
+ 'characters': [],
63
+ 'spoken_languages': [],
64
+ 'translations': {}
65
+ }
66
  }
 
 
67
  cleaned_data = {}
68
 
69
  for key, value in fields_to_keep.items():
 
73
  if field in data[key]:
74
  cleaned_data[key][field] = data[key][field]
75
 
76
+ print("Data cleaned successfully.")
77
  return cleaned_data
78
 
79
  async def fetch_and_cache_json(original_title, title, media_type, year=None):
80
+ print(f"Fetching data for: {original_title}")
81
  if year:
82
  search_url = f"{THETVDB_API_URL}/search?query={urllib.parse.quote(title)}&type={media_type}&year={year}"
83
  else:
 
85
 
86
  token = get_thetvdb_token()
87
  if not token:
88
+ print("Authentication token not available.")
89
  return
90
 
91
  headers = {
 
94
  }
95
 
96
  try:
97
+ print(f"Sending search request to: {search_url}")
98
  response = requests.get(search_url, headers=headers)
99
+ print(f"Search response status code: {response.status_code}")
100
  response.raise_for_status()
101
  data = response.json()
102
 
103
  if 'data' in data and data['data']:
 
104
  first_result = data['data'][0]
105
  tvdb_id = first_result.get('tvdb_id')
106
  media_type = first_result.get('type')
107
+ print(f"Found TVDB ID: {tvdb_id} with media type: {media_type}")
108
 
109
  if not tvdb_id:
110
  print("TVDB ID not found in the search results")
111
  return
112
 
 
113
  if media_type == 'movie':
114
  extended_url = f"{THETVDB_API_URL}/movies/{tvdb_id}/extended?meta=translations"
115
  elif media_type == 'series':
 
118
  else:
119
  print(f"Unsupported media type: {media_type}")
120
  return
121
+
 
122
  response = requests.get(extended_url, headers=headers)
123
+ print(f"Extended data response status code: {response.status_code}")
124
  response.raise_for_status()
125
  extended_data = response.json()
126
+
127
  cleaned_data = clean_data(extended_data)
128
+ print(f"cleaning.. {original_title}")
129
+
130
  description = None
131
+ if cleaned_data['data'].get('translations') and cleaned_data['data']['translations'].get('overviewTranslations'):
132
  for overview in cleaned_data['data']['translations']['overviewTranslations']:
133
+ if overview['language'] == 'eng' and overview.get('isPrimary'):
134
+ description = overview.get('overview')
135
  break
136
+ print(f"Description extracted: {description}")
137
+
138
  image_link = None
139
+ if cleaned_data['data'].get('artworks'):
140
  for artwork in cleaned_data['data']['artworks']:
141
+ if artwork['type'] in [15, 3]:
142
+ image_link = artwork.get('image')
143
  break
144
+ print(f"Image link extracted: {image_link}")
145
+
146
  if media_type == 'movie':
147
  recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'film')
148
  elif media_type == 'series':
149
  recent_list.add_entry(original_title, cleaned_data['data']['year'], description, image_link, 'series')
150
+ print(f"adding.. {original_title}")
151
+
152
  json_cache_path = os.path.join(CACHE_DIR, f"{urllib.parse.quote(original_title)}.json")
153
  await save_to_json(cleaned_data, json_cache_path)
154
+ print(f"Data saved to JSON at: {json_cache_path}")
155
+ else:
156
+ print(f"No data found for {original_title} in search results.")
157
+
158
  except requests.RequestException as e:
159
  print(f"Error fetching data: {e}")
160
+ except Exception as e:
161
+ print(f"An unexpected error occurred: {e}")
162
 
163
  def main():
164
+ entries = [
165
+ {"original_title": "Funky Monkey (2004)", "title": "Funky Monkey", "media_type": "movie", "year": 2004},
166
+ {"original_title": "My Spy (2020)", "title": "My Spy", "media_type": "movie", "year": 2020},
167
+ {"original_title": "My Spy (2024)", "title": "My Spy", "media_type": "movie", "year": 2024},
168
+ {"original_title": "Yaariayan (2014)", "title": "Yaariayan", "media_type": "movie", "year": 2014},
169
+ {"original_title": "Yaariyan 2 (2023)", "title": "Yaariyan 2", "media_type": "movie", "year": 2023},
170
+ ]
171
+
172
+ asyncio.run(process_entries(entries))
173
 
174
+ async def process_entries(entries):
175
+ for entry in entries:
176
+ await fetch_and_cache_json(entry['original_title'], entry['title'], entry['media_type'], entry['year'])
177
 
178
  if __name__ == "__main__":
179
  main()