sam12345324 committed on
Commit
3301839
·
verified ·
1 Parent(s): ef46ad4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +189 -216
app.py CHANGED
@@ -1,222 +1,195 @@
1
- from fastapi import FastAPI, File, UploadFile, HTTPException
2
- from fastapi.responses import JSONResponse
 
3
  import requests
4
- import hashlib
5
- import uuid
6
- import json
7
- import asyncio
8
- from typing import Dict, Any, Optional, Tuple, List
9
- import mimetypes
10
-
11
app = FastAPI(title="10-Image Upscaler API")

# --- Configuration ---
API_BASE_URL = "https://api.grid.plus"  # upstream service every request is proxied to
APP_ID = "808645"       # client identity, sent as the X-AppID header
PLATFORM = "h5"         # sent as X-Platform
APP_VERSION = "8.9.7"   # sent as X-Version
# NOTE(review): signing salt/prefix are checked into source — presumably lifted from
# the upstream web client; confirm whether they should live in secrets/config instead.
SIGNATURE_SALT = "Pg@photo_photogrid#20250225"
SIGNATURE_PREFIX = "XX"
COMMON_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
MAX_IMAGES = 10 # Maximum number of images allowed per request
22
-
23
- # --- Helper Functions ---
24
def generate_uuid() -> str:
    """Produce a fresh random UUID4 in canonical string form."""
    return f"{uuid.uuid4()}"
26
-
27
def hash_md5(data: str) -> str:
    """Hex-encoded MD5 digest of *data*, encoded as UTF-8 first."""
    digest = hashlib.md5()
    digest.update(data.encode("utf-8"))
    return digest.hexdigest()
29
-
30
def hash_sha256(data: str) -> str:
    """Hex-encoded SHA-256 digest of *data*, encoded as UTF-8 first."""
    digest = hashlib.sha256()
    digest.update(data.encode("utf-8"))
    return digest.hexdigest()
32
-
33
async def fetch_current_ip(session: requests.Session) -> str:
    """Return a plausible placeholder client IP (no network call is made).

    The value is only ever fed into an MD5 hash to derive a ghost id, but it
    should still look like a dotted quad. The previous implementation built
    the last octet from the first 3 hex characters of a UUID, which could
    contain letters (e.g. "98.76.54.a1b") or exceed 255 — not a valid IP.
    Use a genuine 0-255 octet instead.
    """
    last_octet = uuid.uuid4().int % 256
    return f"98.76.54.{last_octet}"
36
-
37
async def generate_ghost_id_once(session: requests.Session, device_id: str) -> str:
    """Derive the per-flow ghost id: md5(device_id + placeholder client IP).

    Computed once per upscale flow and reused for every signed request in it.
    """
    ip_address = await fetch_current_ip(session)
    ghost_id = hash_md5(device_id + ip_address)
    return ghost_id
41
-
42
def build_headers(token: str, uid: str, device_id: str, mcc: str = "en-US") -> Dict[str, str]:
    """Assemble the common X-* request headers the upstream API expects."""
    headers: Dict[str, str] = {}
    headers["X-AppID"] = APP_ID
    headers["X-Platform"] = PLATFORM
    headers["X-Version"] = APP_VERSION
    headers["X-SessionToken"] = token
    headers["X-UniqueID"] = uid
    headers["X-DeviceID"] = device_id
    headers["X-MCC"] = mcc
    headers["User-Agent"] = COMMON_USER_AGENT
    return headers
53
-
54
- def _value_to_string_for_signature(value: Any) -> str:
55
- if isinstance(value, bool):
56
- return "true" if value else "false"
57
- if value is None:
58
- return "null"
59
- return str(value)
60
-
61
async def create_signature(data_obj: Dict[str, Any], step_name: str) -> str:
    """Compute the request signature: prefixed SHA-256 over the sorted
    key/value pairs of *data_obj* concatenated with the shared salt."""
    pieces = [f"{key}{_value_to_string_for_signature(data_obj[key])}" for key in sorted(data_obj)]
    payload = "".join(pieces) + SIGNATURE_SALT
    return SIGNATURE_PREFIX + hash_sha256(payload)
67
-
68
async def http_request(
    session: requests.Session,
    endpoint: str,
    data: Optional[Dict[str, Any]],
    method: str,
    content_type_key: str,  # "JSON" | "FORM" | "MULTIPART" | "" (ignored for S3)
    cookies: Dict[str, str],
    precomputed_ghost_id: str,
    step_name: str,  # descriptive label only; create_signature does not use it
    is_s3_upload: bool = False
) -> Tuple[Optional[Dict[str, Any]], int, Optional[requests.Response]]:
    """Perform one signed request against the upstream API, or a raw S3 PUT.

    Returns (parsed_body_or_error_dict, http_status, raw_response). Failures
    are reported in-band via marker keys ("_non_json_response",
    "_request_exception") instead of raising, so callers inspect every
    outcome uniformly.
    """
    # S3 uploads go straight to the presigned URL; API calls are rooted at API_BASE_URL.
    url = API_BASE_URL + endpoint if not is_s3_upload else endpoint
    if is_s3_upload:
        # Raw PUT of the image bytes — no signing, no cookies.
        s3_headers = {"Content-Type": data.get("Content-Type", "application/octet-stream")}
        request_kwargs = {"method": "PUT", "url": url, "headers": s3_headers, "data": data.get('file_content'), "timeout": 60}
    else:
        default_headers = build_headers(
            token=cookies.get("t", ""),
            uid=cookies.get("u", ""),
            device_id=cookies.get("did", "")
        )
        ghost_id = precomputed_ghost_id
        # The signature covers every X-* header plus all non-file payload fields.
        x_headers_for_sig = {k: v for k, v in default_headers.items() if k.startswith("X-")}
        signable_payload_parts = {k: v for k, v in data.items() if k != 'file'} if content_type_key == "MULTIPART" else data or {}
        data_to_sign = {**x_headers_for_sig, "X-GhostID": ghost_id, **signable_payload_parts}
        signature = await create_signature(data_to_sign, step_name)
        final_headers = {**default_headers, "sig": signature, "X-GhostID": ghost_id}
        if content_type_key != "MULTIPART":
            # For multipart, requests must set its own boundary Content-Type.
            content_type_map = {"JSON": "application/json", "FORM": "application/x-www-form-urlencoded"}
            final_headers["Content-Type"] = content_type_map.get(content_type_key, "application/json")
        request_kwargs = {"method": method.upper(), "url": url, "headers": final_headers, "cookies": cookies, "timeout": 30}
        if data:
            if content_type_key == "MULTIPART":
                request_kwargs["files"] = {'file': data.get('file')} if data.get('file') else None
                request_kwargs["data"] = {k: str(v) for k, v in data.items() if k != 'file'}
            elif content_type_key == "JSON":
                request_kwargs["json"] = data
            else:
                request_kwargs["data"] = data
    try:
        # requests is blocking; run it in a worker thread to keep the event loop free.
        response = await asyncio.to_thread(session.request, **request_kwargs)
        if is_s3_upload:
            return ({"s3_status": "success"} if response.ok else {"s3_status": "failed", "raw_text": response.text}, response.status_code, response)
        try:
            return response.json(), response.status_code, response
        except json.JSONDecodeError:
            # Non-JSON body: fabricate a code of 0 on success so callers' `== 0` checks pass.
            return {"raw_text": response.text, "code": 0 if response.ok else response.status_code, "_non_json_response": True}, response.status_code, response
    except requests.RequestException as e:
        status_code = e.response.status_code if e.response else 0
        response_text = e.response.text if e.response else ""
        return {"error_message": str(e), "raw_text": response_text, "code": -1, "_request_exception": True}, status_code, e.response
119
-
120
async def upscale_image(image_bytes: bytes, file_extension: str, filename: str) -> Dict[str, Any]:
    """Run the full upscale flow for one image.

    Steps: obtain a presigned S3 URL, upload the bytes, trigger the
    super-resolution task, then poll until it finishes. Returns a dict with
    either "upscaled_url" or "error", always including "status_code".
    """
    # NOTE(review): this Semaphore is created fresh on every call, so it never
    # actually limits concurrency across requests — a module-level semaphore
    # would be needed for that; confirm intent.
    async with asyncio.Semaphore(2): # Allow up to 2 concurrent requests to the external API
        with requests.Session() as session:
            device_id = generate_uuid()
            # Anonymous flow: session token "t" and uid "u" are deliberately empty.
            cookies = {"did": device_id, "t": "", "u": "", "_vid": generate_uuid()}
            flow_ghost_id = await generate_ghost_id_once(session, device_id)
            # Upstream expects "jpeg", not "jpg".
            if file_extension == "jpg": file_extension = "jpeg"
            # --- Step 1: get a presigned S3 upload URL ---
            get_upload_url_payload = {'ext': file_extension, 'method': 'wn_superresolution'}
            s3_url_response_data, status, _ = await http_request(
                session, "/v1/ai/web/nologin/getuploadurl", get_upload_url_payload, "POST", "MULTIPART", cookies, flow_ghost_id, "GetUploadURL"
            )
            if not s3_url_response_data or s3_url_response_data.get("code") != 0:
                return {"filename": filename, "error": "Failed to get S3 upload URL", "status_code": status}
            s3_upload_url = s3_url_response_data.get("data", {}).get("upload_url")
            s3_img_url = s3_url_response_data.get("data", {}).get("img_url")
            if not s3_upload_url or not s3_img_url:
                return {"filename": filename, "error": "Missing S3 upload URL or image URL", "status_code": 500}
            # --- Step 2: PUT the image bytes to S3 (up to 3 attempts) ---
            content_type_for_s3 = mimetypes.guess_type(f"file.{file_extension}")[0] or 'application/octet-stream'
            s3_payload = {'file_content': image_bytes, 'Content-Type': content_type_for_s3}
            max_retries = 3
            for attempt in range(max_retries):
                s3_upload_response_data, s3_status, _ = await http_request(
                    session, s3_upload_url, s3_payload, "PUT", "", cookies, flow_ghost_id, "S3Upload", is_s3_upload=True
                )
                if s3_status in [200, 201, 204]:
                    break
                if attempt == max_retries - 1:
                    return {"filename": filename, "error": "Failed to upload image to S3", "status_code": s3_status}
                await asyncio.sleep(2)
            # --- Step 3: trigger the super-resolution task ---
            trigger_upscale_payload = {'url': s3_img_url, 'method': 'wn_superresolution'}
            trigger_response_data, status, _ = await http_request(
                session, "/v1/ai/web/super_resolution/nologinupload", trigger_upscale_payload, "POST", "MULTIPART", cookies, flow_ghost_id, "TriggerUpscale"
            )
            if not trigger_response_data or trigger_response_data.get("code") != 0:
                return {"filename": filename, "error": "Failed to trigger upscale", "status_code": status}
            # task_id has appeared both at the top level and under "data" — accept either.
            task_id = trigger_response_data.get("task_id") or trigger_response_data.get("data", {}).get("task_id")
            if not task_id:
                return {"filename": filename, "error": "Task ID not found", "status_code": 500}
            # --- Step 4: poll for the result (20 tries, 7s apart: ~140s budget) ---
            max_retries, poll_interval = 20, 7
            for i in range(max_retries):
                await asyncio.sleep(poll_interval)
                poll_payload = {"task_ids": [task_id]}
                poll_response_data, status, _ = await http_request(
                    session, "/v1/ai/web/super_resolution/nologinbatchresult", poll_payload, "POST", "JSON", cookies, flow_ghost_id, "PollResult"
                )
                if not poll_response_data or poll_response_data.get("code") != 0:
                    continue
                results_list = poll_response_data.get("data", [])
                # Observed statuses: 0/2 = done, 1 = still processing, anything else = failed.
                if results_list and results_list[0].get("status") in [0, 2]:
                    upscaled_url_list = results_list[0].get("result_image_url") or results_list[0].get("image_url")
                    if isinstance(upscaled_url_list, list) and upscaled_url_list:
                        return {"filename": filename, "upscaled_url": upscaled_url_list[0], "status_code": 200}
                    return {"filename": filename, "error": "Upscaled URL list empty", "status_code": 500}
                elif results_list and results_list[0].get("status") != 1:
                    error_msg = results_list[0].get('fail_msg', results_list[0].get('errmsg', 'Unknown error'))
                    return {"filename": filename, "error": f"Task failed: {error_msg}", "status_code": 500}
            return {"filename": filename, "error": "Max retries reached for polling", "status_code": 500}
177
-
178
@app.post("/upscale")
async def upscale_single(file: UploadFile = File(...)):
    """Upscale a single JPG/JPEG/PNG upload and return its upscaled URL.

    Raises 400 on unsupported formats, and propagates the status code
    reported by the upscale flow on failure.
    """
    try:
        file_extension = file.filename.split('.')[-1].lower()
        if file_extension not in ['jpg', 'jpeg', 'png']:
            raise HTTPException(status_code=400, detail="Unsupported file format. Use JPG, JPEG, or PNG.")
        image_bytes = await file.read()
        result = await upscale_image(image_bytes, file_extension, file.filename)
        if "error" in result:
            raise HTTPException(status_code=result["status_code"], detail=result["error"])
        return JSONResponse(content={"filename": file.filename, "upscaled_url": result["upscaled_url"]})
    except HTTPException:
        # Bug fix: the blanket handler below used to swallow deliberate
        # HTTPExceptions (e.g. the 400 above) and re-raise them as opaque 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}")
191
 
192
@app.post("/upscale-multiple")
async def upscale_multiple(files: List[UploadFile] = File(...)):
    """Upscale up to MAX_IMAGES uploads concurrently.

    Per-file failures (bad format, upstream errors) are reported inline in
    the "results" list; the endpoint itself only errors (400) when more than
    MAX_IMAGES files are sent, or (500) on unexpected failures.
    """
    try:
        if len(files) > MAX_IMAGES:
            raise HTTPException(status_code=400, detail=f"Maximum {MAX_IMAGES} images allowed per request.")

        results = []
        tasks = []
        for file in files:
            file_extension = file.filename.split('.')[-1].lower()
            if file_extension not in ['jpg', 'jpeg', 'png']:
                results.append({"filename": file.filename, "error": "Unsupported file format. Use JPG, JPEG, or PNG.", "status_code": 400})
                continue
            image_bytes = await file.read()
            tasks.append(upscale_image(image_bytes, file_extension, file.filename))

        if tasks:
            # return_exceptions=True keeps one crashed flow from failing the whole batch.
            upscale_results = await asyncio.gather(*tasks, return_exceptions=True)
            for result in upscale_results:
                if isinstance(result, Exception):
                    results.append({"filename": "unknown", "error": str(result), "status_code": 500})
                else:
                    results.append(result)

        return JSONResponse(content={"results": results})
    except HTTPException:
        # Bug fix: previously the blanket handler below converted the
        # deliberate 400 above into an opaque 500. Re-raise untouched.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing images: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
@app.get("/")
async def root():
    """Landing endpoint describing the available routes."""
    message = f"Visit /upscale for single image or /upscale-multiple for up to {MAX_IMAGES} images"
    return {"message": message}
 
1
+ import gradio as gr
2
+ import cv2
3
+ import os
4
  import requests
5
+ from tqdm import tqdm
6
+ import tempfile
7
+ import logging
8
+ import time
9
+ from PIL import Image
10
+ import io
11
+ import concurrent.futures
12
+ import subprocess
13
+ import shutil
14
+
15
# Bug fix: itertools is used by api_cycle below but was never imported at the
# top of the file, so the app died with a NameError at startup.
import itertools

# ===========================
# CONFIG
# ===========================
UPSCALE_APIS = [
    "https://sam12345324-imagemultiupscaler.hf.space/upscale-multiple",
    "https://HIRO12121212-up1.hf.space/upscale-multiple",  # Replace with your API
    "https://HIRO12121212-up2.hf.space/upscale-multiple",  # Replace with your API
    "https://HIRO12121212-up3.hf.space/upscale-multiple",  # Replace with your API
    "https://HIRO12121212-up4.hf.space/upscale-multiple",  # Replace with your API
    "https://HIRO12121212-up5.hf.space/upscale-multiple",  # Replace with your API
    "https://HIRO12121212-up6.hf.space/upscale-multiple",  # Replace with your API
    "https://HIRO12121212-up7.hf.space/upscale-multiple",  # Replace with your API
]
# Round-robin over the endpoint pool so batch load is spread across Spaces.
api_cycle = itertools.cycle(UPSCALE_APIS)
MAX_RETRIES = 3   # attempts per batch before giving up
BATCH_SIZE = 10  # Number of frames per API call

# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s"
)
37
+
38
# ===========================
# Helper Functions
# ===========================
def validate_image(content):
    """Return True if *content* (bytes) parses as a valid image, else False.

    Uses PIL's verify(), which checks file integrity without decoding pixels.
    """
    try:
        img = Image.open(io.BytesIO(content))
        img.verify()
        return True
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. PIL raises assorted exceptions for corrupt data;
        # any of them simply means "not a valid image".
        return False
 
 
 
 
 
49
 
50
def download_image(url):
    """Download an image from URL and return content or None."""
    try:
        # Bug fix: no timeout meant one hung connection could stall the whole
        # batch indefinitely; requests never times out by default.
        img_resp = requests.get(url, timeout=60)
        if img_resp.status_code == 200 and validate_image(img_resp.content):
            return img_resp.content
        logging.warning(f"Invalid or failed download for {url}")
        return None
    except Exception as e:
        logging.warning(f"Download error for {url}: {e}")
        return None
62
+
63
def upscale_batch(frame_paths):
    """
    Send a batch of frames to the multi-image upscaler API.
    Returns a list of downloaded image bytes or None for failed frames.

    Retries up to MAX_RETRIES times, rotating through api_cycle so each
    attempt may hit a different endpoint.
    """
    for attempt in range(1, MAX_RETRIES + 1):
        handles = []  # Bug fix: file objects were opened and never closed (leaked per attempt).
        try:
            files = []
            for path in frame_paths:
                fh = open(path, "rb")
                handles.append(fh)
                # Use 'files' key for each image
                files.append(("files", (os.path.basename(path), fh, "image/png")))

            api = next(api_cycle)
            # Timeout keeps a dead Space from hanging the pipeline forever.
            r = requests.post(api, files=files, timeout=300)
            if r.status_code == 200:
                data = r.json()
                urls = [item["upscaled_url"] for item in data.get("results", []) if item.get("status_code") == 200] # adjust based on API response
                if not urls or len(urls) != len(frame_paths):
                    logging.warning(f"Batch attempt {attempt}: URL count mismatch")
                    continue

                with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
                    results = list(executor.map(download_image, urls))
                return results
            else:
                logging.warning(f"Batch attempt {attempt} failed: Status {r.status_code}")
        except Exception as e:
            logging.warning(f"Batch attempt {attempt} error: {e}")
        finally:
            for fh in handles:
                try:
                    fh.close()
                except Exception:
                    pass
        time.sleep(1)
    return [None]*len(frame_paths)
93
+
94
+ # ===========================
95
+ # Main Upscale Function
96
+ # ===========================
97
def upscale_video(video_file):
    """Upscale a video frame-by-frame via the remote batch upscaler APIs.

    Pipeline: extract frames with OpenCV -> upscale in batches of BATCH_SIZE
    across the API pool (falling back to the original frame on failure) ->
    re-encode with FFmpeg at the source frame rate.

    Returns the path of the upscaled MP4 (kept inside a temp directory so the
    Gradio frontend can serve it).
    """
    logging.info(f"Starting upscale for video: {video_file}")

    # Temporary directories
    temp_dir = tempfile.mkdtemp()
    frames_dir = os.path.join(temp_dir, "frames")
    upscaled_dir = os.path.join(temp_dir, "upscaled")
    os.makedirs(frames_dir, exist_ok=True)
    os.makedirs(upscaled_dir, exist_ok=True)

    # ---------------------------
    # STEP 1: Extract frames
    # ---------------------------
    logging.info("Extracting frames...")
    cap = cv2.VideoCapture(video_file)
    if not cap.isOpened():
        # Bug fix: an unreadable input used to silently extract 0 frames and
        # fail later with a misleading "no frames upscaled" error.
        raise ValueError(f"Could not open video file: {video_file}")
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        # Bug fix: some containers report 0/NaN FPS, which would produce an
        # invalid FFmpeg -framerate argument. Fall back to a sane default.
        logging.warning("Source FPS unavailable; defaulting to 30")
        fps = 30.0
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    logging.info(f"Video FPS: {fps}, Total frames: {frame_count}")

    frame_paths = []
    idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_path = os.path.join(frames_dir, f"frame_{idx:05d}.png")
        cv2.imwrite(frame_path, frame)
        frame_paths.append(frame_path)
        idx += 1
    cap.release()
    logging.info(f"Finished extracting {len(frame_paths)} frames")

    # ---------------------------
    # STEP 2: Upscale frames in batches
    # ---------------------------
    logging.info("Upscaling frames using API in batches...")
    upscaled_paths = []
    batch_list = [frame_paths[i:i+BATCH_SIZE] for i in range(0, len(frame_paths), BATCH_SIZE)]
    # One worker per endpoint: each in-flight batch can use a different Space.
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(UPSCALE_APIS)) as executor:
        batch_results_list = list(tqdm(executor.map(upscale_batch, batch_list), desc="Upscaling batches", total=len(batch_list)))
    for batch_idx, batch_results in enumerate(batch_results_list):
        for j, content in enumerate(batch_results):
            frame_idx = batch_idx * BATCH_SIZE + j
            out_path = os.path.join(upscaled_dir, f"upscaled_{frame_idx:05d}.png")
            if content:
                with open(out_path, "wb") as f:
                    f.write(content)
                upscaled_paths.append(out_path)
                logging.info(f"Saved upscaled frame {frame_idx}: {out_path}")
            else:
                # Use original frame if upscaling failed
                original_path = os.path.join(frames_dir, f"frame_{frame_idx:05d}.png")
                shutil.copy(original_path, out_path)
                upscaled_paths.append(out_path)
                logging.warning(f"Used original frame {frame_idx} due to failed upscaling")

    if not upscaled_paths:
        raise ValueError("No frames were successfully upscaled. Cannot merge video.")

    logging.info(f"Upscaled {len(upscaled_paths)} frames successfully")

    # ---------------------------
    # STEP 3: Merge frames back to video using FFmpeg
    # ---------------------------
    logging.info("Merging frames back to video using FFmpeg...")
    output_path = os.path.join(temp_dir, "upscaled_video.mp4")
    subprocess.run([
        # -y: never block on an interactive overwrite prompt.
        'ffmpeg', '-y', '-framerate', str(fps), '-i', os.path.join(upscaled_dir, 'upscaled_%05d.png'),
        '-c:v', 'libx264', '-pix_fmt', 'yuv420p', output_path
    ], check=True)
    logging.info(f"Upscaled video saved at: {output_path}")

    # ---------------------------
    # STEP 4: Cleanup temporary frames
    # ---------------------------
    # temp_dir itself is intentionally kept: it holds the output video.
    logging.info("Cleaning temporary frames...")
    for f in frame_paths + upscaled_paths:
        try:
            os.remove(f)
        except Exception as e:
            logging.warning(f"Could not remove {f}: {e}")

    logging.info("Processing completed successfully")
    return output_path
182
+
183
# ===========================
# Gradio UI
# ===========================
# Single-function interface: one video in, one upscaled video out.
video_input = gr.Video(label="Upload Video")
video_output = gr.Video(label="Upscaled Video")

demo = gr.Interface(
    fn=upscale_video,
    inputs=video_input,
    outputs=video_output,
    title="🎥 Video Upscaler (Batch URL Support)",
    description="Uploads a video and upscales it frame by frame using a multi-image URL-returning upscaler API (up to 10 images per batch)."
)

if __name__ == "__main__":
    demo.launch()