Fred808 committed on
Commit
4ca1d24
·
verified ·
1 Parent(s): 85cfb12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +338 -218
app.py CHANGED
@@ -1,233 +1,353 @@
1
  import os
2
- import re
3
  import json
4
  import time
5
- from typing import Dict, Any, List
6
- from urllib.parse import urlparse, parse_qs
7
-
8
- from fastapi import FastAPI, Request, HTTPException
9
- from fastapi.responses import JSONResponse
10
-
11
- try:
12
- from huggingface_hub import HfApi
13
- HF_AVAILABLE = True
14
- except Exception:
15
- HfApi = None
16
- HF_AVAILABLE = False
17
-
18
- # Directory to store compiled uploads
19
- BASE_DIR = os.path.dirname(__file__)
20
- UPLOAD_DIR = os.path.join(BASE_DIR, "uploads")
21
- os.makedirs(UPLOAD_DIR, exist_ok=True)
22
-
23
- app = FastAPI(title="Data Collection Server", description="Receives text/URLs from captioning/image servers, groups by course, compiles JSON and optionally uploads to HuggingFace.")
24
-
25
- # In-memory store for course data
26
- courses: Dict[str, Dict[str, Any]] = {}
27
-
28
- URL_RE = re.compile(r"https?://[\w\-\./?%&=:@,+~#]+")
29
- DONE_RE = re.compile(r"\b(done|finished|completed|complete)\b", re.IGNORECASE)
30
-
31
- HF_TOKEN = os.getenv("HF_TOKEN")
32
- HF_DATASET_REPO = os.getenv("HF_DATASET_REPO") # e.g. "username/dataset-name"
33
-
34
-
35
- def extract_urls(text: str) -> List[str]:
36
- return URL_RE.findall(text or "")
37
-
38
-
39
- def extract_course_from_url(url: str) -> str:
40
  try:
41
- parsed = urlparse(url)
42
- qs = parse_qs(parsed.query)
43
- course = qs.get("course") or qs.get("Course") or qs.get("COURSE")
44
- if course:
45
- return course[0]
46
- except Exception:
47
- pass
48
- return None
49
-
50
-
51
- def now_ts() -> str:
52
- return time.strftime("%Y%m%dT%H%M%S")
53
-
54
-
55
- async def parse_request(request: Request) -> Dict[str, Any]:
56
- """Read incoming request in any format and return a dict with keys: text, json, form, headers"""
57
- payload = {"text": "", "json": None, "form": {}, "headers": dict(request.headers)}
58
 
59
- # Try JSON
 
60
  try:
61
- body = await request.json()
62
- payload["json"] = body
63
- # if it's a simple string payload inside JSON
64
- if isinstance(body, str):
65
- payload["text"] = body
66
- elif isinstance(body, dict):
67
- # flatten likely fields
68
- for k in ["text", "caption", "message", "body", "content"]:
69
- if k in body and isinstance(body[k], str):
70
- payload["text"] = body[k]
71
- break
72
- # allow explicit course field
73
- if "course" in body and isinstance(body["course"], str):
74
- payload["course"] = body["course"]
75
- except Exception:
76
- # not JSON - try raw body
77
- try:
78
- raw = (await request.body()).decode("utf-8", errors="ignore")
79
- payload["text"] = raw
80
- except Exception:
81
- payload["text"] = ""
82
-
83
- # Try form (for multipart/form-data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  try:
85
- form = await request.form()
86
- for k, v in form.multi_items():
87
- # take first text-like value
88
- payload["form"][k] = str(v)
89
- if k in ("text", "caption", "message", "content") and not payload["text"]:
90
- payload["text"] = str(v)
91
- if k == "course":
92
- payload["course"] = str(v)
93
- except Exception:
94
- pass
95
-
96
- # If no text yet but JSON is a list or similar, stringify (best-effort)
97
- if not payload["text"] and payload.get("json") is not None:
98
- try:
99
- payload["text"] = json.dumps(payload["json"])
100
- except Exception:
101
- payload["text"] = str(payload["json"])
102
-
103
- return payload
104
-
105
-
106
- def add_entry(course: str, entry: Dict[str, Any]):
107
- c = courses.setdefault(course, {"items": [], "last_updated": None})
108
- c["items"].append(entry)
109
- c["last_updated"] = time.time()
110
-
111
-
112
- def compile_course(course: str) -> str:
113
- """Compile course data to JSON file and optionally upload to HuggingFace. Returns path to saved file."""
114
- if course not in courses:
115
- raise ValueError(f"Unknown course: {course}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
- data = {
118
- "course": course,
119
- "compiled_at": now_ts(),
120
- "count": len(courses[course]["items"]),
121
- "items": courses[course]["items"],
 
 
 
 
 
 
 
 
 
122
  }
123
-
124
- filename = f"{course}_{now_ts()}.json"
125
- safe_filename = re.sub(r"[^a-zA-Z0-9_\-\.]+", "_", filename)
126
- path = os.path.join(UPLOAD_DIR, safe_filename)
127
-
128
- with open(path, "w", encoding="utf-8") as f:
129
- json.dump(data, f, ensure_ascii=False, indent=2)
130
-
131
- # Optionally upload to HuggingFace
132
- if HF_TOKEN and HF_DATASET_REPO and HF_AVAILABLE:
133
  try:
134
- api = HfApi()
135
- # upload path at root of repo with same filename
136
- api.upload_file(
137
- path_or_fileobj=path,
138
- path_in_repo=safe_filename,
139
- repo_id=HF_DATASET_REPO,
140
- repo_type="dataset",
141
- token=HF_TOKEN,
142
- )
143
  except Exception as e:
144
- # Log but don't fail the compile
145
- print(f"[WARN] HuggingFace upload failed: {e}")
146
-
147
- # After compiling, clear stored items for that course
148
- courses[course]["items"] = []
149
- return path
150
-
151
-
152
- @app.post("/submit")
153
- async def submit(request: Request):
154
- """Receive any data (text, JSON, form). Will try to extract course and URLs and store entries.
155
- If message contains 'done' or similar, it will compile the course to JSON (and upload if configured).
156
- """
157
- payload = await parse_request(request)
158
- text = (payload.get("text") or "").strip()
159
-
160
- # Collect urls found
161
- urls = extract_urls(text)
162
-
163
- # Determine course from payload (explicit field) or from any URL
164
- course = payload.get("course")
165
- if not course:
166
- for u in urls:
167
- c = extract_course_from_url(u)
168
- if c:
169
- course = c
170
- break
171
-
172
- if not course:
173
- course = "unknown_course"
174
-
175
- entry = {
176
- "timestamp": now_ts(),
177
- "text": text,
178
- "json": payload.get("json"),
179
- "form": payload.get("form"),
180
- "urls": urls,
181
- "headers": {k: v for k, v in payload.get("headers", {}).items() if k.lower() in ("user-agent", "host", "content-type")},
182
- }
183
-
184
- add_entry(course, entry)
185
-
186
- # Detect completion
187
- if DONE_RE.search(text):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
  try:
189
- path = compile_course(course)
190
- return JSONResponse({"status": "compiled", "course": course, "path": path})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
  except Exception as e:
192
- raise HTTPException(status_code=500, detail=str(e))
193
-
194
- # Detect explicit 'course change' in URLs (if a URL contains a different course than stored) -- best-effort
195
- # If a URL indicates a different course and there were previous items, compile previous course first
196
- # Example: previous stored course is same; we don't track per-source last course, so skip this more complex behavior for now
197
-
198
- return JSONResponse({"status": "stored", "course": course, "count": len(courses[course]["items"])})
199
-
200
-
201
- @app.get("/status")
202
- async def status():
203
- summary = {c: {"count": len(v["items"]), "last_updated": v["last_updated"]} for c, v in courses.items()}
204
- return {"courses": summary}
205
-
206
-
207
- @app.post("/compile")
208
- async def compile_endpoint(course: str = None):
209
- """Force compile a course. If course is not provided and only one exists, compile that one."""
210
- if not course:
211
- if len(courses) == 1:
212
- course = next(iter(courses.keys()))
213
- else:
214
- raise HTTPException(status_code=400, detail="Provide course query parameter when multiple courses exist.")
215
-
216
- try:
217
- path = compile_course(course)
218
- return {"status": "compiled", "course": course, "path": path}
219
- except Exception as e:
220
- raise HTTPException(status_code=500, detail=str(e))
221
-
222
-
223
- @app.get("/debug/{course}")
224
- async def debug_course(course: str):
225
- if course not in courses:
226
- raise HTTPException(status_code=404, detail="Course not found")
227
- return courses[course]
228
-
229
 
230
  if __name__ == "__main__":
231
- import uvicorn
232
- port = int(os.getenv("PORT", "8000"))
233
- uvicorn.run(app, host="0.0.0.0", port=port)
 
 
 
 
1
  import os
 
2
  import json
3
  import time
4
+ import asyncio
5
+ import aiohttp
6
+ from typing import Dict, List, Set
7
+ from urllib.parse import quote, urljoin
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
# Directory where per-course caption JSON files are persisted.
CAPTIONS_DIR = Path("captions_data")
CAPTIONS_DIR.mkdir(exist_ok=True)

def get_caption_file_path(course: str) -> Path:
    """Return the JSON file path used to persist captions for *course*.

    The course name is percent-encoded with no safe characters (so even
    '/' is escaped), mapping arbitrary course identifiers to a single
    flat, filesystem-safe filename.
    """
    encoded = quote(course, safe='')
    return CAPTIONS_DIR / f"{encoded}_captions.json"
19
+
20
def save_captions_to_file(course: str, captions: List[Dict]) -> None:
    """Persist *captions* for *course* as pretty-printed UTF-8 JSON.

    Best-effort: any failure is reported to stdout rather than raised,
    so a disk error never aborts the caption pipeline.
    """
    try:
        target = get_caption_file_path(course)
        with open(target, 'w', encoding='utf-8') as handle:
            json.dump(captions, handle, indent=2, ensure_ascii=False)
    except Exception as e:
        print(f"Error saving captions for {course}: {e}")
 
29
def load_captions_from_file(course: str) -> List[Dict]:
    """Return previously saved captions for *course*, or [] if none exist.

    Best-effort: an unreadable or corrupt file is reported to stdout and
    treated the same as a missing one (empty caption list).
    """
    try:
        source = get_caption_file_path(course)
        if not source.exists():
            return []
        with open(source, 'r', encoding='utf-8') as handle:
            return json.load(handle)
    except Exception as e:
        print(f"Error loading captions for {course}: {e}")
        return []
40
# Configuration
# Server hosting the extracted course frame images to be captioned.
SOURCE_SERVER = "https://fred808-vs2.hf.space"
# Pool of caption-server /analyze endpoints; work is fanned out across them.
CAPTION_SERVERS = [
    "https://fred808-pill-3-1.hf.space/analyze",
    "https://fred808-pil-4-1.hf.space/analyze",
    "https://fred808-pil-4-2.hf.space/analyze",
    "https://fred808-pil-4-3.hf.space/analyze",
    "https://fred1012-fred1012-gw0j2h.hf.space/analyze",
    "https://fred1012-fred1012-wqs6c2.hf.space/analyze",
    "https://fred1012-fred1012-oncray.hf.space/analyze",
    "https://fred1012-fred1012-4goge7.hf.space/analyze",
    "https://fred1012-fred1012-z0eh7m.hf.space/analyze",
    "https://fred1012-fred1012-u95rte.hf.space/analyze",
    "https://fred1012-fred1012-igje22.hf.space/analyze",
    "https://fred1012-fred1012-ibkuf8.hf.space/analyze",
    "https://fred1012-fred1012-nwqthy.hf.space/analyze",
    "https://fred1012-fred1012-4ldqj4.hf.space/analyze",
    "https://fred1012-fred1012-pivlzg.hf.space/analyze",
    "https://fred1012-fred1012-ptlc5u.hf.space/analyze",
    "https://fred1012-fred1012-u7lh57.hf.space/analyze",
    "https://fred1012-fred1012-q8djv1.hf.space/analyze",
    "https://fredalone-fredalone-ozugrp.hf.space/analyze",
    "https://fredalone-fredalone-9brxj2.hf.space/analyze",
    "https://fredalone-fredalone-p8vq9a.hf.space/analyze",
    "https://fredalone-fredalone-vbli2y.hf.space/analyze",
    "https://fredalone-fredalone-uggger.hf.space/analyze",
    "https://fredalone-fredalone-nmi7e8.hf.space/analyze",
    "https://fredalone-fredalone-d1f26d.hf.space/analyze",
    "https://fredalone-fredalone-461jp2.hf.space/analyze",
    "https://fredalone-fredalone-3enfg4.hf.space/analyze",
    "https://fredalone-fredalone-dqdbpv.hf.space/analyze",
    "https://fredalone-fredalone-ivtjua.hf.space/analyze",
    "https://fredalone-fredalone-6bezt2.hf.space/analyze",
    "https://fredalone-fredalone-e0wfnk.hf.space/analyze",
    "https://fredalone-fredalone-zu2t7j.hf.space/analyze",
    "https://fredalone-fredalone-dqtv1o.hf.space/analyze",
    "https://fredalone-fredalone-wclyog.hf.space/analyze",
    "https://fredalone-fredalone-t27vig.hf.space/analyze",
    "https://fredalone-fredalone-gahbxh.hf.space/analyze",
    "https://fredalone-fredalone-kw2po4.hf.space/analyze",
    "https://fredalone-fredalone-8h285h.hf.space/analyze"
]
MODEL_TYPE = "Florence-2-large"  # Explicitly request large model
# Server that receives each finished course's compiled captions (see submit_to_dataset).
DATA_COLLECTION_SERVER = "https://fred808-flow.hf.space"

# Tracking state (in-memory; mirrored to disk via save_captions_to_file)
processed_images: Dict[str, Set[str]] = {}  # {course: set(image_names)}
course_captions: Dict[str, List[Dict]] = {}  # {course: [{image, caption, metadata}]}
+
89
async def fetch_courses() -> List[str]:
    """Fetch the list of course folder names from the source server.

    Expects a JSON body shaped like {"courses": [{"course_folder": ...}, ...]};
    any other shape yields an empty list.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{SOURCE_SERVER}/courses") as resp:
            data = await resp.json()
    if isinstance(data, dict) and 'courses' in data:
        return [entry['course_folder'] for entry in data['courses'] if isinstance(entry, dict)]
    return []
98
async def fetch_course_images(course: str) -> List[Dict]:
    """Fetch the image listing for *course* from the source server.

    Frames live under "<course>_frames" on the source server; the suffix
    is appended here unless the caller already supplied it. Expects a
    JSON body shaped like {"images": [...]}; anything else yields [].
    """
    folder = course if course.endswith("_frames") else f"{course}_frames"
    listing_url = f"{SOURCE_SERVER}/images/{quote(folder)}"
    async with aiohttp.ClientSession() as session:
        async with session.get(listing_url) as resp:
            data = await resp.json()
    if isinstance(data, dict) and 'images' in data:
        return data['images']
    return []
+
109
async def get_caption(server: str, image_url: str) -> Dict:
    """Request a caption for *image_url* from one caption server.

    Sends a GET to the server's /analyze endpoint with the image URL and
    the required model choice. Returns the server's parsed JSON response,
    or None on any failure (timeout, connection error, bad JSON) so a
    single flaky server never aborts the batch.
    """
    params = {
        'image_url': image_url,
        'model_choice': MODEL_TYPE
    }
    try:
        # aiohttp 3.x deprecates passing a bare number as `timeout`;
        # ClientTimeout is the supported way to bound the whole request.
        timeout = aiohttp.ClientTimeout(total=30)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(server, params=params) as resp:
                return await resp.json()
    except Exception as e:
        print(f"Error from {server}: {e}")
        return None
+
123
async def get_model_info():
    """Query each caption server's /health endpoint for its active model.

    Returns a list of {'url': analyze_endpoint, 'model': name} dicts.
    Servers that fail to respond (or return malformed JSON) are logged
    and skipped, so the result may have fewer entries than CAPTION_SERVERS.
    """
    model_info = []
    async with aiohttp.ClientSession() as session:
        for server in CAPTION_SERVERS:
            # Derive the health endpoint from the /analyze URL.
            health_url = server.rsplit('/analyze', 1)[0] + '/health'
            try:
                async with session.get(health_url) as resp:
                    info = await resp.json()
                model_info.append({
                    'url': server,
                    'model': info.get('model_choice', 'unknown')
                })
            except Exception as e:
                print(f"Couldn't get model info from {server}: {e}")
    return model_info
+
140
class CaptionServer:
    """Availability flag and throughput statistics for one caption endpoint."""

    def __init__(self, url):
        self.url = url                # the server's /analyze endpoint
        self.busy = False             # True while a request is in flight
        self.model = "unknown"        # model name reported by /health
        self.total_processed = 0      # images captioned successfully
        self.total_time = 0           # cumulative seconds spent captioning

    @property
    def fps(self):
        """Average images per second; 0 before any work has been timed."""
        if self.total_time > 0:
            return self.total_processed / self.total_time
        return 0
+
152
async def process_image(server: CaptionServer, course: str, image: Dict) -> Dict:
    """Caption one image on one server, updating the server's statistics.

    Returns {"image": filename, "caption": text} on success, otherwise
    None (server already busy, request failed, or the server reported no
    caption). The server's busy flag is always released on exit.
    """
    if server.busy:
        return None

    server.busy = True
    start_time = time.time()

    try:
        # Image URLs follow /images/<COURSE>_frames/<IMAGE> on the source server.
        folder = course if course.endswith("_frames") else f"{course}_frames"
        image_url = urljoin(SOURCE_SERVER, f"/images/{quote(folder)}/{quote(image['filename'])}")
        result = await get_caption(server.url, image_url)

        elapsed = time.time() - start_time
        server.total_time += elapsed

        if result and result.get('success') and result.get('caption'):
            server.total_processed += 1
            print(f"Server {server.url} processed {image['filename']} in {elapsed:.2f}s ({server.fps:.2f} fps)")
            return {
                "image": image['filename'],
                "caption": result['caption']
            }

    except Exception as e:
        print(f"Error processing {image['filename']} on {server.url}: {e}")

    finally:
        server.busy = False

    return None
 
186
async def submit_to_dataset(course: str, metadata_list: List[Dict]):
    """POST a completed course's captions to the data-collection server.

    The course is grouped under a parent folder: the directory prefix when
    the name contains '/', otherwise everything before the first underscore.
    Returns the server's JSON reply, or None if the submission failed.
    """
    if '/' in course:
        parent_folder = os.path.dirname(course)
    else:
        parent_folder = course.split('_')[0]

    payload = {
        "text": f"Completed captions for course {course}. done",
        "course": parent_folder,
        "metadata": {
            "course_name": course,
            "image_count": len(metadata_list)
        },
        "captions": metadata_list
    }

    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(
                f"{DATA_COLLECTION_SERVER}/submit",
                json=payload
            ) as resp:
                result = await resp.json()
                print(f"Dataset submission result for {course}: {result}")
                return result
        except Exception as e:
            print(f"Error submitting to dataset: {e}")
            return None
+
215
async def process_course(course: str, servers: List[CaptionServer]):
    """Caption every image in *course*, fanning work out across *servers*.

    Progress is persisted to disk after each batch so an interrupted run
    resumes where it left off. Once every image has a caption the results
    are submitted to the dataset server and the course's in-memory state
    is cleared. Images whose captioning failed in this pass are left for
    a later invocation to retry.
    """
    if course not in processed_images:
        processed_images[course] = set()
    if course not in course_captions:
        # Resume from any captions persisted by a previous run.
        course_captions[course] = load_captions_from_file(course)
        # Mark the resumed images as already processed.
        processed_images[course].update(cap['image'] for cap in course_captions[course])

    # Get list of images
    images = await fetch_course_images(course)
    if not images:
        return

    print(f"\nProcessing {len(images)} images for course {course}")
    pending = [img for img in images if img['filename'] not in processed_images[course]]
    # Cursor into `pending`; avoids the O(n^2) cost of re-slicing the
    # list (`remaining = remaining[1:]`) once per dispatched image.
    next_idx = 0

    while next_idx < len(pending):
        # Hand one pending image to each idle server.
        tasks = []
        for server in servers:
            if not server.busy and next_idx < len(pending):
                tasks.append(process_image(server, course, pending[next_idx]))
                next_idx += 1

        if not tasks:
            # Every server is busy; yield briefly before polling again.
            await asyncio.sleep(0.1)
            continue

        # Process images in parallel across servers
        results = await asyncio.gather(*tasks)

        # Record successful captions; failed images stay unprocessed.
        has_new_results = False
        for result in results:
            if result:
                processed_images[course].add(result['image'])
                course_captions[course].append(result)
                has_new_results = True

        # Save progress after each batch with new results
        if has_new_results:
            save_captions_to_file(course, course_captions[course])

        # Show progress
        total = len(images)
        done = len(processed_images[course])
        print(f"\rProgress: {done}/{total} images ({done/total*100:.1f}%)", end="")

        # Only submit once every image (including earlier failures) is done.
        if next_idx >= len(pending) and len(processed_images[course]) == len(images):
            print(f"\nCourse {course} complete, submitting to dataset...")
            await submit_to_dataset(course, course_captions[course])
            processed_images[course].clear()
            course_captions[course].clear()
            break
+
273
async def main():
    """Top-level coordinator: discover courses and caption them forever.

    Verifies which caption servers run the required model, then loops:
    fetch the course list, process each course, report throughput stats,
    sleep, repeat. Errors in the main loop are logged and retried.
    """
    # Initialize caption servers
    servers = [CaptionServer(url) for url in CAPTION_SERVERS]

    # Check for existing caption files and report
    existing_captions = list(CAPTIONS_DIR.glob("*_captions.json"))
    if existing_captions:
        print("\nFound existing caption files:")
        for cap_file in existing_captions:
            course = cap_file.stem.replace("_captions", "")
            try:
                with open(cap_file, 'r', encoding='utf-8') as f:
                    captions = json.load(f)
                print(f"- {course}: {len(captions)} captions")
            except Exception as e:
                print(f"- Error reading {cap_file.name}: {e}")
        print()

    # Get model information and verify Florence-2-large availability.
    model_info = await get_model_info()
    # get_model_info() skips servers whose health check failed, so the
    # result can be shorter than `servers`. Match entries back by URL
    # instead of zipping positionally — a positional zip would pair
    # model info with the wrong server whenever one health check fails.
    model_by_url = {info['url']: info['model'] for info in model_info}
    print("\nCaption Servers:")
    available_servers = []
    for server in servers:
        server.model = model_by_url.get(server.url, 'unknown')
        if MODEL_TYPE in server.model:
            available_servers.append(server)
            print(f"✓ {server.url} confirmed {MODEL_TYPE}")
        else:
            print(f"✗ {server.url} using {server.model} - skipping (requires {MODEL_TYPE})")

    if not available_servers:
        print(f"\nError: No servers with {MODEL_TYPE} available!")
        return

    # Update servers list to only use those with large model
    servers = available_servers
    print(f"\nUsing {len(servers)} servers with {MODEL_TYPE}")
    print()

    start_time = time.time()

    while True:
        try:
            # Get available courses
            courses = await fetch_courses()
            if not courses:
                print("No courses found, waiting...")
                await asyncio.sleep(10)
                continue

            print(f"Found {len(courses)} courses")

            # Process each course with all available servers
            for course in courses:
                await process_course(course, servers)

            # Show server stats
            print("\nServer Stats:")
            total_processed = sum(s.total_processed for s in servers)
            elapsed = time.time() - start_time
            if elapsed > 0:
                print(f"Total images processed: {total_processed}")
                print(f"Overall speed: {total_processed/elapsed:.2f} fps")
                for s in servers:
                    print(f"- {s.url}: {s.total_processed} images, {s.fps:.2f} fps")
                print()

            # Wait before next check
            await asyncio.sleep(5)

        except Exception as e:
            print(f"Error in main loop: {e}")
            await asyncio.sleep(10)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
346
 
347
if __name__ == "__main__":
    # Entry point: report the configured endpoints, then run the
    # coordinator loop until the process is interrupted.
    print("Starting caption coordinator...")
    print(f"Source server: {SOURCE_SERVER}")
    print(f"Caption servers: {CAPTION_SERVERS}")
    print(f"Dataset server: {DATA_COLLECTION_SERVER}")

    asyncio.run(main())