danicor committed on
Commit
bf29f37
·
verified ·
1 Parent(s): ee406c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +373 -160
app.py CHANGED
@@ -1,207 +1,420 @@
 
1
  import torch
2
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
- import time, hashlib, threading, logging
 
 
4
  from datetime import datetime, timedelta
5
- from typing import Tuple
6
- from fastapi import FastAPI, HTTPException
7
- from fastapi.middleware.cors import CORSMiddleware
8
- from pydantic import BaseModel
9
- from contextlib import asynccontextmanager
10
 
11
- # ---- Logging ----
12
  logging.basicConfig(level=logging.INFO)
13
- logger = logging.getLogger("translator")
14
 
15
- # ---- Cache ----
16
  class TranslationCache:
17
  def __init__(self, cache_duration_minutes: int = 60):
18
  self.cache = {}
19
  self.cache_duration = timedelta(minutes=cache_duration_minutes)
20
  self.lock = threading.Lock()
21
-
22
- def _key(self, text, source, target):
23
- return hashlib.md5(f"{text}_{source}_{target}".encode()).hexdigest()
24
-
25
- def get(self, text, source, target):
 
 
 
26
  with self.lock:
27
- key = self._key(text, source, target)
28
  if key in self.cache:
29
- translation, ts = self.cache[key]
30
- if datetime.now() - ts < self.cache_duration:
 
31
  return translation
32
  else:
 
33
  del self.cache[key]
34
  return None
35
-
36
- def set(self, text, source, target, translation):
 
37
  with self.lock:
38
- key = self._key(text, source, target)
39
  self.cache[key] = (translation, datetime.now())
 
40
 
41
- def clear(self):
42
- with self.lock:
43
- self.cache = {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
- # ---- Translator ----
46
  class MultilingualTranslator:
47
- def __init__(self, cache_minutes=60):
48
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
49
  logger.info(f"Using device: {self.device}")
50
- self.cache = TranslationCache(cache_minutes)
51
- self.model_name = "facebook/m2m100_418M" # سبک‌تر از نسخه 1.2B
 
 
 
 
 
52
  logger.info(f"Loading model: {self.model_name}")
53
- self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
54
- self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name).to(self.device)
55
- logger.info("Model loaded successfully!")
56
- # stats
57
- self.stats = {
58
- "requests": 0,
59
- "chars_translated": 0,
60
- "avg_time": 0.0
61
- }
62
-
63
- def translate_text(self, text, src, tgt) -> Tuple[str, float]:
64
- start = time.time()
65
- cached = self.cache.get(text, src, tgt)
66
- if cached:
67
- return cached, time.time() - start
68
  try:
69
- self.tokenizer.src_lang = src
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  encoded = self.tokenizer(text, return_tensors="pt").to(self.device)
71
- tokens = self.model.generate(
 
 
72
  **encoded,
73
- forced_bos_token_id=self.tokenizer.get_lang_id(tgt),
74
  max_length=512,
75
  num_beams=4,
76
  early_stopping=True
77
  )
78
- result = self.tokenizer.batch_decode(tokens, skip_special_tokens=True)[0]
79
- self.cache.set(text, src, tgt, result)
80
- processing = time.time() - start
81
- # update stats
82
- self.stats["requests"] += 1
83
- self.stats["chars_translated"] += len(text)
84
- self.stats["avg_time"] = (
85
- (self.stats["avg_time"] * (self.stats["requests"] - 1) + processing)
86
- / self.stats["requests"]
87
- )
88
- return result, processing
 
89
  except Exception as e:
90
  logger.error(f"Translation error: {e}")
91
- return f"Translation error: {str(e)}", time.time() - start
92
 
93
- # ---- Languages ----
94
  LANGUAGE_MAP = {
95
- "English": "en", "Persian (Farsi)": "fa", "Arabic": "ar",
96
- "French": "fr", "German": "de", "Spanish": "es", "Italian": "it",
97
- "Portuguese": "pt", "Russian": "ru", "Chinese (Simplified)": "zh",
98
- "Japanese": "ja", "Korean": "ko", "Hindi": "hi", "Turkish": "tr",
99
- "Dutch": "nl", "Polish": "pl", "Swedish": "sv", "Norwegian": "no",
100
- "Danish": "da", "Finnish": "fi", "Greek": "el", "Hebrew": "he",
101
- "Thai": "th", "Vietnamese": "vi", "Indonesian": "id", "Malay": "ms",
102
- "Czech": "cs", "Slovak": "sk", "Hungarian": "hu", "Romanian": "ro",
103
- "Bulgarian": "bg", "Croatian": "hr", "Serbian": "sr", "Slovenian": "sl",
104
- "Lithuanian": "lt", "Latvian": "lv", "Estonian": "et", "Ukrainian": "uk",
105
- "Belarusian": "be", "Kazakh": "kk", "Uzbek": "uz", "Georgian": "ka",
106
- "Armenian": "hy", "Azerbaijani": "az", "Bengali": "bn", "Urdu": "ur",
107
- "Tamil": "ta", "Telugu": "te", "Malayalam": "ml", "Kannada": "kn",
108
- "Gujarati": "gu", "Punjabi": "pa", "Marathi": "mr", "Nepali": "ne",
109
- "Sinhala": "si", "Burmese": "my", "Khmer": "km", "Lao": "lo",
110
- "Mongolian": "mn", "Afrikaans": "af", "Amharic": "am",
111
- "Yoruba": "yo", "Igbo": "ig", "Hausa": "ha", "Swahili": "sw",
112
- "Xhosa": "xh", "Zulu": "zu"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  }
114
 
115
- # ---- FastAPI ----
116
- translator: MultilingualTranslator | None = None # lazy load
117
-
118
- @asynccontextmanager
119
- async def lifespan(app: FastAPI):
120
- logger.info("🚀 Translator API started. Model will be loaded on first request.")
121
- yield
122
- logger.info("🛑 Translator API shutting down.")
123
-
124
- app = FastAPI(title="Translator API", lifespan=lifespan)
125
-
126
- app.add_middleware(
127
- CORSMiddleware,
128
- allow_origins=["*"], allow_credentials=True,
129
- allow_methods=["*"], allow_headers=["*"]
130
- )
131
 
132
- # ---- Models ----
133
- class TranslateRequest(BaseModel):
134
- text: str
135
- source_lang: str
136
- target_lang: str
137
- api_key: str | None = None
138
-
139
- class ConfigRequest(BaseModel):
140
- cache_minutes: int
141
-
142
- # ---- Endpoints ----
143
- @app.post("/api/translate")
144
- async def translate(req: TranslateRequest):
145
  global translator
146
- if not req.text.strip():
147
- raise HTTPException(status_code=400, detail="No text provided")
148
- src = LANGUAGE_MAP.get(req.source_lang)
149
- tgt = LANGUAGE_MAP.get(req.target_lang)
150
- if not src or not tgt:
151
- raise HTTPException(status_code=400, detail="Invalid language codes")
152
 
 
 
 
 
 
 
 
 
 
 
153
  if translator is None:
154
- logger.info("⏳ Loading translation model...")
155
- start = time.time()
156
- translator = MultilingualTranslator()
157
- logger.info(f" Model loaded in {time.time()-start:.1f}s")
158
-
159
- translation, secs = translator.translate_text(req.text, src, tgt)
160
- return {
161
- "translation": translation,
162
- "source_language": req.source_lang,
163
- "target_language": req.target_lang,
164
- "processing_time": round(secs, 2),
165
- "character_count": len(req.text),
166
- "status": "success"
167
- }
168
-
169
- @app.get("/api/languages")
170
- async def languages():
171
- return {"languages": list(LANGUAGE_MAP.keys()), "status": "success"}
 
 
172
 
173
- @app.get("/api/health")
174
- async def health():
175
- return {
176
- "status": "ok",
177
- "model_loaded": translator is not None,
178
- "device": str(translator.device) if translator else None,
179
- "model": translator.model_name if translator else None
180
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
181
 
182
- @app.get("/api/stats")
183
- async def stats():
184
- if translator is None:
185
- return {"stats": {}, "status": "model_not_loaded"}
186
- return {"stats": translator.stats, "status": "success"}
187
 
188
- @app.post("/api/reset-cache")
189
- async def reset_cache():
190
- if translator is None:
191
- return {"message": "Model not loaded yet", "status": "warning"}
192
- translator.cache.clear()
193
- return {"message": "Cache cleared", "status": "success"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
- @app.post("/api/config")
196
- async def update_config(cfg: ConfigRequest):
197
- global translator
198
- if translator is None:
199
- translator = MultilingualTranslator(cfg.cache_minutes)
200
- return {"message": f"Model initialized with cache {cfg.cache_minutes} minutes", "status": "success"}
201
- else:
202
- translator.cache = TranslationCache(cfg.cache_minutes)
203
- return {"message": f"Cache duration updated to {cfg.cache_minutes} minutes", "status": "success"}
204
 
205
  if __name__ == "__main__":
206
- import uvicorn
207
- uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=False)
 
 
 
 
 
 
1
+ import gradio as gr
2
  import torch
3
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
4
+ import time
5
+ import json
6
+ import hashlib
7
  from datetime import datetime, timedelta
8
+ import threading
9
+ from queue import Queue
10
+ import logging
11
+ from typing import Dict, List, Tuple
12
+ import requests
13
 
14
+ # Set up logging
15
  logging.basicConfig(level=logging.INFO)
16
+ logger = logging.getLogger(__name__)
17
 
 
18
  class TranslationCache:
19
  def __init__(self, cache_duration_minutes: int = 60):
20
  self.cache = {}
21
  self.cache_duration = timedelta(minutes=cache_duration_minutes)
22
  self.lock = threading.Lock()
23
+
24
+ def _generate_key(self, text: str, source_lang: str, target_lang: str) -> str:
25
+ """Generate cache key from text and languages"""
26
+ content = f"{text}_{source_lang}_{target_lang}"
27
+ return hashlib.md5(content.encode()).hexdigest()
28
+
29
+ def get(self, text: str, source_lang: str, target_lang: str) -> str:
30
+ """Get translation from cache if exists and not expired"""
31
  with self.lock:
32
+ key = self._generate_key(text, source_lang, target_lang)
33
  if key in self.cache:
34
+ translation, timestamp = self.cache[key]
35
+ if datetime.now() - timestamp < self.cache_duration:
36
+ logger.info(f"Cache hit for key: {key[:8]}...")
37
  return translation
38
  else:
39
+ # Remove expired entry
40
  del self.cache[key]
41
  return None
42
+
43
+ def set(self, text: str, source_lang: str, target_lang: str, translation: str):
44
+ """Store translation in cache"""
45
  with self.lock:
46
+ key = self._generate_key(text, source_lang, target_lang)
47
  self.cache[key] = (translation, datetime.now())
48
+ logger.info(f"Cached translation for key: {key[:8]}...")
49
 
50
class TranslationQueue:
    """Runs queued tasks on background threads, at most ``max_workers`` at once."""

    def __init__(self, max_workers: int = 3):
        self.queue = Queue()
        self.max_workers = max_workers
        self.current_workers = 0
        self.lock = threading.Lock()  # guards current_workers

    def add_task(self, task_func, *args, **kwargs):
        """Add translation task to queue."""
        self.queue.put((task_func, args, kwargs))

    def process_queue(self):
        """Drain the queue, running each task in its own worker thread.

        Fixes over the previous version:
        - the throttle sleep now happens *outside* the lock, so finishing
          workers can actually decrement the counter;
        - the task tuple is bound into the worker via default args, fixing
          the late-binding-closure bug (the thread could previously run
          with a *later* iteration's task);
        - spawned threads are joined, so all queued work has completed
          when this method returns.

        Assumes a single consumer calls process_queue at a time, so the
        empty()/get() pairing cannot race with another consumer.
        """
        threads = []
        while not self.queue.empty():
            with self.lock:
                slot_free = self.current_workers < self.max_workers
            if not slot_free:
                time.sleep(0.1)  # back off without holding the lock
                continue

            task_func, args, kwargs = self.queue.get()
            with self.lock:
                self.current_workers += 1

            def worker(fn=task_func, a=args, kw=kwargs):
                try:
                    fn(*a, **kw)
                finally:
                    # Always free the slot, even if the task raised.
                    with self.lock:
                        self.current_workers -= 1

            thread = threading.Thread(target=worker)
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()
83
 
 
84
class MultilingualTranslator:
    """M2M100-based translator with caching, queueing and device auto-selection.

    Raises on construction if the model cannot be loaded.
    """

    def __init__(self, cache_duration_minutes: int = 60):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {self.device}")

        # Initialize cache and queue
        self.cache = TranslationCache(cache_duration_minutes)
        self.queue = TranslationQueue()

        # Fix: tokenizer.src_lang is shared mutable state; without this
        # lock, concurrent requests (Gradio runs up to max_threads workers)
        # could cross-contaminate each other's source language.
        self._generate_lock = threading.Lock()

        # Load model - using a powerful multilingual model
        self.model_name = "facebook/m2m100_1.2B"
        logger.info(f"Loading model: {self.model_name}")

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
            self.model.to(self.device)

            # Create pipeline. NOTE(review): translate_text calls
            # model.generate directly and never uses this pipeline; it is
            # kept only so the public attribute surface stays unchanged.
            self.translator = pipeline(
                "translation",
                model=self.model,
                tokenizer=self.tokenizer,
                device=0 if self.device.type == "cuda" else -1
            )
            logger.info("Model loaded successfully!")
        except Exception as e:
            logger.error(f"Error loading model: {e}")
            raise

    def translate_text(self, text: str, source_lang: str, target_lang: str) -> Tuple[str, float]:
        """Translate ``text`` from ``source_lang`` to ``target_lang``.

        Returns (translation, elapsed_seconds). On failure returns an
        error string instead of raising, preserving the original
        best-effort contract.
        """
        start_time = time.time()

        # Check cache first
        cached_result = self.cache.get(text, source_lang, target_lang)
        if cached_result:
            return cached_result, time.time() - start_time

        try:
            # Serialize tokenizer-state mutation + generation (see __init__).
            with self._generate_lock:
                # Set source language for tokenizer
                self.tokenizer.src_lang = source_lang

                # Encode input
                encoded = self.tokenizer(text, return_tensors="pt").to(self.device)

                # Generate translation; no_grad skips autograd bookkeeping
                # (inference only, identical output, less memory).
                with torch.no_grad():
                    generated_tokens = self.model.generate(
                        **encoded,
                        forced_bos_token_id=self.tokenizer.get_lang_id(target_lang),
                        max_length=512,
                        num_beams=4,
                        early_stopping=True
                    )

                # Decode result
                translation = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

            # Cache the result
            self.cache.set(text, source_lang, target_lang, translation)

            processing_time = time.time() - start_time
            logger.info(f"Translation completed in {processing_time:.2f} seconds")

            return translation, processing_time

        except Exception as e:
            logger.error(f"Translation error: {e}")
            return f"Translation error: {str(e)}", time.time() - start_time
153
 
154
# Language mappings for the M2M100 model: human-readable name -> ISO 639-1 code.
LANGUAGE_MAP = {
    # Western / Central / Northern Europe
    "English": "en", "French": "fr", "German": "de", "Spanish": "es",
    "Italian": "it", "Portuguese": "pt", "Dutch": "nl", "Polish": "pl",
    "Swedish": "sv", "Norwegian": "no", "Danish": "da", "Finnish": "fi",
    "Greek": "el", "Czech": "cs", "Slovak": "sk", "Hungarian": "hu",
    "Romanian": "ro", "Bulgarian": "bg", "Croatian": "hr", "Serbian": "sr",
    "Slovenian": "sl", "Lithuanian": "lt", "Latvian": "lv", "Estonian": "et",
    # Eastern Europe / Central Asia / Caucasus
    "Russian": "ru", "Ukrainian": "uk", "Belarusian": "be", "Kazakh": "kk",
    "Uzbek": "uz", "Georgian": "ka", "Armenian": "hy", "Azerbaijani": "az",
    "Mongolian": "mn", "Turkish": "tr",
    # Middle East / South Asia
    "Persian (Farsi)": "fa", "Arabic": "ar", "Hebrew": "he", "Hindi": "hi",
    "Bengali": "bn", "Urdu": "ur", "Tamil": "ta", "Telugu": "te",
    "Malayalam": "ml", "Kannada": "kn", "Gujarati": "gu", "Punjabi": "pa",
    "Marathi": "mr", "Nepali": "ne", "Sinhala": "si",
    # East / Southeast Asia
    "Chinese (Simplified)": "zh", "Japanese": "ja", "Korean": "ko",
    "Thai": "th", "Vietnamese": "vi", "Indonesian": "id", "Malay": "ms",
    "Burmese": "my", "Khmer": "km", "Lao": "lo",
    # Africa
    "Afrikaans": "af", "Amharic": "am", "Yoruba": "yo", "Igbo": "ig",
    "Hausa": "ha", "Swahili": "sw", "Xhosa": "xh", "Zulu": "zu",
}
224
 
225
# Initialize translator with configurable cache duration.
# Module-level singleton: created lazily (or by initialize_translator below)
# because constructing MultilingualTranslator loads the full model.
translator = None
cache_duration = 60  # Default 60 minutes; tracks the TTL translator was built with
 
 
 
 
 
 
 
 
 
 
 
 
 
228
 
229
def initialize_translator(cache_minutes):
    """(Re)create the module-level ``translator`` with the given cache TTL.

    NOTE(review): constructing MultilingualTranslator loads the full
    M2M100 model, so this is an expensive call — avoid calling it more
    often than necessary.
    """
    global translator
    translator = MultilingualTranslator(cache_minutes)
 
 
 
 
 
232
 
233
def translate_interface(text, source_lang, target_lang, cache_minutes_input):
    """Main translation interface function for the Gradio UI.

    Returns a (translation_or_message, "seconds") pair of strings.

    Fixes: input is validated *before* the expensive model is touched
    (previously an empty request could trigger a full model load), and a
    changed cache TTL now swaps only the cache instead of rebuilding the
    whole translator (which reloaded the 1.2B model just to change a TTL).
    """
    global translator, cache_duration

    # Validate input before touching the (expensive) model.
    if not text.strip():
        return "Please enter text to translate", "0.00"

    if source_lang == target_lang:
        return "Source and target languages cannot be the same", "0.00"

    # Get language codes
    source_code = LANGUAGE_MAP.get(source_lang)
    target_code = LANGUAGE_MAP.get(target_lang)

    if not source_code or not target_code:
        return "Invalid language selection", "0.00"

    # Initialize translator if needed
    if translator is None:
        translator = MultilingualTranslator(cache_minutes_input)
        cache_duration = cache_minutes_input
    elif cache_minutes_input != cache_duration:
        # Only swap the cache: rebuilding MultilingualTranslator would
        # reload the full model from disk for a mere TTL change.
        cache_duration = cache_minutes_input
        translator.cache = TranslationCache(cache_minutes_input)

    try:
        translation, processing_time = translator.translate_text(text, source_code, target_code)
        return translation, f"{processing_time:.2f}"
    except Exception as e:
        return f"Error: {str(e)}", "0.00"
264
 
265
# API endpoint for WordPress plugin
def api_translate(text: str, source_lang: str, target_lang: str, api_key: str = None):
    """API endpoint for external calls.

    Returns a JSON-serializable dict; on success it carries the translation
    plus timing metadata, otherwise an ``error``/``status`` pair.
    ``api_key`` is accepted but not yet validated (hook point for auth).
    """
    # Guard clauses: reject bad input before any model work.
    if not text.strip():
        return {"error": "No text provided", "status": "error"}

    source_code = LANGUAGE_MAP.get(source_lang)
    target_code = LANGUAGE_MAP.get(target_lang)
    if source_code is None or target_code is None:
        return {"error": "Invalid language codes", "status": "error"}

    try:
        # Lazily create the shared translator on first use.
        if translator is None:
            initialize_translator(60)

        translation, processing_time = translator.translate_text(text, source_code, target_code)
    except Exception as e:
        return {"error": str(e), "status": "error"}

    return {
        "translation": translation,
        "source_language": source_lang,
        "target_language": target_lang,
        "processing_time": processing_time,
        "character_count": len(text),
        "status": "success"
    }
294
 
295
# Initialize translator with default settings.
# NOTE(review): this loads the full M2M100 model at import time, so startup
# is slow but the first request is served immediately.
initialize_translator(60)
 
 
 
297
 
298
# Create Gradio interface.
# Component construction order defines the page layout; event wiring at the
# bottom connects the button/Enter key to translate_interface.
with gr.Blocks(title="Advanced Multilingual Translator", theme=gr.themes.Soft()) as demo:
    # Page header / feature summary.
    gr.Markdown(
        """
        # 🌍 Advanced Multilingual Translator

        **Powered by Facebook's M2M100 Model**

        Features:
        - Support for 100+ languages
        - Intelligent caching system
        - GPU acceleration when available
        - Concurrent user support
        - API endpoints for WordPress integration
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            # Left column: runtime settings.
            gr.Markdown("### ⚙️ Settings")
            # Slider value is passed into translate_interface on every call.
            cache_minutes = gr.Slider(
                minimum=5,
                maximum=1440,
                value=60,
                step=5,
                label="Cache Duration (minutes)",
                info="How long to keep translations in cache"
            )

            # Read-only display of the compute device chosen at page build.
            device_info = gr.Textbox(
                value=f"Device: {torch.device('cuda' if torch.cuda.is_available() else 'cpu')}",
                label="System Info",
                interactive=False
            )

        with gr.Column(scale=2):
            # Right column: the translation workflow.
            gr.Markdown("### 📝 Translation")

            with gr.Row():
                source_lang = gr.Dropdown(
                    choices=list(LANGUAGE_MAP.keys()),
                    value="English",
                    label="Source Language"
                )
                target_lang = gr.Dropdown(
                    choices=list(LANGUAGE_MAP.keys()),
                    value="Persian (Farsi)",
                    label="Target Language"
                )

            input_text = gr.Textbox(
                lines=5,
                placeholder="Enter text to translate...",
                label="Input Text"
            )

            translate_btn = gr.Button("🔄 Translate", variant="primary")

            output_text = gr.Textbox(
                lines=5,
                label="Translation",
                interactive=False
            )

            processing_time = gr.Textbox(
                label="Processing Time (seconds)",
                interactive=False
            )

    # Event handlers
    translate_btn.click(
        fn=translate_interface,
        inputs=[input_text, source_lang, target_lang, cache_minutes],
        outputs=[output_text, processing_time]
    )

    # Auto-translate on Enter
    input_text.submit(
        fn=translate_interface,
        inputs=[input_text, source_lang, target_lang, cache_minutes],
        outputs=[output_text, processing_time]
    )

    # Footer: API usage documentation for the WordPress plugin.
    gr.Markdown(
        """
        ---
        ### 🔌 API Endpoint

        **For WordPress Plugin Integration:**

        `POST /api/translate`

        **Parameters:**
        - `text`: Text to translate
        - `source_lang`: Source language name
        - `target_lang`: Target language name
        - `api_key`: API key (optional)

        **Response:**
        ```json
        {
            "translation": "Translated text",
            "source_language": "English",
            "target_language": "Persian (Farsi)",
            "processing_time": 1.23,
            "character_count": 100,
            "status": "success"
        }
        ```
        """
    )
409
 
410
# For API access: alias so hosts/tools expecting a module-level `app`
# object can serve the Blocks instance.
app = demo
 
 
 
 
 
 
 
412
 
413
if __name__ == "__main__":
    # Fix: `enable_queue` is no longer a valid launch() kwarg in Gradio 4.x
    # (it raises TypeError); request queueing is configured on the Blocks
    # object via .queue() instead, which works on both 3.x and 4.x.
    demo.queue()
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (required on Spaces/containers)
        server_port=7860,
        share=True,             # NOTE(review): ignored/warned on HF Spaces; useful locally
        max_threads=10
    )