Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,96 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import time
|
| 2 |
import requests
|
| 3 |
import json
|
| 4 |
-
from typing import Optional
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
|
| 27 |
data = {
|
| 28 |
-
"model":
|
| 29 |
"messages": [
|
| 30 |
-
{"role": "system", "content": "You are a professional content writer specializing in creating high-quality glossary entries."},
|
| 31 |
{"role": "user", "content": prompt}
|
| 32 |
],
|
| 33 |
"max_tokens": max_tokens,
|
| 34 |
-
"temperature": 0.7
|
|
|
|
|
|
|
|
|
|
| 35 |
}
|
| 36 |
|
| 37 |
for attempt in range(max_retries):
|
| 38 |
try:
|
| 39 |
-
print(f"
|
|
|
|
| 40 |
response = requests.post(url, headers=headers, json=data, timeout=60)
|
|
|
|
| 41 |
|
| 42 |
if response.status_code == 200:
|
| 43 |
result = response.json()
|
| 44 |
content = result['choices'][0]['message']['content'].strip()
|
| 45 |
-
print(
|
| 46 |
return content
|
| 47 |
|
|
|
|
|
|
|
|
|
|
| 48 |
elif response.status_code == 429:
|
| 49 |
-
# Rate limit hit
|
| 50 |
try:
|
| 51 |
error_data = response.json()
|
| 52 |
-
error_msg = error_data.get('error', {}).get('message', '')
|
| 53 |
except:
|
| 54 |
error_msg = "Rate limit exceeded"
|
| 55 |
|
| 56 |
print(f"β³ Rate limit hit: {error_msg}")
|
| 57 |
|
| 58 |
-
#
|
| 59 |
retry_after = response.headers.get('retry-after')
|
| 60 |
if retry_after:
|
| 61 |
-
wait_time = int(retry_after)
|
| 62 |
-
print(f"β° Waiting {wait_time} seconds as suggested by API...")
|
| 63 |
else:
|
| 64 |
-
wait_time = (2 ** attempt) *
|
| 65 |
-
print(f"β° Waiting {wait_time} seconds (exponential backoff)...")
|
| 66 |
|
| 67 |
-
if attempt < max_retries - 1:
|
|
|
|
| 68 |
time.sleep(wait_time)
|
| 69 |
continue
|
| 70 |
else:
|
| 71 |
-
return f"β Rate limit exceeded
|
| 72 |
-
|
| 73 |
-
elif response.status_code == 401:
|
| 74 |
-
return "β Invalid API key. Please check your OpenAI API key."
|
| 75 |
|
| 76 |
elif response.status_code == 404:
|
| 77 |
-
|
| 78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
else:
|
| 81 |
# Other errors
|
| 82 |
try:
|
| 83 |
error_data = response.json()
|
| 84 |
-
error_msg = error_data.get('error', {}).get('message', '
|
| 85 |
except:
|
| 86 |
error_msg = f"HTTP {response.status_code}"
|
| 87 |
|
| 88 |
-
if "insufficient_quota" in error_msg.lower():
|
| 89 |
-
return "β OpenAI API quota exceeded. Please check your billing at https://platform.openai.com/usage"
|
| 90 |
-
|
| 91 |
print(f"β API Error: {error_msg}")
|
|
|
|
| 92 |
if attempt < max_retries - 1:
|
| 93 |
-
|
|
|
|
| 94 |
continue
|
| 95 |
else:
|
| 96 |
return f"β API Error: {error_msg}"
|
|
@@ -98,10 +174,18 @@ def _call_openai_with_retry(self, api_key: str, prompt: str, model: str = "gpt-4
|
|
| 98 |
except requests.exceptions.Timeout:
|
| 99 |
print(f"β° Request timeout on attempt {attempt + 1}")
|
| 100 |
if attempt < max_retries - 1:
|
| 101 |
-
time.sleep(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
continue
|
| 103 |
else:
|
| 104 |
-
return "β
|
| 105 |
|
| 106 |
except Exception as e:
|
| 107 |
print(f"β Unexpected error on attempt {attempt + 1}: {str(e)}")
|
|
@@ -110,22 +194,453 @@ def _call_openai_with_retry(self, api_key: str, prompt: str, model: str = "gpt-4
|
|
| 110 |
continue
|
| 111 |
else:
|
| 112 |
return f"β Unexpected error: {str(e)}"
|
| 113 |
-
|
| 114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
if
|
| 124 |
-
return "β
|
| 125 |
-
|
| 126 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
else:
|
| 128 |
-
return "
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
-
|
| 131 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
from typing import Dict, List, Tuple
|
| 4 |
import time
|
| 5 |
import requests
|
| 6 |
import json
|
|
|
|
| 7 |
|
| 8 |
+
class GlossaryGenerator:
    """Generates, analyzes, and updates glossary entries via the OpenAI chat API.

    Holds the canonical 6-section glossary template used by every prompt, plus
    simple client-side rate-limit state shared by all API calls.
    """

    def __init__(self):
        # Canonical template embedded verbatim in every prompt sent to the model.
        # NOTE: this is runtime prompt text, not documentation — do not edit casually.
        self.template = """
**Glossary Page Template**
Use this template to create individual glossary pages for specific terms. Fill in each section with relevant information.
**[TERM NAME]**
**1. Introduction / Brief Definition (AI Overview)**
* **Purpose:** Provide the absolute clearest, most concise, and direct answer to "What is [TERM NAME]?" This should be a short, no-fluff definition, similar to an AI overview or a quick dictionary entry.
* **Content:** Start immediately with the core definition. Get straight to the point.
**2. Detailed Explanation**
* **Purpose:** Expand on the brief definition, offering a comprehensive explanation of the term. This section should follow a "reverse pyramid" structure, meaning the most critical information is presented first, followed by supporting details.
* **Content:**
* **Elaborate on the core concept:** Build upon the initial definition, providing more depth and context.
* **Explore related questions (PAA / Query Fans):** Anticipate what users might ask next or what related topics they might search for. Integrate answers to "People Also Ask" (PAA) type questions or expand into "query fan" concepts that naturally branch off the main term.
* Provide context, background, or the purpose of the term.
* Include key characteristics, functions, or processes associated with the term.
* Use examples to illustrate the concept clearly.
* Break down complex ideas into simpler parts.
**3. Key Concepts / Components (Optional)**
* **Purpose:** If the term has distinct sub-sections, components, or related key ideas that warrant separate discussion, list and explain them here.
* **Content:**
* Use bullet points or sub-headings for each key concept.
* Briefly define and explain each component.
**4. Importance / Application (Optional)**
* **Purpose:** Explain why the term is significant, its impact, or how it is applied in real-world scenarios.
* **Content:** Discuss the relevance, benefits, challenges, or practical uses of the term.
**5. Related Terms / Concepts**
* **Purpose:** Link to other relevant terms within your glossary or related concepts that readers might find useful for further understanding.
* **Content:**
* List terms that are closely associated or often discussed alongside the current term.
**6. Sources / References**
* **Purpose:** Cite the sources from which the information was gathered. This adds credibility and allows readers to explore further.
* **Content:**
* List URLs, book titles, or other references.
"""
        # Track request timing to avoid rate limits.
        # last_request_time is a time.time() timestamp of the most recent call;
        # 0 means "no call made yet" so the first request never waits.
        self.last_request_time = 0
        self.min_request_interval = 2.0  # Minimum 2 seconds between requests
|
| 46 |
+
|
| 47 |
+
def _test_api_key(self, api_key: str) -> bool:
|
| 48 |
+
"""Test OpenAI API key with minimal request"""
|
| 49 |
+
if not api_key or not api_key.strip():
|
| 50 |
+
return False
|
| 51 |
+
|
| 52 |
+
try:
|
| 53 |
+
# Test with minimal request using GPT-3.5-turbo (cheaper and more reliable)
|
| 54 |
+
test_result = self._call_openai_safe(api_key.strip(), "Hi", "gpt-3.5-turbo", 1)
|
| 55 |
+
return not test_result.startswith("β")
|
| 56 |
+
|
| 57 |
+
except Exception as e:
|
| 58 |
+
print(f"API test error: {e}")
|
| 59 |
+
return False
|
| 60 |
+
|
| 61 |
+
def _wait_for_rate_limit(self):
|
| 62 |
+
"""Ensure minimum time between requests"""
|
| 63 |
+
current_time = time.time()
|
| 64 |
+
time_since_last = current_time - self.last_request_time
|
| 65 |
+
|
| 66 |
+
if time_since_last < self.min_request_interval:
|
| 67 |
+
sleep_time = self.min_request_interval - time_since_last
|
| 68 |
+
print(f"β° Waiting {sleep_time:.1f}s to respect rate limits...")
|
| 69 |
+
time.sleep(sleep_time)
|
| 70 |
+
|
| 71 |
+
self.last_request_time = time.time()
|
| 72 |
+
|
| 73 |
+
    def _call_openai_safe(self, api_key: str, prompt: str, model: str = "gpt-3.5-turbo", max_tokens: int = 2000, max_retries: int = 2) -> str:
        """Safe OpenAI API call with proper rate limiting and error handling.

        Sends *prompt* to the chat-completions endpoint and returns either the
        model's reply text or a human-readable error string (error strings are
        prefixed with an error marker so callers can detect failure via
        ``startswith``). Retries up to *max_retries* times on rate limits,
        timeouts, connection errors and unexpected exceptions.
        """

        # Fail fast on a missing key — no network round-trip needed.
        if not api_key or not api_key.strip():
            return "π Please enter your OpenAI API key above to generate content."

        clean_key = api_key.strip()

        # Wait to respect rate limits (enforces min_request_interval spacing).
        self._wait_for_rate_limit()

        url = "https://api.openai.com/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {clean_key}",
            "Content-Type": "application/json"
        }
        data = {
            "model": model,
            "messages": [
                {"role": "system", "content": "You are a professional content writer specializing in creating high-quality glossary entries. You follow templates precisely and create comprehensive, well-structured content."},
                {"role": "user", "content": prompt}
            ],
            "max_tokens": max_tokens,
            "temperature": 0.7,
            "top_p": 1,
            "frequency_penalty": 0,
            "presence_penalty": 0
        }

        for attempt in range(max_retries):
            try:
                print(f"π Making API call to {model} (attempt {attempt + 1}/{max_retries})")

                response = requests.post(url, headers=headers, json=data, timeout=60)
                print(f"π Response status: {response.status_code}")

                if response.status_code == 200:
                    result = response.json()
                    content = result['choices'][0]['message']['content'].strip()
                    print("β API call successful")
                    return content

                elif response.status_code == 401:
                    # Bad credentials: retrying cannot help, return immediately.
                    return "β Invalid API key. Please check your OpenAI API key."

                elif response.status_code == 429:
                    # Rate limit hit - get retry time from headers
                    try:
                        error_data = response.json()
                        error_msg = error_data.get('error', {}).get('message', 'Rate limit exceeded')
                    except:
                        error_msg = "Rate limit exceeded"

                    print(f"β³ Rate limit hit: {error_msg}")

                    # Get retry-after from headers or use exponential backoff
                    retry_after = response.headers.get('retry-after')
                    if retry_after:
                        wait_time = min(int(retry_after), 60)  # Cap at 60 seconds
                    else:
                        wait_time = min((2 ** attempt) * 3, 30)  # Exponential backoff, capped at 30s

                    if attempt < max_retries - 1:
                        print(f"β° Waiting {wait_time} seconds before retry...")
                        time.sleep(wait_time)
                        continue
                    else:
                        return f"β Rate limit exceeded. Please wait a few minutes and try again. (Try using a different model or reducing request frequency)"

                elif response.status_code == 404:
                    # Model name not recognized by the API for this account.
                    return f"β Model '{model}' not available. Try using 'gpt-3.5-turbo' or 'gpt-4o-mini' instead."

                elif response.status_code == 403:
                    try:
                        error_data = response.json()
                        error_msg = error_data.get('error', {}).get('message', 'Access denied')
                        if "insufficient_quota" in error_msg.lower():
                            return "β OpenAI API quota exceeded. Please check your billing at https://platform.openai.com/usage"
                        else:
                            return f"β Access denied: {error_msg}"
                    except:
                        return "β Access denied. Check your API key permissions."

                else:
                    # Other errors: extract the API's message when the body is
                    # JSON, otherwise fall back to the bare HTTP status.
                    try:
                        error_data = response.json()
                        error_msg = error_data.get('error', {}).get('message', f'HTTP {response.status_code}')
                    except:
                        error_msg = f"HTTP {response.status_code}"

                    print(f"β API Error: {error_msg}")

                    if attempt < max_retries - 1:
                        print(f"β° Waiting 3 seconds before retry...")
                        time.sleep(3)
                        continue
                    else:
                        return f"β API Error: {error_msg}"

            except requests.exceptions.Timeout:
                print(f"β° Request timeout on attempt {attempt + 1}")
                if attempt < max_retries - 1:
                    time.sleep(5)
                    continue
                else:
                    return "β Request timeout. Please try again."

            except requests.exceptions.ConnectionError:
                print(f"π Connection error on attempt {attempt + 1}")
                if attempt < max_retries - 1:
                    time.sleep(3)
                    continue
                else:
                    return "β Connection error. Please check your internet connection."

            except Exception as e:
                print(f"β Unexpected error on attempt {attempt + 1}: {str(e)}")
                # NOTE(review): two lines of this handler were unchanged in the
                # diff and hidden from view; the retry guard below is the
                # syntactically required reconstruction — confirm against the
                # original file.
                if attempt < max_retries - 1:
                    continue
                else:
                    return f"β Unexpected error: {str(e)}"

        return "β All retry attempts failed. Please try again later."
|
| 199 |
+
|
| 200 |
+
    def generate_new_content(self, api_key: str, term: str, context: str = "", target_audience: str = "general", preferred_model: str = "gpt-3.5-turbo") -> str:
        """Generate a brand-new glossary entry for *term*.

        Builds a single prompt around ``self.template`` and returns whatever
        ``_call_openai_safe`` returns (content on success, an error string
        otherwise).
        """

        if not term.strip():
            return "Please enter a term to generate content for."

        # Choose appropriate max_tokens based on model (GPT-4 variants get a
        # larger budget for longer entries).
        max_tokens = 2500 if "gpt-4" in preferred_model else 2000

        # Runtime prompt text — edits here change model behavior.
        prompt = f"""
Create a comprehensive glossary entry for the term "{term}" following this EXACT template structure:
{self.template}
**Requirements:**
- Replace [TERM NAME] with "{term}"
- Target Audience: {target_audience}
- Additional Context: {context if context else "No additional context provided"}
- Fill in ALL sections with relevant, accurate information
- Use the "reverse pyramid" structure - most important info first
- Include relevant PAA (People Also Ask) questions in section 2
- Remove optional sections only if truly not applicable
- Maintain clear, concise language
- Provide at least 3 related terms in section 5
- Include credible sources/references in section 6
**Focus Areas:**
- Make the brief definition crystal clear and direct
- Expand thoroughly in the detailed explanation
- Include practical examples and use cases
- Address common questions people might have
- Ensure professional, authoritative tone
Generate the complete glossary entry now:
"""

        return self._call_openai_safe(api_key, prompt, preferred_model, max_tokens)
|
| 233 |
+
|
| 234 |
+
    def update_existing_content(self, api_key: str, term: str, existing_content: str, update_instructions: str = "", preferred_model: str = "gpt-3.5-turbo") -> Tuple[str, str]:
        """Analyze existing content and produce an improved version.

        Makes TWO sequential API calls (analysis, then rewrite) with an extra
        pause between them to stay under rate limits. Returns a tuple of
        ``(recommendations, updated_content)``; on analysis failure the error
        string is returned as recommendations with an empty second element.
        """

        if not term.strip() or not existing_content.strip():
            return "Please provide both term and existing content.", ""

        # Choose appropriate max_tokens based on model.
        max_tokens = 1500 if "gpt-4" in preferred_model else 1200

        # First, analyze the content (runtime prompt text).
        analysis_prompt = f"""
Analyze this existing glossary content for "{term}" against the template standard and provide specific improvement recommendations.
**EXISTING CONTENT:**
{existing_content}
**TEMPLATE STANDARD:**
{self.template}
**UPDATE INSTRUCTIONS:** {update_instructions if update_instructions else "General content improvement"}
**Provide a detailed analysis covering:**
1. **STRUCTURAL ANALYSIS:**
- Does it follow the template structure?
- Which sections are missing or incomplete?
- Is the reverse pyramid structure implemented?
2. **CONTENT QUALITY ASSESSMENT:**
- Clarity and conciseness of the brief definition
- Depth and comprehensiveness of detailed explanation
- Relevance and usefulness of examples
- Quality of related terms and references
3. **SPECIFIC RECOMMENDATIONS (prioritized):**
- HIGH PRIORITY: Critical improvements needed
- MEDIUM PRIORITY: Important enhancements
- LOW PRIORITY: Nice-to-have improvements
4. **SEO & USER EXPERIENCE:**
- Missing PAA questions to address
- Keyword opportunities
- Cross-linking possibilities
- Readability improvements
5. **SOURCES & CREDIBILITY:**
- Quality of current references
- Missing authoritative sources
- Fact-checking requirements
Format as a professional content analysis report.
"""

        recommendations = self._call_openai_safe(api_key, analysis_prompt, preferred_model, max_tokens)

        # Error strings are prefixed with the error marker; bail out early so
        # we don't burn a second API call on a doomed request.
        if recommendations.startswith("β"):
            return recommendations, ""

        # Add extra wait time between the two requests.
        print("β° Waiting between analysis and content generation...")
        time.sleep(3)

        # Then generate updated content (runtime prompt text).
        update_prompt = f"""
Create an improved version of the glossary entry for "{term}" based on the analysis and recommendations.
**ORIGINAL CONTENT:**
{existing_content}
**ANALYSIS & RECOMMENDATIONS:**
{recommendations}
**TEMPLATE TO FOLLOW:**
{self.template}
**UPDATE INSTRUCTIONS:** {update_instructions if update_instructions else "Apply the key recommendations from the analysis"}
**Create the improved glossary entry that:**
1. Follows the template structure exactly
2. Implements the high and medium priority recommendations
3. Maintains the best elements from the original
4. Adds missing sections or information
5. Improves clarity, structure, and usefulness
6. Includes better examples and explanations
7. Enhances SEO and user experience
Generate the complete, improved glossary entry:
"""

        # The rewrite gets a slightly larger token budget than the analysis.
        updated_content = self._call_openai_safe(api_key, update_prompt, preferred_model, max_tokens + 500)

        return recommendations, updated_content
|
| 310 |
+
|
| 311 |
+
    def create_outline_brief(self, api_key: str, topic: str, scope: str = "comprehensive", preferred_model: str = "gpt-3.5-turbo") -> str:
        """Create a strategic content brief for building a glossary on *topic*.

        Single API call; returns the brief text or an error string from
        ``_call_openai_safe``.
        """

        if not topic.strip():
            return "Please enter a topic for the outline."

        # Choose appropriate max_tokens based on model (briefs are the longest
        # output this app produces, hence the larger budgets).
        max_tokens = 3000 if "gpt-4" in preferred_model else 2500

        # Runtime prompt text — edits here change model behavior.
        prompt = f"""
Create a comprehensive content brief for developing a glossary focused on "{topic}".
**Scope:** {scope}
**Template Standard:** Follow the 6-section template structure provided
**Create a detailed brief covering:**
**1. TOPIC OVERVIEW & STRATEGY**
- Comprehensive topic definition and boundaries
- Target audience analysis and segmentation
- Content complexity and depth recommendations
- Competitive landscape and differentiation opportunities
**2. TERM IDENTIFICATION & PRIORITIZATION**
- **Primary Terms (10-15 key terms):** Most important, high-search volume terms
- **Secondary Terms (8-12 supporting terms):** Important supporting concepts
- **Long-tail Terms (5-10 specific terms):** Niche but valuable terms
- **Priority Matrix:** High/Medium/Low priority for each term with reasoning
**3. CONTENT ARCHITECTURE**
- Template section recommendations for each term type
- Suggested content depth and length for each priority level
- Cross-linking strategy between terms
- Information hierarchy and user journey mapping
**4. RESEARCH & DEVELOPMENT PLAN**
- **Primary Sources:** Authoritative websites, publications, studies
- **Expert Sources:** Industry leaders, academic researchers, practitioners
- **User Research:** Common questions, search patterns, knowledge gaps
- **Competitive Analysis:** What others are doing well/poorly
**5. SEO & DISCOVERABILITY STRATEGY**
- **Primary Keywords:** Main search terms for each priority level
- **Long-tail Keywords:** Specific phrases users search for
- **PAA Questions:** "People Also Ask" questions to address
- **Content Gap Analysis:** Opportunities competitors are missing
- **Internal Linking Strategy:** How terms connect to each other
**6. PRODUCTION ROADMAP**
- **Phase 1:** High-priority terms (timeline and resource allocation)
- **Phase 2:** Secondary terms and enhancements
- **Phase 3:** Long-tail terms and optimization
- **Resource Requirements:** Estimated hours per term type
- **Quality Assurance:** Review process and standards
- **Maintenance Plan:** Update frequency and monitoring
**7. SUCCESS METRICS & KPIs**
- Content quality indicators
- User engagement metrics
- SEO performance targets
- Conversion and utility measurements
Create a comprehensive, actionable brief that will guide the entire glossary development process.
"""

        return self._call_openai_safe(api_key, prompt, preferred_model, max_tokens)
|
| 367 |
|
| 368 |
+
def create_gradio_interface():
    """Create and return the Gradio Blocks UI for the glossary generator.

    Wires four tabs (generate, update, brief, template reference) plus an
    API-key entry/test row to a single shared GlossaryGenerator instance.
    Returns the (unlaunched) ``gr.Blocks`` demo.
    """

    generator = GlossaryGenerator()

    def test_api_key(api_key):
        """Validate key format locally, then verify it with a live probe call."""
        if not api_key or not api_key.strip():
            return "β Please enter your OpenAI API key"

        # Basic format check (cheap, no network).
        clean_key = api_key.strip()
        if not clean_key.startswith('sk-'):
            return "β Invalid format - OpenAI API keys should start with 'sk-'"

        if len(clean_key) < 20:
            return "β API key too short - check if you copied the full key"

        # Test the key with a real (minimal) API call.
        if generator._test_api_key(clean_key):
            return "β API key is valid and connected!"
        else:
            return "β API key test failed - check your key and try again"

    # Thin wrappers so Gradio callbacks validate input before hitting the API.
    def generate_new_wrapper(api_key, term, context, audience, model):
        if not term.strip():
            return "Please enter a term to generate content for."
        return generator.generate_new_content(api_key, term, context, audience, model)

    def update_existing_wrapper(api_key, term, existing_content, update_instructions, model):
        if not term.strip() or not existing_content.strip():
            return "Please provide both term and existing content.", ""
        recommendations, updated_content = generator.update_existing_content(api_key, term, existing_content, update_instructions, model)
        return recommendations, updated_content

    def create_outline_wrapper(api_key, topic, scope, model):
        if not topic.strip():
            return "Please enter a topic for the outline."
        return generator.create_outline_brief(api_key, topic, scope, model)

    # Create the Gradio interface
    with gr.Blocks(title="Glossary Content Generator", theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        # π Glossary Content Generator (Rate-Limit Safe)

        **Powered by OpenAI** - Professional glossary content creation and optimization tool.

        π **Enter your OpenAI API key below to get started!**
        β **Now with improved rate limiting and error handling**
        """)

        # API Key Section
        with gr.Row():
            with gr.Column(scale=2):
                api_key_input = gr.Textbox(
                    label="π OpenAI API Key",
                    placeholder="sk-proj-... or sk-...",
                    type="password",
                    info="Get your API key at https://platform.openai.com/api-keys"
                )
            with gr.Column(scale=1):
                test_key_btn = gr.Button("Test Key", variant="secondary")
                manual_test_btn = gr.Button("Skip Test & Try Anyway", variant="outline", size="sm")
            with gr.Column(scale=1):
                key_status = gr.Textbox(
                    label="Status",
                    value="β No API key provided",
                    interactive=False,
                    max_lines=2
                )

        def manual_override(api_key):
            # Lets the user bypass the live key test (useful when the probe
            # call itself is rate-limited).
            if api_key and api_key.strip():
                return "β οΈ Test skipped - trying anyway..."
            return "β Please enter an API key first"

        test_key_btn.click(
            test_api_key,
            inputs=[api_key_input],
            outputs=[key_status]
        )

        manual_test_btn.click(
            manual_override,
            inputs=[api_key_input],
            outputs=[key_status]
        )

        gr.Markdown("---")

        with gr.Tabs():
            # Tab 1: Generate New Content
            with gr.TabItem("π Generate New Content"):
                gr.Markdown("### Create a new glossary entry from scratch")

                with gr.Row():
                    with gr.Column(scale=1):
                        new_term = gr.Textbox(
                            label="Term to Define",
                            placeholder="e.g., Machine Learning, CPQ, SEO, API",
                            lines=1
                        )
                        new_context = gr.Textbox(
                            label="Additional Context (Optional)",
                            placeholder="Provide industry context, specific use cases, or background information",
                            lines=3
                        )
                        new_audience = gr.Dropdown(
                            label="Target Audience",
                            choices=["general", "technical", "business", "beginner", "expert"],
                            value="general"
                        )
                        new_model = gr.Dropdown(
                            label="AI Model",
                            choices=["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4-turbo"],
                            value="gpt-3.5-turbo",
                            info="gpt-3.5-turbo is recommended for speed and cost"
                        )
                        generate_btn = gr.Button("π Generate Content", variant="primary", size="lg")

                    with gr.Column(scale=2):
                        new_output = gr.Textbox(
                            label="Generated Glossary Entry",
                            lines=25,
                            max_lines=30,
                            show_copy_button=True
                        )

                generate_btn.click(
                    generate_new_wrapper,
                    inputs=[api_key_input, new_term, new_context, new_audience, new_model],
                    outputs=[new_output]
                )

                # Add examples
                gr.Markdown("**π‘ Example Terms:** API, Machine Learning, Blockchain, SaaS, Customer Journey, A/B Testing")

            # Tab 2: Update Existing Content
            with gr.TabItem("π Update Existing Content"):
                gr.Markdown("### Analyze and improve existing glossary entries")

                with gr.Row():
                    with gr.Column(scale=1):
                        update_term = gr.Textbox(
                            label="Term Name",
                            placeholder="Name of the term being updated",
                            lines=1
                        )
                        existing_content = gr.Textbox(
                            label="Existing Content",
                            placeholder="Paste your current glossary entry here",
                            lines=10
                        )
                        update_instructions = gr.Textbox(
                            label="Update Instructions (Optional)",
                            placeholder="e.g., 'Add more technical details', 'Include recent developments', 'Improve SEO focus'",
                            lines=3
                        )
                        update_model = gr.Dropdown(
                            label="AI Model",
                            choices=["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4-turbo"],
                            value="gpt-3.5-turbo",
                            info="Note: This makes 2 API calls (analysis + update)"
                        )
                        update_btn = gr.Button("π Analyze & Update", variant="primary", size="lg")

                    with gr.Column(scale=2):
                        with gr.Row():
                            recommendations_output = gr.Textbox(
                                label="π Analysis & Recommendations",
                                lines=12,
                                max_lines=15,
                                show_copy_button=True
                            )
                        with gr.Row():
                            updated_content_output = gr.Textbox(
                                label="β¨ Updated Content",
                                lines=12,
                                max_lines=15,
                                show_copy_button=True
                            )

                update_btn.click(
                    update_existing_wrapper,
                    inputs=[api_key_input, update_term, existing_content, update_instructions, update_model],
                    outputs=[recommendations_output, updated_content_output]
                )

            # Tab 3: Create Outline/Brief
            with gr.TabItem("π Create Content Brief"):
                gr.Markdown("### Generate a comprehensive strategy brief for glossary development")

                with gr.Row():
                    with gr.Column(scale=1):
                        outline_topic = gr.Textbox(
                            label="Topic/Subject Area",
                            placeholder="e.g., Digital Marketing, Cloud Computing, Artificial Intelligence, E-commerce",
                            lines=1
                        )
                        outline_scope = gr.Dropdown(
                            label="Scope & Depth",
                            choices=["comprehensive", "focused", "basic", "advanced", "specialized"],
                            value="comprehensive"
                        )
                        outline_model = gr.Dropdown(
                            label="AI Model",
                            choices=["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4-turbo"],
                            value="gpt-3.5-turbo",
                            info="Larger briefs may benefit from GPT-4"
                        )
                        outline_btn = gr.Button("π Create Strategic Brief", variant="primary", size="lg")

                    with gr.Column(scale=2):
                        outline_output = gr.Textbox(
                            label="π Content Strategy Brief",
                            lines=25,
                            max_lines=30,
                            show_copy_button=True
                        )

                outline_btn.click(
                    create_outline_wrapper,
                    inputs=[api_key_input, outline_topic, outline_scope, outline_model],
                    outputs=[outline_output]
                )

                gr.Markdown("**π‘ Example Topics:** Digital Marketing, FinTech, SaaS Operations, Data Science, Cybersecurity")

            # Tab 4: Template Reference
            with gr.TabItem("π Template Reference"):
                gr.Markdown("### Official Glossary Template Structure")
                template_display = gr.Textbox(
                    label="Template Guidelines",
                    value=generator.template,
                    lines=35,
                    max_lines=40,
                    interactive=False,
                    show_copy_button=True
                )

        gr.Markdown("""
        ---
        ## π§ How to Get Your OpenAI API Key:

        1. **Visit** [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
        2. **Sign up** or log in to your OpenAI account
        3. **Add billing information** (GPT-4 requires a paid account)
        4. **Create a new secret key**
        5. **Copy and paste** it into the field above

        ## π° **Cost Information:**
        - **GPT-3.5-turbo**: ~$0.0015 per 1K input tokens, ~$0.002 per 1K output tokens (Recommended)
        - **GPT-4**: ~$0.03 per 1K input tokens, ~$0.06 per 1K output tokens
        - **Per generation**: $0.003-0.10 depending on model and content length

        ## β¨ **New Features & Fixes:**
        - π‘οΈ **Rate Limit Protection**: 2-second delays between requests
        - π **Smart Retry Logic**: Exponential backoff on rate limits
        - π« **No More Double Requests**: Removed automatic model fallbacks
        - β‘ **Model Selection**: Choose your preferred AI model
        - π **Better Error Messages**: Clear feedback on what went wrong
        - π― **GPT-3.5 Default**: Faster, cheaper, and more reliable for most tasks

        ## π **Model Recommendations:**
        - **GPT-3.5-turbo**: Best for most glossary content (fast, cheap, high quality)
        - **GPT-4o-mini**: Good balance of quality and cost
        - **GPT-4**: Use for complex technical content or when highest quality needed
        """)

    return demo
|
| 638 |
+
|
| 639 |
+
# Application entry point: build the Gradio UI and serve it.
if __name__ == "__main__":
    gradio_app = create_gradio_interface()
    launch_options = {
        "share": False,            # no public Gradio tunnel; served directly
        "server_name": "0.0.0.0",  # bind all interfaces (required inside Spaces/containers)
        "server_port": 7860,       # Hugging Face Spaces' expected port
    }
    gradio_app.launch(**launch_options)