ryanshelley committed on
Commit
aff8218
·
verified ·
1 Parent(s): c4dadff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -190
app.py CHANGED
@@ -2,8 +2,6 @@ import gradio as gr
2
  import os
3
  from typing import Dict, List, Tuple
4
  import time
5
- import requests
6
- import json
7
 
8
  class GlossaryGenerator:
9
  def __init__(self):
@@ -40,9 +38,6 @@ Use this template to create individual glossary pages for specific terms. Fill i
40
  * **Content:**
41
  * List URLs, book titles, or other references.
42
  """
43
- # Track request timing to avoid rate limits
44
- self.last_request_time = 0
45
- self.min_request_interval = 2.0 # Minimum 2 seconds between requests
46
 
47
  def _test_api_key(self, api_key: str) -> bool:
48
  """Test OpenAI API key with minimal request"""
@@ -50,37 +45,34 @@ Use this template to create individual glossary pages for specific terms. Fill i
50
  return False
51
 
52
  try:
53
- # Test with minimal request using GPT-3.5-turbo (cheaper and more reliable)
54
- test_result = self._call_openai_safe(api_key.strip(), "Hi", "gpt-3.5-turbo", 1)
55
- return not test_result.startswith("❌")
 
 
 
 
 
 
 
 
 
56
 
57
  except Exception as e:
58
  print(f"API test error: {e}")
59
  return False
60
 
61
- def _wait_for_rate_limit(self):
62
- """Ensure minimum time between requests"""
63
- current_time = time.time()
64
- time_since_last = current_time - self.last_request_time
65
-
66
- if time_since_last < self.min_request_interval:
67
- sleep_time = self.min_request_interval - time_since_last
68
- print(f"⏰ Waiting {sleep_time:.1f}s to respect rate limits...")
69
- time.sleep(sleep_time)
70
-
71
- self.last_request_time = time.time()
72
-
73
- def _call_openai_safe(self, api_key: str, prompt: str, model: str = "gpt-3.5-turbo", max_tokens: int = 2000, max_retries: int = 2) -> str:
74
- """Safe OpenAI API call with proper rate limiting and error handling"""
75
 
76
  if not api_key or not api_key.strip():
77
  return "πŸ”‘ Please enter your OpenAI API key above to generate content."
78
 
79
  clean_key = api_key.strip()
80
 
81
- # Wait to respect rate limits
82
- self._wait_for_rate_limit()
83
-
84
  url = "https://api.openai.com/v1/chat/completions"
85
  headers = {
86
  "Authorization": f"Bearer {clean_key}",
@@ -100,112 +92,50 @@ Use this template to create individual glossary pages for specific terms. Fill i
100
  "presence_penalty": 0
101
  }
102
 
103
- for attempt in range(max_retries):
104
- try:
105
- print(f"πŸš€ Making API call to {model} (attempt {attempt + 1}/{max_retries})")
106
-
107
- response = requests.post(url, headers=headers, json=data, timeout=60)
108
- print(f"πŸ“Š Response status: {response.status_code}")
 
 
 
 
 
 
 
 
 
109
 
110
- if response.status_code == 200:
111
- result = response.json()
112
- content = result['choices'][0]['message']['content'].strip()
113
- print("βœ… API call successful")
114
- return content
115
 
116
- elif response.status_code == 401:
117
  return "❌ Invalid API key. Please check your OpenAI API key."
118
-
119
  elif response.status_code == 429:
120
- # Rate limit hit - get retry time from headers
121
- try:
122
- error_data = response.json()
123
- error_msg = error_data.get('error', {}).get('message', 'Rate limit exceeded')
124
- except:
125
- error_msg = "Rate limit exceeded"
126
-
127
- print(f"⏳ Rate limit hit: {error_msg}")
128
-
129
- # Get retry-after from headers or use exponential backoff
130
- retry_after = response.headers.get('retry-after')
131
- if retry_after:
132
- wait_time = min(int(retry_after), 60) # Cap at 60 seconds
133
- else:
134
- wait_time = min((2 ** attempt) * 3, 30) # Exponential backoff, capped at 30s
135
-
136
- if attempt < max_retries - 1:
137
- print(f"⏰ Waiting {wait_time} seconds before retry...")
138
- time.sleep(wait_time)
139
- continue
140
- else:
141
- return f"❌ Rate limit exceeded. Please wait a few minutes and try again. (Try using a different model or reducing request frequency)"
142
-
143
- elif response.status_code == 404:
144
- return f"❌ Model '{model}' not available. Try using 'gpt-3.5-turbo' or 'gpt-4o-mini' instead."
145
-
146
- elif response.status_code == 403:
147
- try:
148
- error_data = response.json()
149
- error_msg = error_data.get('error', {}).get('message', 'Access denied')
150
- if "insufficient_quota" in error_msg.lower():
151
- return "❌ OpenAI API quota exceeded. Please check your billing at https://platform.openai.com/usage"
152
- else:
153
- return f"❌ Access denied: {error_msg}"
154
- except:
155
- return "❌ Access denied. Check your API key permissions."
156
-
157
- else:
158
- # Other errors
159
- try:
160
- error_data = response.json()
161
- error_msg = error_data.get('error', {}).get('message', f'HTTP {response.status_code}')
162
- except:
163
- error_msg = f"HTTP {response.status_code}"
164
-
165
- print(f"❌ API Error: {error_msg}")
166
-
167
- if attempt < max_retries - 1:
168
- print(f"⏰ Waiting 3 seconds before retry...")
169
- time.sleep(3)
170
- continue
171
- else:
172
- return f"❌ API Error: {error_msg}"
173
-
174
- except requests.exceptions.Timeout:
175
- print(f"⏰ Request timeout on attempt {attempt + 1}")
176
- if attempt < max_retries - 1:
177
- time.sleep(5)
178
- continue
179
  else:
180
- return "❌ Request timeout. Please try again."
181
 
182
- except requests.exceptions.ConnectionError:
183
- print(f"🌐 Connection error on attempt {attempt + 1}")
184
- if attempt < max_retries - 1:
185
- time.sleep(3)
186
- continue
187
- else:
188
- return "❌ Connection error. Please check your internet connection."
189
-
190
- except Exception as e:
191
- print(f"❌ Unexpected error on attempt {attempt + 1}: {str(e)}")
192
- if attempt < max_retries - 1:
193
- time.sleep(2)
194
- continue
195
- else:
196
- return f"❌ Unexpected error: {str(e)}"
197
-
198
- return "❌ All retry attempts failed. Please try again later."
199
 
200
- def generate_new_content(self, api_key: str, term: str, context: str = "", target_audience: str = "general", preferred_model: str = "gpt-3.5-turbo") -> str:
201
  """Generate new glossary content for a given term"""
202
 
203
  if not term.strip():
204
  return "Please enter a term to generate content for."
205
 
206
- # Choose appropriate max_tokens based on model
207
- max_tokens = 2500 if "gpt-4" in preferred_model else 2000
208
-
209
  prompt = f"""
210
  Create a comprehensive glossary entry for the term "{term}" following this EXACT template structure:
211
  {self.template}
@@ -229,17 +159,14 @@ Create a comprehensive glossary entry for the term "{term}" following this EXACT
229
  Generate the complete glossary entry now:
230
  """
231
 
232
- return self._call_openai_safe(api_key, prompt, preferred_model, max_tokens)
233
 
234
- def update_existing_content(self, api_key: str, term: str, existing_content: str, update_instructions: str = "", preferred_model: str = "gpt-3.5-turbo") -> Tuple[str, str]:
235
  """Analyze existing content and provide update recommendations"""
236
 
237
  if not term.strip() or not existing_content.strip():
238
  return "Please provide both term and existing content.", ""
239
 
240
- # Choose appropriate max_tokens based on model
241
- max_tokens = 1500 if "gpt-4" in preferred_model else 1200
242
-
243
  # First, analyze the content
244
  analysis_prompt = f"""
245
  Analyze this existing glossary content for "{term}" against the template standard and provide specific improvement recommendations.
@@ -274,14 +201,7 @@ Analyze this existing glossary content for "{term}" against the template standar
274
  Format as a professional content analysis report.
275
  """
276
 
277
- recommendations = self._call_openai_safe(api_key, analysis_prompt, preferred_model, max_tokens)
278
-
279
- if recommendations.startswith("❌"):
280
- return recommendations, ""
281
-
282
- # Add extra wait time between the two requests
283
- print("⏰ Waiting between analysis and content generation...")
284
- time.sleep(3)
285
 
286
  # Then generate updated content
287
  update_prompt = f"""
@@ -304,19 +224,16 @@ Create an improved version of the glossary entry for "{term}" based on the analy
304
  Generate the complete, improved glossary entry:
305
  """
306
 
307
- updated_content = self._call_openai_safe(api_key, update_prompt, preferred_model, max_tokens + 500)
308
 
309
  return recommendations, updated_content
310
 
311
- def create_outline_brief(self, api_key: str, topic: str, scope: str = "comprehensive", preferred_model: str = "gpt-3.5-turbo") -> str:
312
  """Create an outline or brief for new glossary content"""
313
 
314
  if not topic.strip():
315
  return "Please enter a topic for the outline."
316
 
317
- # Choose appropriate max_tokens based on model
318
- max_tokens = 3000 if "gpt-4" in preferred_model else 2500
319
-
320
  prompt = f"""
321
  Create a comprehensive content brief for developing a glossary focused on "{topic}".
322
  **Scope:** {scope}
@@ -363,7 +280,7 @@ Create a comprehensive content brief for developing a glossary focused on "{topi
363
  Create a comprehensive, actionable brief that will guide the entire glossary development process.
364
  """
365
 
366
- return self._call_openai_safe(api_key, prompt, preferred_model, max_tokens)
367
 
368
  def create_gradio_interface():
369
  """Create the Gradio interface for the glossary generator"""
@@ -383,37 +300,38 @@ def create_gradio_interface():
383
  if len(clean_key) < 20:
384
  return "❌ API key too short - check if you copied the full key"
385
 
386
- # Test the key
387
- if generator._test_api_key(clean_key):
388
- return "βœ… API key is valid and connected!"
 
 
389
  else:
390
- return "❌ API key test failed - check your key and try again"
391
 
392
- def generate_new_wrapper(api_key, term, context, audience, model):
393
  if not term.strip():
394
  return "Please enter a term to generate content for."
395
- return generator.generate_new_content(api_key, term, context, audience, model)
396
 
397
- def update_existing_wrapper(api_key, term, existing_content, update_instructions, model):
398
  if not term.strip() or not existing_content.strip():
399
  return "Please provide both term and existing content.", ""
400
- recommendations, updated_content = generator.update_existing_content(api_key, term, existing_content, update_instructions, model)
401
  return recommendations, updated_content
402
 
403
- def create_outline_wrapper(api_key, topic, scope, model):
404
  if not topic.strip():
405
  return "Please enter a topic for the outline."
406
- return generator.create_outline_brief(api_key, topic, scope, model)
407
 
408
  # Create the Gradio interface
409
  with gr.Blocks(title="Glossary Content Generator", theme=gr.themes.Soft()) as demo:
410
  gr.Markdown("""
411
- # πŸ“š Glossary Content Generator (Rate-Limit Safe)
412
 
413
- **Powered by OpenAI** - Professional glossary content creation and optimization tool.
414
 
415
  πŸ”‘ **Enter your OpenAI API key below to get started!**
416
- βœ… **Now with improved rate limiting and error handling**
417
  """)
418
 
419
  # API Key Section
@@ -458,7 +376,7 @@ def create_gradio_interface():
458
  with gr.Tabs():
459
  # Tab 1: Generate New Content
460
  with gr.TabItem("πŸ†• Generate New Content"):
461
- gr.Markdown("### Create a new glossary entry from scratch")
462
 
463
  with gr.Row():
464
  with gr.Column(scale=1):
@@ -477,12 +395,6 @@ def create_gradio_interface():
477
  choices=["general", "technical", "business", "beginner", "expert"],
478
  value="general"
479
  )
480
- new_model = gr.Dropdown(
481
- label="AI Model",
482
- choices=["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4-turbo"],
483
- value="gpt-3.5-turbo",
484
- info="gpt-3.5-turbo is recommended for speed and cost"
485
- )
486
  generate_btn = gr.Button("πŸš€ Generate Content", variant="primary", size="lg")
487
 
488
  with gr.Column(scale=2):
@@ -495,7 +407,7 @@ def create_gradio_interface():
495
 
496
  generate_btn.click(
497
  generate_new_wrapper,
498
- inputs=[api_key_input, new_term, new_context, new_audience, new_model],
499
  outputs=[new_output]
500
  )
501
 
@@ -504,7 +416,7 @@ def create_gradio_interface():
504
 
505
  # Tab 2: Update Existing Content
506
  with gr.TabItem("πŸ”„ Update Existing Content"):
507
- gr.Markdown("### Analyze and improve existing glossary entries")
508
 
509
  with gr.Row():
510
  with gr.Column(scale=1):
@@ -523,12 +435,6 @@ def create_gradio_interface():
523
  placeholder="e.g., 'Add more technical details', 'Include recent developments', 'Improve SEO focus'",
524
  lines=3
525
  )
526
- update_model = gr.Dropdown(
527
- label="AI Model",
528
- choices=["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4-turbo"],
529
- value="gpt-3.5-turbo",
530
- info="Note: This makes 2 API calls (analysis + update)"
531
- )
532
  update_btn = gr.Button("πŸ” Analyze & Update", variant="primary", size="lg")
533
 
534
  with gr.Column(scale=2):
@@ -549,7 +455,7 @@ def create_gradio_interface():
549
 
550
  update_btn.click(
551
  update_existing_wrapper,
552
- inputs=[api_key_input, update_term, existing_content, update_instructions, update_model],
553
  outputs=[recommendations_output, updated_content_output]
554
  )
555
 
@@ -569,12 +475,6 @@ def create_gradio_interface():
569
  choices=["comprehensive", "focused", "basic", "advanced", "specialized"],
570
  value="comprehensive"
571
  )
572
- outline_model = gr.Dropdown(
573
- label="AI Model",
574
- choices=["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4-turbo"],
575
- value="gpt-3.5-turbo",
576
- info="Larger briefs may benefit from GPT-4"
577
- )
578
  outline_btn = gr.Button("πŸ“‹ Create Strategic Brief", variant="primary", size="lg")
579
 
580
  with gr.Column(scale=2):
@@ -587,7 +487,7 @@ def create_gradio_interface():
587
 
588
  outline_btn.click(
589
  create_outline_wrapper,
590
- inputs=[api_key_input, outline_topic, outline_scope, outline_model],
591
  outputs=[outline_output]
592
  )
593
 
@@ -616,22 +516,19 @@ def create_gradio_interface():
616
  5. **Copy and paste** it into the field above
617
 
618
  ## πŸ’° **Cost Information:**
619
- - **GPT-3.5-turbo**: ~$0.0015 per 1K input tokens, ~$0.002 per 1K output tokens (Recommended)
620
- - **GPT-4**: ~$0.03 per 1K input tokens, ~$0.06 per 1K output tokens
621
- - **Per generation**: $0.003-0.10 depending on model and content length
622
-
623
- ## ✨ **New Features & Fixes:**
624
- - πŸ›‘οΈ **Rate Limit Protection**: 2-second delays between requests
625
- - πŸ”„ **Smart Retry Logic**: Exponential backoff on rate limits
626
- - 🚫 **No More Double Requests**: Removed automatic model fallbacks
627
- - ⚑ **Model Selection**: Choose your preferred AI model
628
- - πŸ“Š **Better Error Messages**: Clear feedback on what went wrong
629
- - 🎯 **GPT-3.5 Default**: Faster, cheaper, and more reliable for most tasks
630
-
631
- ## πŸ“Š **Model Recommendations:**
632
- - **GPT-3.5-turbo**: Best for most glossary content (fast, cheap, high quality)
633
- - **GPT-4o-mini**: Good balance of quality and cost
634
- - **GPT-4**: Use for complex technical content or when highest quality needed
635
  """)
636
 
637
  return demo
 
2
  import os
3
  from typing import Dict, List, Tuple
4
  import time
 
 
5
 
6
  class GlossaryGenerator:
7
  def __init__(self):
 
38
  * **Content:**
39
  * List URLs, book titles, or other references.
40
  """
 
 
 
41
 
42
  def _test_api_key(self, api_key: str) -> bool:
43
  """Test OpenAI API key with minimal request"""
 
45
  return False
46
 
47
  try:
48
+ # Import here to avoid import-time issues
49
+ from openai import OpenAI
50
+
51
+ client = OpenAI(api_key=api_key.strip())
52
+
53
+ # Test with minimal request
54
+ response = client.chat.completions.create(
55
+ model="gpt-3.5-turbo",
56
+ messages=[{"role": "user", "content": "Hi"}],
57
+ max_tokens=1
58
+ )
59
+ return True
60
 
61
  except Exception as e:
62
  print(f"API test error: {e}")
63
  return False
64
 
65
+ def _call_openai_raw(self, api_key: str, prompt: str, model: str = "gpt-4", max_tokens: int = 2000) -> str:
66
+ """Direct OpenAI API call using requests"""
67
+ import requests
68
+ import json
 
 
 
 
 
 
 
 
 
 
69
 
70
  if not api_key or not api_key.strip():
71
  return "πŸ”‘ Please enter your OpenAI API key above to generate content."
72
 
73
  clean_key = api_key.strip()
74
 
75
+ # Direct API call to avoid client issues
 
 
76
  url = "https://api.openai.com/v1/chat/completions"
77
  headers = {
78
  "Authorization": f"Bearer {clean_key}",
 
92
  "presence_penalty": 0
93
  }
94
 
95
+ try:
96
+ print(f"πŸš€ Making direct API call to OpenAI...")
97
+ response = requests.post(url, headers=headers, json=data, timeout=60)
98
+ print(f"πŸ“Š Response status: {response.status_code}")
99
+
100
+ if response.status_code == 200:
101
+ result = response.json()
102
+ content = result['choices'][0]['message']['content'].strip()
103
+ print("βœ… API call successful")
104
+ return content
105
+
106
+ else:
107
+ error_data = response.json() if response.content else {}
108
+ error_msg = error_data.get('error', {}).get('message', 'Unknown error')
109
+ error_code = error_data.get('error', {}).get('code', 'unknown')
110
 
111
+ print(f"❌ API call failed: {response.status_code} - {error_msg}")
 
 
 
 
112
 
113
+ if response.status_code == 401:
114
  return "❌ Invalid API key. Please check your OpenAI API key."
 
115
  elif response.status_code == 429:
116
+ return "❌ Rate limit exceeded. Please wait a moment and try again."
117
+ elif response.status_code == 404 and model == "gpt-4":
118
+ # Try GPT-3.5 fallback
119
+ print("πŸ”„ Trying GPT-3.5-turbo fallback...")
120
+ return self._call_openai_raw(api_key, prompt, "gpt-3.5-turbo", max_tokens)
121
+ elif "insufficient_quota" in error_msg.lower():
122
+ return "❌ OpenAI API quota exceeded. Please check your billing at https://platform.openai.com/usage"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  else:
124
+ return f"❌ OpenAI API Error ({response.status_code}): {error_msg}"
125
 
126
+ except requests.exceptions.Timeout:
127
+ return "❌ Request timeout. Please try again."
128
+ except requests.exceptions.RequestException as e:
129
+ return f"❌ Network error: {str(e)}"
130
+ except Exception as e:
131
+ return f"❌ Unexpected error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
132
 
133
+ def generate_new_content(self, api_key: str, term: str, context: str = "", target_audience: str = "general") -> str:
134
  """Generate new glossary content for a given term"""
135
 
136
  if not term.strip():
137
  return "Please enter a term to generate content for."
138
 
 
 
 
139
  prompt = f"""
140
  Create a comprehensive glossary entry for the term "{term}" following this EXACT template structure:
141
  {self.template}
 
159
  Generate the complete glossary entry now:
160
  """
161
 
162
+ return self._call_openai_raw(api_key, prompt, max_tokens=2500)
163
 
164
+ def update_existing_content(self, api_key: str, term: str, existing_content: str, update_instructions: str = "") -> Tuple[str, str]:
165
  """Analyze existing content and provide update recommendations"""
166
 
167
  if not term.strip() or not existing_content.strip():
168
  return "Please provide both term and existing content.", ""
169
 
 
 
 
170
  # First, analyze the content
171
  analysis_prompt = f"""
172
  Analyze this existing glossary content for "{term}" against the template standard and provide specific improvement recommendations.
 
201
  Format as a professional content analysis report.
202
  """
203
 
204
+ recommendations = self._call_openai_raw(api_key, analysis_prompt, max_tokens=1500)
 
 
 
 
 
 
 
205
 
206
  # Then generate updated content
207
  update_prompt = f"""
 
224
  Generate the complete, improved glossary entry:
225
  """
226
 
227
+ updated_content = self._call_openai_raw(api_key, update_prompt, max_tokens=2500)
228
 
229
  return recommendations, updated_content
230
 
231
+ def create_outline_brief(self, api_key: str, topic: str, scope: str = "comprehensive") -> str:
232
  """Create an outline or brief for new glossary content"""
233
 
234
  if not topic.strip():
235
  return "Please enter a topic for the outline."
236
 
 
 
 
237
  prompt = f"""
238
  Create a comprehensive content brief for developing a glossary focused on "{topic}".
239
  **Scope:** {scope}
 
280
  Create a comprehensive, actionable brief that will guide the entire glossary development process.
281
  """
282
 
283
+ return self._call_openai_raw(api_key, prompt, max_tokens=3000)
284
 
285
  def create_gradio_interface():
286
  """Create the Gradio interface for the glossary generator"""
 
300
  if len(clean_key) < 20:
301
  return "❌ API key too short - check if you copied the full key"
302
 
303
+ # Test the key with direct API call
304
+ test_result = generator._call_openai_raw(clean_key, "Test", "gpt-3.5-turbo", 1)
305
+
306
+ if "❌" in test_result:
307
+ return test_result
308
  else:
309
+ return "βœ… API key is valid and connected!"
310
 
311
+ def generate_new_wrapper(api_key, term, context, audience):
312
  if not term.strip():
313
  return "Please enter a term to generate content for."
314
+ return generator.generate_new_content(api_key, term, context, audience)
315
 
316
+ def update_existing_wrapper(api_key, term, existing_content, update_instructions):
317
  if not term.strip() or not existing_content.strip():
318
  return "Please provide both term and existing content.", ""
319
+ recommendations, updated_content = generator.update_existing_content(api_key, term, existing_content, update_instructions)
320
  return recommendations, updated_content
321
 
322
+ def create_outline_wrapper(api_key, topic, scope):
323
  if not topic.strip():
324
  return "Please enter a topic for the outline."
325
+ return generator.create_outline_brief(api_key, topic, scope)
326
 
327
  # Create the Gradio interface
328
  with gr.Blocks(title="Glossary Content Generator", theme=gr.themes.Soft()) as demo:
329
  gr.Markdown("""
330
+ # πŸ“š Glossary Content Generator
331
 
332
+ **Powered by OpenAI GPT-4** - Professional glossary content creation and optimization tool.
333
 
334
  πŸ”‘ **Enter your OpenAI API key below to get started!**
 
335
  """)
336
 
337
  # API Key Section
 
376
  with gr.Tabs():
377
  # Tab 1: Generate New Content
378
  with gr.TabItem("πŸ†• Generate New Content"):
379
+ gr.Markdown("### Create a new glossary entry from scratch using GPT-4")
380
 
381
  with gr.Row():
382
  with gr.Column(scale=1):
 
395
  choices=["general", "technical", "business", "beginner", "expert"],
396
  value="general"
397
  )
 
 
 
 
 
 
398
  generate_btn = gr.Button("πŸš€ Generate Content", variant="primary", size="lg")
399
 
400
  with gr.Column(scale=2):
 
407
 
408
  generate_btn.click(
409
  generate_new_wrapper,
410
+ inputs=[api_key_input, new_term, new_context, new_audience],
411
  outputs=[new_output]
412
  )
413
 
 
416
 
417
  # Tab 2: Update Existing Content
418
  with gr.TabItem("πŸ”„ Update Existing Content"):
419
+ gr.Markdown("### Analyze and improve existing glossary entries with AI-powered recommendations")
420
 
421
  with gr.Row():
422
  with gr.Column(scale=1):
 
435
  placeholder="e.g., 'Add more technical details', 'Include recent developments', 'Improve SEO focus'",
436
  lines=3
437
  )
 
 
 
 
 
 
438
  update_btn = gr.Button("πŸ” Analyze & Update", variant="primary", size="lg")
439
 
440
  with gr.Column(scale=2):
 
455
 
456
  update_btn.click(
457
  update_existing_wrapper,
458
+ inputs=[api_key_input, update_term, existing_content, update_instructions],
459
  outputs=[recommendations_output, updated_content_output]
460
  )
461
 
 
475
  choices=["comprehensive", "focused", "basic", "advanced", "specialized"],
476
  value="comprehensive"
477
  )
 
 
 
 
 
 
478
  outline_btn = gr.Button("πŸ“‹ Create Strategic Brief", variant="primary", size="lg")
479
 
480
  with gr.Column(scale=2):
 
487
 
488
  outline_btn.click(
489
  create_outline_wrapper,
490
+ inputs=[api_key_input, outline_topic, outline_scope],
491
  outputs=[outline_output]
492
  )
493
 
 
516
  5. **Copy and paste** it into the field above
517
 
518
  ## πŸ’° **Cost Information:**
519
+ - **GPT-4 pricing**: ~$0.03 per 1K input tokens, ~$0.06 per 1K output tokens
520
+ - **Per generation**: Approximately $0.02-0.10 depending on content length
521
+ - **Very cost-effective** for professional content creation
522
+
523
+ ## ✨ **Features:**
524
+ - πŸ€– **GPT-4 Powered**: High-quality, professional content generation
525
+ - πŸ“ **Template Consistency**: Follows your exact 6-section structure
526
+ - πŸ” **Content Analysis**: Detailed improvement recommendations
527
+ - πŸ“Š **Strategic Planning**: Comprehensive content briefs and roadmaps
528
+ - 🎯 **SEO Optimized**: Includes PAA questions and keyword strategies
529
+ - πŸ“‹ **Copy-Friendly**: Easy copy buttons for all outputs
530
+ - πŸ”‘ **No Setup Required**: Just enter your API key and start generating!
531
+ - πŸ› οΈ **Direct API**: Uses raw HTTP requests to avoid library conflicts
 
 
 
532
  """)
533
 
534
  return demo