notes73 committed on
Commit
bf97a64
Β·
1 Parent(s): d2696e6

Updated AI Content Optimizer with improvements

Browse files
Files changed (2) hide show
  1. app.py +70 -89
  2. requirements.txt +7 -0
app.py CHANGED
@@ -4,123 +4,104 @@ import pandas as pd
4
  import textstat
5
  import os
6
  import asyncio
 
7
 
8
  # Initialize OpenAI client
9
- api_key = os.getenv("OPENAI_API_KEY")
10
 
11
- if not api_key:
12
- st.error("API key is missing. Please set the OPENAI_API_KEY environment variable.")
13
- st.stop()
14
-
15
- client = openai.OpenAI(api_key=api_key)
16
-
17
- # Function to fetch available OpenAI models (filtering for GPT models only)
18
  def get_models():
19
  try:
20
- all_models = client.models.list()
21
- text_models = [m.id for m in all_models.data if "gpt" in m.id] # Filter only GPT models
22
- return text_models or ["gpt-4"] # Default to GPT-4 if list is empty
23
  except Exception as e:
24
  st.error(f"Error fetching models: {e}")
25
- return ["gpt-4"] # Fallback model
26
 
27
- # Function to generate AI response
28
- def generate_response(prompt, model, tone):
29
- if model not in ["gpt-4", "gpt-3.5-turbo"]:
30
- model = "gpt-4" # Fallback to GPT-4 for unsupported models
 
31
 
32
- try:
33
- response = client.chat.completions.create(
34
- model=model,
35
- messages=[{"role": "system", "content": f"Rewrite this in {tone} style: {prompt}"}]
36
- )
37
- return response.choices[0].message.content.strip()
38
- except Exception as e:
39
- st.error(f"Error generating response: {e}")
40
- return ""
41
 
42
- # Function for batch processing with async calls
43
  async def process_bulk(prompts, model, tone):
44
- if model not in ["gpt-4", "gpt-3.5-turbo"]:
45
- model = "gpt-4" # Fallback to GPT-4 for unsupported models
46
-
47
  tasks = [
48
  client.chat.completions.acreate(
49
  model=model,
50
  messages=[{"role": "system", "content": f"Rewrite this in {tone} style: {p}"}]
51
  ) for p in prompts
52
  ]
53
- try:
54
- responses = await asyncio.gather(*tasks)
55
- return [response.choices[0].message.content.strip() for response in responses]
56
- except Exception as e:
57
- st.error(f"Error processing bulk prompts: {e}")
58
- return [""] * len(prompts)
59
 
60
  # UI Structure
61
- st.title("AI Prompt Optimizer")
62
- st.write("Enhance, analyze, and optimize your prompts with AI!")
63
 
64
  # Select AI Provider
65
- provider = st.selectbox("Choose AI Provider", ["OpenAI", "Anthropic", "Cohere"])
66
 
67
- # Fetch available models dynamically
68
- if provider == "OpenAI":
69
- available_models = get_models()
70
- model_choice = st.selectbox("Choose AI Model", available_models)
71
  else:
72
- model_choice = st.text_input("Enter model name for selected provider")
73
-
74
- # Card-like structure for input
75
- st.markdown("### **Prompt Customization**")
76
- with st.expander("πŸ”Ή Customize Your Prompt"):
77
- example_prompts = {
78
- "Marketing": "Write an email subject line for a new product launch.",
79
- "Storytelling": "Describe a futuristic city in a cyberpunk world.",
80
- "Technical": "Explain quantum computing in simple terms."
81
- }
82
- category = st.selectbox("Choose a Use Case", list(example_prompts.keys()))
83
- user_prompt = st.text_area("Enter your prompt:", value=example_prompts[category])
84
- custom_tone = st.text_input("Enter a custom tone/style (e.g., poetic, technical, humorous)")
85
- tone_choice = custom_tone if custom_tone else "Professional"
86
-
87
- # Evaluate readability before processing
88
- readability_score = textstat.flesch_reading_ease(user_prompt)
89
- st.write(f"**Original Readability Score:** {readability_score:.2f}")
90
-
91
- # Generate AI-enhanced prompt
92
- if st.button("πŸ”„ Optimize Prompt"):
93
- styled_prompt = generate_response(user_prompt, model_choice, tone_choice)
94
- ai_readability_score = textstat.flesch_reading_ease(styled_prompt)
95
- st.write(f"**Optimized Readability Score:** {ai_readability_score:.2f}")
96
- st.text_area("Optimized Prompt:", styled_prompt, height=150)
97
-
98
- # Store history in session state
99
  if "history" not in st.session_state:
100
  st.session_state["history"] = []
101
- st.session_state["history"].append({"Original": user_prompt, "Optimized": styled_prompt})
102
-
103
- # Upload CSV for batch processing
104
- st.markdown("### **Batch Processing (CSV Upload)**")
105
- with st.expander("πŸ“‚ Upload CSV for Bulk Optimization"):
106
- uploaded_file = st.file_uploader("Upload a CSV file with a column named 'Prompt'", type=["csv"])
107
- if uploaded_file:
108
- df = pd.read_csv(uploaded_file)
109
- if "Prompt" in df.columns:
110
- prompts = df["Prompt"].tolist()
111
- optimized_prompts = asyncio.run(process_bulk(prompts, model_choice, tone_choice))
112
- df["Optimized_Prompt"] = optimized_prompts
113
- st.write(df)
114
- st.download_button("Download Optimized CSV", df.to_csv(index=False).encode('utf-8'), "optimized_prompts.csv", "text/csv")
115
- else:
116
- st.error("CSV must contain a column named 'Prompt'")
117
-
118
- # Show prompt history
119
- st.markdown("### **History of Optimized Prompts**")
120
  if "history" in st.session_state and st.session_state["history"]:
121
  for entry in st.session_state["history"][::-1]:
122
  st.write(f"πŸ”Ή **Original:** {entry['Original']}")
123
  st.write(f"✨ **Optimized:** {entry['Optimized']}")
124
  st.markdown("---")
125
 
126
- st.success("πŸš€ AI Prompt Optimizer Ready!")
 
4
  import textstat
5
  import os
6
  import asyncio
7
+ from textblob import TextBlob
8
 
9
# Initialize OpenAI client.
# Fail fast with a clear message when the key is absent; otherwise the
# OpenAI SDK raises an opaque authentication error on the first API call.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    st.error("API key is missing. Please set the OPENAI_API_KEY environment variable.")
    st.stop()
client = openai.OpenAI(api_key=api_key)
11
 
12
+ # Function to fetch available OpenAI models
 
 
 
 
 
 
13
def get_models():
    """Return the ids of chat-capable OpenAI models for the account.

    The raw model list also contains embedding, audio, and moderation
    models that would fail in ``client.chat.completions.create``, so the
    result is filtered to GPT-family ids (and sorted for a stable UI).

    Returns:
        list[str]: chat-capable model ids, or an empty list on API
        failure so the caller can fall back to a default model.
    """
    try:
        models = client.models.list()
        # Only GPT-family models are usable with the chat completions API.
        return sorted(model.id for model in models.data if "gpt" in model.id)
    except Exception as e:
        # Surface the failure in the UI instead of crashing the script run.
        st.error(f"Error fetching models: {e}")
        return []
20
 
21
# Text analysis helper
def analyze_text(text):
    """Return a ``(readability, sentiment)`` pair for *text*.

    Readability is the Flesch reading-ease score (via textstat);
    sentiment is TextBlob's polarity in the range [-1, 1].
    """
    return (
        textstat.flesch_reading_ease(text),
        TextBlob(text).sentiment.polarity,
    )
26
 
27
# Function to generate AI-enhanced content
def generate_response(prompt, model, tone):
    """Ask the chat model to rewrite *prompt* in the requested *tone*.

    Args:
        prompt: the user's original text.
        model: OpenAI chat model id to use.
        tone: writing style injected into the system instruction.

    Returns:
        The rewritten text, or "" if the API call fails — the error is
        shown in the Streamlit UI instead of crashing the script run.
    """
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": f"Rewrite this in {tone} style: {prompt}"}]
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        st.error(f"Error generating response: {e}")
        return ""
 
 
34
 
35
# Function for batch processing asynchronously
async def process_bulk(prompts, model, tone):
    """Rewrite every prompt in *prompts* concurrently in the given *tone*.

    The synchronous OpenAI v1 client has no ``acreate`` method, so each
    blocking request is dispatched to a worker thread with
    ``asyncio.to_thread`` and the results awaited with ``asyncio.gather``.

    Returns:
        list[str]: one optimized string per input prompt; on failure an
        empty string is returned for every prompt (matching the input
        length so the caller's DataFrame assignment still works).
    """
    def _rewrite(p):
        # Blocking API call; executed off the event loop in a thread.
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": f"Rewrite this in {tone} style: {p}"}]
        )
        return response.choices[0].message.content.strip()

    try:
        return await asyncio.gather(*(asyncio.to_thread(_rewrite, p) for p in prompts))
    except Exception as e:
        st.error(f"Error processing bulk prompts: {e}")
        return [""] * len(prompts)
 
 
 
 
45
 
46
# ---------------------------------------------------------------- UI Structure
st.title("🚀 AI Content Optimizer")
st.write("Enhance, analyze, and optimize your content with AI!")

# Select AI Provider (only OpenAI is wired up at the moment).
provider = st.selectbox("Choose AI Provider", ["OpenAI"])

# Fetch available models; fall back to a safe default when the list is empty.
display_models = get_models()
model_choice = (
    st.selectbox("Choose AI Model", display_models)
    if display_models
    else "gpt-3.5-turbo"
)

# ------------------------------------------------------- Prompt Customization
st.markdown("### **Content Customization**")
user_prompt = st.text_area("Enter your content:")
tone_choice = st.selectbox("Choose a Writing Tone", ["Formal", "Casual", "Technical", "Poetic", "Persuasive"])

if user_prompt:
    # Show baseline metrics for the text as entered.
    readability, sentiment = analyze_text(user_prompt)
    st.write(f"**Original Readability Score:** {readability:.2f}")
    st.write(f"**Sentiment Score:** {sentiment:.2f} (Positive: 1, Negative: -1)")

    # Generate AI-enhanced content on demand.
    if st.button("🔄 Optimize Content"):
        optimized_content = generate_response(user_prompt, model_choice, tone_choice)
        optimized_readability, optimized_sentiment = analyze_text(optimized_content)

        st.write("### ✨ Optimized Content")
        st.text_area("Optimized Content:", optimized_content, height=150)
        st.write(f"**Optimized Readability Score:** {optimized_readability:.2f}")
        st.write(f"**Optimized Sentiment Score:** {optimized_sentiment:.2f}")

        # Persist each optimization so it survives Streamlit reruns.
        if "history" not in st.session_state:
            st.session_state["history"] = []
        st.session_state["history"].append({"Original": user_prompt, "Optimized": optimized_content})

# ----------------------------------------------------------- Batch Processing
st.markdown("### 📂 Bulk Optimization (CSV Upload)")
uploaded_file = st.file_uploader("Upload a CSV file with a column named 'Content'", type=["csv"])
if uploaded_file:
    df = pd.read_csv(uploaded_file)
    if "Content" in df.columns:
        prompts = df["Content"].tolist()
        optimized_prompts = asyncio.run(process_bulk(prompts, model_choice, tone_choice))
        df["Optimized_Content"] = optimized_prompts
        st.write(df)
        st.download_button("Download Optimized CSV", df.to_csv(index=False).encode('utf-8'), "optimized_content.csv", "text/csv")
    else:
        st.error("CSV must contain a column named 'Content'")

# ------------------------------------------------- Show Optimization History
st.markdown("### 🔹 Optimization History")
if "history" in st.session_state and st.session_state["history"]:
    for entry in st.session_state["history"][::-1]:
        st.write(f"🔹 **Original:** {entry['Original']}")
        st.write(f"✨ **Optimized:** {entry['Optimized']}")
        st.markdown("---")

st.success("🚀 AI Content Optimizer Ready!")
requirements.txt CHANGED
@@ -3,3 +3,10 @@ openai
3
  pandas
4
  textstat
5
  asyncio
 
 
 
 
 
 
 
 
3
  pandas
4
  textstat
5
  asyncio
6
+ numpy
7
+ scikit-learn
8
+ transformers
9
+ nltk
10
+ tqdm
11
+ python-dotenv
12
+ textblob