Dua Rajper committed on
Commit
a9a24d6
·
verified ·
1 Parent(s): 41e1e8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +97 -88
app.py CHANGED
@@ -14,7 +14,6 @@ GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
14
  # Configure Generative AI model
15
  if GOOGLE_API_KEY:
16
  genai.configure(api_key=GOOGLE_API_KEY)
17
- model = genai.GenerativeModel('gemini-pro') # Or specify your model
18
  else:
19
  st.error(
20
  "Google AI Studio API key not found. Please add it to your .env file. "
@@ -25,55 +24,13 @@ else:
25
  st.title("Prompt Engineering Playground")
26
  st.subheader("Experiment with Prompting Techniques")
27
 
28
- # Sidebar for explanations and resources
29
- with st.sidebar:
30
- st.header("Prompting Concepts")
31
- st.markdown(
32
- """
33
- This app demonstrates various prompt engineering techniques. Explore how different prompts
34
- affect the output of a large language model.
35
- """
36
- )
37
- st.subheader("Key Techniques:")
38
- st.markdown(
39
- """
40
- - **Clear Instructions**: Provide explicit and unambiguous directions.
41
- - **Delimiters**: Use special characters to separate input parts.
42
- - **Structured Output**: Request output in a specific format (JSON).
43
- - **Assumption Checking**: Verify conditions in the input.
44
- - **Few-Shot Prompting**: Provide input-output examples.
45
- - **Temperature Control**: Adjust output randomness.
46
- - **Chain of Thought (CoT)**: Elicit step-by-step reasoning.
47
- - **Prompt Templates**: Employ pre-defined prompt structures.
48
- - **System Prompt**: Influence the model's overall behavior.
49
- - **Retrieval Augmentation**: Provide external knowledge.
50
- """
51
- )
52
- st.subheader("Important Considerations:")
53
- st.markdown(
54
- """
55
- - **Context Window**: Be mindful of the maximum input length the model can handle.
56
- - **Tokenization**: Understand how text is broken down into tokens.
57
- - **Bias and Safety**: Be aware of potential biases in the model's output and take steps to mitigate them.
58
- - **Rate Limits**: The API has usage limits. The app includes basic handling, but monitor your usage.
59
- """
60
- )
61
- st.subheader("Resources:")
62
- st.markdown(
63
- """
64
- - [Google Generative AI Course](https://developers.google.com/learn/generative-ai)
65
- - [Prompt Engineering Guide](https://www.promptingguide.ai/)
66
- - [Google AI Platform](https://cloud.google.com/ai-platform)
67
- """
68
- )
69
-
70
  # --- Helper Functions ---
71
  def code_block(text: str, language: str = "text") -> None:
72
  """Displays text as a formatted code block in Streamlit."""
73
  st.markdown(f"```{language}\n{text}\n```", unsafe_allow_html=True)
74
 
75
 
76
- def display_response(response: Any) -> None: # Removed the type hint GenerateContentResponse
77
  """Displays the model's response, handling text, and error cases."""
78
  if response.text:
79
  st.subheader("Generated Response:")
@@ -85,12 +42,13 @@ def display_response(response: Any) -> None: # Removed the type hint GenerateCon
85
  st.error(f"Full response object: {response}") # Print the full response for debugging
86
 
87
 
88
- def generate_with_retry(prompt: str, generation_config: genai.types.GenerationConfig, max_retries: int = 3, delay: int = 5) -> Any: # Removed the type hint
89
  """
90
- Generates content with retry logic to handle potential API errors (e.g., rate limits).
91
 
92
  Args:
93
  prompt: The prompt string.
 
94
  generation_config: The generation configuration.
95
  max_retries: Maximum number of retries.
96
  delay: Delay in seconds between retries.
@@ -99,20 +57,58 @@ def generate_with_retry(prompt: str, generation_config: genai.types.GenerationCo
99
  The generated response.
100
 
101
  Raises:
102
- Exception: If the generation fails after maximum retries.
103
  """
104
  for i in range(max_retries):
105
  try:
 
106
  response = model.generate_content(prompt, generation_config=generation_config)
107
- return response # Return the response if successful
108
  except Exception as e:
109
- st.warning(f"Error during generation (attempt {i + 1}/{max_retries}): {e}")
110
- if i < max_retries - 1:
 
 
 
 
 
 
111
  st.info(f"Retrying in {delay} seconds...")
112
- time.sleep(delay) # Use time.sleep for retrying
113
  else:
114
  raise # Re-raise the exception after the last retry
115
- raise Exception("Failed to generate content after maximum retries") #Should never reach here.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  # --- Prompting Techniques Section ---
118
  st.header("Experiment with Prompts")
@@ -129,14 +125,14 @@ prompt_technique = st.selectbox(
129
  "Chain of Thought (CoT)",
130
  "Prompt Templates",
131
  "System Prompt",
132
- "Retrieval Augmentation" # Added Retrieval Augmentation
133
  ],
134
  index=0,
135
  )
136
 
137
  prompt_input = st.text_area("Enter your prompt here:", height=150)
138
 
139
- # Temperature slider (common to several techniques)
140
  temperature = st.slider(
141
  "Temperature:",
142
  min_value=0.0,
@@ -151,14 +147,15 @@ if st.button("Generate Response"):
151
  st.warning("Please enter a prompt.")
152
  else:
153
  with st.spinner("Generating..."):
154
- generation_config = genai.types.GenerationConfig(temperature=temperature) # Create it once
155
 
156
  try:
157
  if prompt_technique == "Using Delimiters":
158
  delimiter = st.text_input("Enter your delimiter (e.g., ###, ---):", "###")
159
  processed_prompt = f"Here is the input, with parts separated by '{delimiter}':\n{prompt_input}\n Please process each part separately."
160
- response = generate_with_retry(processed_prompt, generation_config)
161
- display_response(response)
 
162
 
163
  elif prompt_technique == "Structured Output (JSON)":
164
  json_format = st.text_input(
@@ -166,22 +163,24 @@ if st.button("Generate Response"):
166
  "{'key1': type, 'key2': type}",
167
  )
168
  processed_prompt = f"Please provide the output in JSON format, following this structure: {json_format}. Here is the information: {prompt_input}"
169
- response = generate_with_retry(processed_prompt, generation_config)
170
- try:
171
- json_output = json.loads(response.text)
172
- st.subheader("Generated JSON Output:")
173
- st.json(json_output)
174
- except json.JSONDecodeError:
175
- st.error("Failed to decode JSON. Raw response:")
176
- code_block(response.text, "json")
 
177
 
178
  elif prompt_technique == "Checking Assumptions":
179
  assumption = st.text_input(
180
  "State the assumption you want the model to check:", "The text is about a historical event."
181
  )
182
  processed_prompt = f"First, check if the following assumption is true: '{assumption}'. Then, answer the prompt: {prompt_input}"
183
- response = generate_with_retry(processed_prompt, generation_config)
184
- display_response(response)
 
185
 
186
  elif prompt_technique == "Few-Shot Prompting":
187
  example1_input = st.text_area("Example 1 Input:", height=50)
@@ -195,18 +194,20 @@ if st.button("Generate Response"):
195
  processed_prompt += f"Input: {example2_input}\nOutput: {example2_output}\n"
196
  processed_prompt += f"\nNow, answer the following:\nInput: {prompt_input}"
197
 
198
- response = generate_with_retry(processed_prompt, generation_config)
199
- display_response(response)
 
200
 
201
  elif prompt_technique == "Temperature Control":
202
- # The temperature slider is already handled
203
- response = generate_with_retry(prompt_input, generation_config)
204
- display_response(response)
205
 
206
  elif prompt_technique == "Chain of Thought (CoT)":
207
  cot_prompt = f"Let's think step by step. {prompt_input}"
208
- response = generate_with_retry(cot_prompt, generation_config)
209
- display_response(response)
 
210
 
211
  elif prompt_technique == "Prompt Templates":
212
  template_name = st.selectbox(
@@ -224,35 +225,43 @@ if st.button("Generate Response"):
224
  else:
225
  processed_prompt = prompt_input
226
 
227
- response = generate_with_retry(processed_prompt, generation_config)
228
- display_response(response)
 
229
 
230
  elif prompt_technique == "System Prompt":
231
  system_prompt_text = st.text_area(
232
  "Enter system prompt:", "You are a helpful and informative assistant.", height=100
233
  )
234
- user_prompt = f"{prompt_input}" # User's input
235
 
236
  response = generate_with_retry(
237
  contents=[
238
- genai.Content(role="system", parts=[genai.Part(text=system_prompt_text)]), #send system prompt
239
- genai.Content(role="user", parts=[genai.Part(text=user_prompt)]), # send user prompt
240
  ],
 
241
  generation_config=generation_config,
242
  )
243
- display_response(response)
244
-
 
245
  elif prompt_technique == "Retrieval Augmentation":
246
- context_text = st.text_area("Enter context text (knowledge base):",
247
- "This is the context the model can use to answer the question.",
248
- height=150)
 
 
249
  processed_prompt = f"Given the following context: \n\n {context_text} \n\n Answer the following question: {prompt_input}"
250
- response = generate_with_retry(processed_prompt, generation_config)
251
- display_response(response)
 
252
 
253
  else: # Simple Instruction
254
- response = generate_with_retry(prompt_input, generation_config)
255
- display_response(response)
 
256
 
257
  except Exception as e:
258
  st.error(f"An error occurred: {e}")
 
 
14
  # Configure Generative AI model
15
  if GOOGLE_API_KEY:
16
  genai.configure(api_key=GOOGLE_API_KEY)
 
17
  else:
18
  st.error(
19
  "Google AI Studio API key not found. Please add it to your .env file. "
 
24
  st.title("Prompt Engineering Playground")
25
  st.subheader("Experiment with Prompting Techniques")
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  # --- Helper Functions ---
28
  def code_block(text: str, language: str = "text") -> None:
29
  """Displays text as a formatted code block in Streamlit."""
30
  st.markdown(f"```{language}\n{text}\n```", unsafe_allow_html=True)
31
 
32
 
33
+ def display_response(response: Any) -> None:
34
  """Displays the model's response, handling text, and error cases."""
35
  if response.text:
36
  st.subheader("Generated Response:")
 
42
  st.error(f"Full response object: {response}") # Print the full response for debugging
43
 
44
 
45
+ def generate_with_retry(prompt: str, model_name: str, generation_config: genai.types.GenerationConfig, max_retries: int = 3, delay: int = 5) -> Any:
46
  """
47
+ Generates content with retry logic to handle potential API errors (e.g., rate limits, model not found).
48
 
49
  Args:
50
  prompt: The prompt string.
51
+ model_name: The name of the model to use.
52
  generation_config: The generation configuration.
53
  max_retries: Maximum number of retries.
54
  delay: Delay in seconds between retries.
 
57
  The generated response.
58
 
59
  Raises:
60
+ Exception: If the generation fails after maximum retries or a critical error occurs.
61
  """
62
  for i in range(max_retries):
63
  try:
64
+ model = genai.GenerativeModel(model_name) # Use the selected model name
65
  response = model.generate_content(prompt, generation_config=generation_config)
66
+ return response
67
  except Exception as e:
68
+ error_message = str(e)
69
+ st.warning(f"Error during generation (attempt {i + 1}/{max_retries}): {error_message}")
70
+ if "404" in error_message and "not found" in error_message:
71
+ st.error(
72
+ f"Model '{model_name}' is not available or not supported. Please select a different model."
73
+ )
74
+ return None # Return None to indicate failure. The calling code must handle this.
75
+ elif i < max_retries - 1:
76
  st.info(f"Retrying in {delay} seconds...")
77
+ time.sleep(delay)
78
  else:
79
  raise # Re-raise the exception after the last retry
80
+ raise Exception("Failed to generate content after maximum retries")
81
+
82
+
83
+
84
+ # --- Model Selection and Initialization ---
85
+ available_models = []
86
+ try:
87
+ available_models = genai.list_models() # Get the list of available models
88
+ except Exception as e:
89
+ st.error(f"Error listing models: {e}. Please check your API key and network connection.")
90
+ st.stop()
91
+
92
+ model_names = [
93
+ model.name for model in available_models if "generateContent" in model.supported_generation_methods
94
+ ] #get models supporting generateContent
95
+
96
+ if not model_names:
97
+ st.error(
98
+ "No models supporting 'generateContent' found. This application requires a model that supports this method."
99
+ )
100
+ st.stop()
101
+
102
+ default_model = "gemini-pro" if "gemini-pro" in model_names else model_names[0] #select default model
103
+
104
+ selected_model = st.selectbox("Select a Model:", model_names, index=model_names.index(default_model)) # Let user choose
105
+
106
+ # Re-initialize the model with the selected name. This is done *outside* the generate_with_retry loop.
107
+ try:
108
+ model = genai.GenerativeModel(selected_model)
109
+ except Exception as e:
110
+ st.error(f"Error initializing model {selected_model}: {e}")
111
+ st.stop()
112
 
113
  # --- Prompting Techniques Section ---
114
  st.header("Experiment with Prompts")
 
125
  "Chain of Thought (CoT)",
126
  "Prompt Templates",
127
  "System Prompt",
128
+ "Retrieval Augmentation"
129
  ],
130
  index=0,
131
  )
132
 
133
  prompt_input = st.text_area("Enter your prompt here:", height=150)
134
 
135
+ # Temperature slider
136
  temperature = st.slider(
137
  "Temperature:",
138
  min_value=0.0,
 
147
  st.warning("Please enter a prompt.")
148
  else:
149
  with st.spinner("Generating..."):
150
+ generation_config = genai.types.GenerationConfig(temperature=temperature)
151
 
152
  try:
153
  if prompt_technique == "Using Delimiters":
154
  delimiter = st.text_input("Enter your delimiter (e.g., ###, ---):", "###")
155
  processed_prompt = f"Here is the input, with parts separated by '{delimiter}':\n{prompt_input}\n Please process each part separately."
156
+ response = generate_with_retry(processed_prompt, selected_model, generation_config)
157
+ if response:
158
+ display_response(response)
159
 
160
  elif prompt_technique == "Structured Output (JSON)":
161
  json_format = st.text_input(
 
163
  "{'key1': type, 'key2': type}",
164
  )
165
  processed_prompt = f"Please provide the output in JSON format, following this structure: {json_format}. Here is the information: {prompt_input}"
166
+ response = generate_with_retry(processed_prompt, selected_model, generation_config)
167
+ if response:
168
+ try:
169
+ json_output = json.loads(response.text)
170
+ st.subheader("Generated JSON Output:")
171
+ st.json(json_output)
172
+ except json.JSONDecodeError:
173
+ st.error("Failed to decode JSON. Raw response:")
174
+ code_block(response.text, "json")
175
 
176
  elif prompt_technique == "Checking Assumptions":
177
  assumption = st.text_input(
178
  "State the assumption you want the model to check:", "The text is about a historical event."
179
  )
180
  processed_prompt = f"First, check if the following assumption is true: '{assumption}'. Then, answer the prompt: {prompt_input}"
181
+ response = generate_with_retry(processed_prompt, selected_model, generation_config)
182
+ if response:
183
+ display_response(response)
184
 
185
  elif prompt_technique == "Few-Shot Prompting":
186
  example1_input = st.text_area("Example 1 Input:", height=50)
 
194
  processed_prompt += f"Input: {example2_input}\nOutput: {example2_output}\n"
195
  processed_prompt += f"\nNow, answer the following:\nInput: {prompt_input}"
196
 
197
+ response = generate_with_retry(processed_prompt, selected_model, generation_config)
198
+ if response:
199
+ display_response(response)
200
 
201
  elif prompt_technique == "Temperature Control":
202
+ response = generate_with_retry(prompt_input, selected_model, generation_config)
203
+ if response:
204
+ display_response(response)
205
 
206
  elif prompt_technique == "Chain of Thought (CoT)":
207
  cot_prompt = f"Let's think step by step. {prompt_input}"
208
+ response = generate_with_retry(cot_prompt, selected_model, generation_config)
209
+ if response:
210
+ display_response(response)
211
 
212
  elif prompt_technique == "Prompt Templates":
213
  template_name = st.selectbox(
 
225
  else:
226
  processed_prompt = prompt_input
227
 
228
+ response = generate_with_retry(processed_prompt, selected_model, generation_config)
229
+ if response:
230
+ display_response(response)
231
 
232
  elif prompt_technique == "System Prompt":
233
  system_prompt_text = st.text_area(
234
  "Enter system prompt:", "You are a helpful and informative assistant.", height=100
235
  )
236
+ user_prompt = f"{prompt_input}"
237
 
238
  response = generate_with_retry(
239
  contents=[
240
+ genai.Content(role="system", parts=[genai.Part(text=system_prompt_text)]),
241
+ genai.Content(role="user", parts=[genai.Part(text=user_prompt)]),
242
  ],
243
+ model=selected_model, # Pass the model name here as well
244
  generation_config=generation_config,
245
  )
246
+ if response:
247
+ display_response(response)
248
+
249
  elif prompt_technique == "Retrieval Augmentation":
250
+ context_text = st.text_area(
251
+ "Enter context text (knowledge base):",
252
+ "This is the context the model can use to answer the question.",
253
+ height=150,
254
+ )
255
  processed_prompt = f"Given the following context: \n\n {context_text} \n\n Answer the following question: {prompt_input}"
256
+ response = generate_with_retry(processed_prompt, selected_model, generation_config)
257
+ if response:
258
+ display_response(response)
259
 
260
  else: # Simple Instruction
261
+ response = generate_with_retry(prompt_input, selected_model, generation_config)
262
+ if response:
263
+ display_response(response)
264
 
265
  except Exception as e:
266
  st.error(f"An error occurred: {e}")
267
+