Spaces:
Sleeping
Sleeping
File size: 7,559 Bytes
import streamlit as st
import os
import google.generativeai as genai
from dotenv import load_dotenv
import json

# Load environment variables from a local .env file (expects GOOGLE_API_KEY).
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Configure the Generative AI client; halt the app early if no key is present
# so the widgets below never render against an unconfigured model.
if GOOGLE_API_KEY:
    genai.configure(api_key=GOOGLE_API_KEY)
    model = genai.GenerativeModel('gemini-pro')  # You can experiment with other available models
else:
    st.error("Google AI Studio API key not found. Please add it to your .env file.")
    st.stop()
# Static sidebar copy, hoisted into module-level constants so the layout
# code below reads as a short sequence of render calls.
_INTRO_MD = """
This app demonstrates fundamental prompt engineering techniques based on the
Google Generative AI course.
"""

_TECHNIQUES_MD = """
- **Clear and Specific Instructions**: Providing explicit guidance to the model.
- **Using Delimiters**: Clearly separating different parts of the input text.
- **Asking for Structured Output**: Requesting output in a specific format (e.g., JSON).
- **Checking Assumptions**: Verifying if certain conditions are met.
- **Providing Examples (Few-Shot Prompting)**: Giving the model a few examples of the desired input-output behavior.
- **Temperature Control**: Adjusting the randomness of the model's output.
- **Chain-of-Thought Prompting**: Encouraging the model to show its reasoning process.
"""

_WHITEPAPER_MD = """
- Understanding LLM capabilities and limitations.
- Importance of prompt clarity and specificity.
- Iterative prompt development and refinement.
- Context window awareness
"""

st.title("Prompt Engineering Playground")
st.subheader("Experiment with Fundamental Prompting Techniques")

# Sidebar: reference material for the techniques demonstrated in the main panel.
with st.sidebar:
    st.header("Prompting Concepts")
    st.markdown(_INTRO_MD)
    st.subheader("Key Techniques:")
    st.markdown(_TECHNIQUES_MD)
    st.subheader("Whitepaper Insights:")
    st.markdown(_WHITEPAPER_MD)
# --- Prompting Techniques Section ---
st.header("Experiment with Prompts")

# Selectbox options, in display order; the first entry is the default.
_TECHNIQUE_OPTIONS = (
    "Simple Instruction",
    "Using Delimiters",
    "Requesting JSON Output",
    "Checking Assumptions",
    "Providing Examples (Few-Shot)",
    "Temperature Control",
    "Chain-of-Thought Prompting",
)

prompt_technique = st.selectbox(
    "Choose a Prompting Technique to Try:",
    list(_TECHNIQUE_OPTIONS),
    index=0,  # Start with "Simple Instruction"
)

prompt_input = st.text_area("Enter your prompt here:", height=150)

# Temperature slider — shared by every technique, passed to every generation call.
temperature = st.slider(
    "Temperature:",
    min_value=0.0,
    max_value=1.0,
    value=0.7,  # Default temperature
    step=0.01,
    help="Controls the randomness of the output. Lower values are more deterministic; higher values are more creative.",
)
# --- Technique-specific inputs ---
# These widgets must be rendered BEFORE the generate button. In the original
# layout they were created inside the `if st.button(...)` branch, so they only
# appeared AFTER a click — and because a click triggers a rerun, their default
# values were consumed immediately and the user could never actually supply a
# delimiter, JSON schema, assumption, or examples.
delimiter = "###"
json_format = ""
assumption = ""
example1_input = example1_output = example2_input = example2_output = ""

if prompt_technique == "Using Delimiters":
    delimiter = st.text_input("Enter your delimiter (e.g., ###, ---):", "###")
elif prompt_technique == "Requesting JSON Output":
    json_format = st.text_input(
        "Describe the desired JSON format (e.g., {'name': str, 'age': int}):", "{'key1': type, 'key2': type}"
    )
elif prompt_technique == "Checking Assumptions":
    assumption = st.text_input("State the assumption you want the model to check:", "The main subject is a person")
elif prompt_technique == "Providing Examples (Few-Shot)":
    example1_input = st.text_area("Example 1 Input:", height=50)
    example1_output = st.text_area("Example 1 Output:", height=50)
    example2_input = st.text_area("Example 2 Input (Optional):", height=50)
    example2_output = st.text_area("Example 2 Output (Optional):", height=50)

if st.button("Generate Response"):
    if not prompt_input:
        st.warning("Please enter a prompt.")
    else:
        with st.spinner("Generating..."):
            try:
                # Assemble the final prompt for the selected technique.
                if prompt_technique == "Using Delimiters":
                    processed_prompt = f"Here is the input, with parts separated by '{delimiter}':\n{prompt_input}\n Please process each part separately."
                elif prompt_technique == "Requesting JSON Output":
                    processed_prompt = f"Please provide the output in JSON format, following this structure: {json_format}. Here is the information: {prompt_input}"
                elif prompt_technique == "Checking Assumptions":
                    processed_prompt = f"First, check if the following assumption is true: '{assumption}'. Then, answer the prompt: {prompt_input}"
                elif prompt_technique == "Providing Examples (Few-Shot)":
                    processed_prompt = "Here are some examples:\n"
                    processed_prompt += f"Input: {example1_input}\nOutput: {example1_output}\n"
                    if example2_input and example2_output:
                        processed_prompt += f"Input: {example2_input}\nOutput: {example2_output}\n"
                    processed_prompt += f"\nNow, answer the following:\nInput: {prompt_input}"
                elif prompt_technique == "Chain-of-Thought Prompting":
                    processed_prompt = f"Let's think step by step. {prompt_input}"
                else:
                    # "Simple Instruction" and "Temperature Control" both send
                    # the raw prompt; the slider already covers temperature.
                    processed_prompt = prompt_input

                # Single generation call shared by all techniques (the original
                # duplicated this call in every branch).
                response = model.generate_content(
                    processed_prompt,
                    generation_config=genai.types.GenerationConfig(temperature=temperature),
                )

                if prompt_technique == "Requesting JSON Output":
                    # Try to parse the model output as JSON; fall back to raw text.
                    try:
                        json_output = json.loads(response.text)
                        st.subheader("Generated JSON Output:")
                        st.json(json_output)
                    except json.JSONDecodeError:
                        st.error("Failed to decode JSON. Raw response:")
                        st.text(response.text)
                elif prompt_technique == "Chain-of-Thought Prompting":
                    st.subheader("Generated Response (Chain-of-Thought):")
                    st.markdown(response.text)
                else:
                    st.subheader("Generated Response:")
                    st.markdown(response.text)
            except Exception as e:
                # Top-level UI boundary: surface API/config errors to the user
                # instead of crashing the Streamlit script.
                st.error(f"An error occurred: {e}")
|