Dua Rajper committed on
Commit
890b76a
·
verified ·
1 Parent(s): 3c5817d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +161 -0
app.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import os
import google.generativeai as genai
from dotenv import load_dotenv
import json

# Pull configuration from a local .env file so the API key is never
# hard-coded in source control.
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Guard clause: without an API key the app cannot call the model at
# all, so report the problem and halt this script run right away.
if not GOOGLE_API_KEY:
    st.error("Google AI Studio API key not found. Please add it to your .env file.")
    st.stop()

# Configure the Generative AI client and instantiate the model.
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel('gemini-pro')  # You can experiment with other available models
st.title("Prompt Engineering Playground")
st.subheader("Experiment with Fundamental Prompting Techniques")

with st.sidebar:
    st.header("Prompting Concepts")
    # Short pitch for what this playground covers.
    intro_text = """
This app demonstrates fundamental prompt engineering techniques based on the
Google Generative AI course.
"""
    st.markdown(intro_text)

    st.subheader("Key Techniques:")
    # One bullet per technique offered in the main selectbox.
    techniques_text = """
- **Clear and Specific Instructions**: Providing explicit guidance to the model.
- **Using Delimiters**: Clearly separating different parts of the input text.
- **Asking for Structured Output**: Requesting output in a specific format (e.g., JSON).
- **Checking Assumptions**: Verifying if certain conditions are met.
- **Providing Examples (Few-Shot Prompting)**: Giving the model a few examples of the desired input-output behavior.
- **Temperature Control**: Adjusting the randomness of the model's output.
- **Chain-of-Thought Prompting**: Encouraging the model to show its reasoning process.
"""
    st.markdown(techniques_text)

    st.subheader("Whitepaper Insights:")
    insights_text = """
- Understanding LLM capabilities and limitations.
- Importance of prompt clarity and specificity.
- Iterative prompt development and refinement.
- Context window awareness
"""
    st.markdown(insights_text)
# --- Prompting Techniques Section ---
st.header("Experiment with Prompts")

# Closed set of techniques the playground supports; the first entry is
# the default selection.
_TECHNIQUE_OPTIONS = [
    "Simple Instruction",
    "Using Delimiters",
    "Requesting JSON Output",
    "Checking Assumptions",
    "Providing Examples (Few-Shot)",
    "Temperature Control",
    "Chain-of-Thought Prompting",
]

prompt_technique = st.selectbox(
    "Choose a Prompting Technique to Try:",
    _TECHNIQUE_OPTIONS,
    index=0,  # Start with "Simple Instruction"
)

prompt_input = st.text_area("Enter your prompt here:", height=150)

# Sampling temperature, shared by every technique handled below.
temperature = st.slider(
    "Temperature:",
    min_value=0.0,
    max_value=1.0,
    value=0.7,  # Default temperature
    step=0.01,
    help="Controls the randomness of the output. Lower values are more deterministic; higher values are more creative.",
)
# --- Generation ---
# BUG FIX: previously the technique-specific input widgets (delimiter,
# JSON format, assumption, few-shot examples) were created *inside* the
# `st.button` branch. Streamlit reruns the whole script on every
# interaction, and a button evaluates to True only on the single rerun
# triggered by its click — so those widgets first appeared after the
# click and always evaluated to their default values; the user's input
# was never used. They are now created up front, before the button, so
# their current values are available when the click happens.

delimiter = None
json_format = None
assumption = None
example1_input = example1_output = ""
example2_input = example2_output = ""

if prompt_technique == "Using Delimiters":
    delimiter = st.text_input("Enter your delimiter (e.g., ###, ---):", "###")
elif prompt_technique == "Requesting JSON Output":
    json_format = st.text_input(
        "Describe the desired JSON format (e.g., {'name': str, 'age': int}):", "{'key1': type, 'key2': type}"
    )
elif prompt_technique == "Checking Assumptions":
    assumption = st.text_input("State the assumption you want the model to check:", "The main subject is a person")
elif prompt_technique == "Providing Examples (Few-Shot)":
    example1_input = st.text_area("Example 1 Input:", height=50)
    example1_output = st.text_area("Example 1 Output:", height=50)
    example2_input = st.text_area("Example 2 Input (Optional):", height=50)
    example2_output = st.text_area("Example 2 Output (Optional):", height=50)


def _generate(prompt):
    """Call the model with the shared temperature setting and return the response."""
    return model.generate_content(
        prompt, generation_config=genai.types.GenerationConfig(temperature=temperature)
    )


def _strip_code_fences(text):
    """Remove a surrounding ```/```json markdown fence, if present, so the payload parses."""
    raw = text.strip()
    if raw.startswith("```"):
        raw = raw.strip("`").strip()
        # A fenced block may carry a language tag on its first line.
        if raw.lower().startswith("json"):
            raw = raw[4:].lstrip()
    return raw


if st.button("Generate Response"):
    if not prompt_input:
        st.warning("Please enter a prompt.")
    else:
        with st.spinner("Generating..."):
            try:
                if prompt_technique == "Using Delimiters":
                    processed_prompt = f"Here is the input, with parts separated by '{delimiter}':\n{prompt_input}\n Please process each part separately."
                    response = _generate(processed_prompt)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Requesting JSON Output":
                    processed_prompt = f"Please provide the output in JSON format, following this structure: {json_format}. Here is the information: {prompt_input}"
                    response = _generate(processed_prompt)
                    # Models often wrap JSON output in markdown fences;
                    # strip them so valid payloads are not rejected.
                    try:
                        json_output = json.loads(_strip_code_fences(response.text))
                        st.subheader("Generated JSON Output:")
                        st.json(json_output)
                    except json.JSONDecodeError:
                        st.error("Failed to decode JSON. Raw response:")
                        st.text(response.text)

                elif prompt_technique == "Checking Assumptions":
                    processed_prompt = f"First, check if the following assumption is true: '{assumption}'. Then, answer the prompt: {prompt_input}"
                    response = _generate(processed_prompt)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Providing Examples (Few-Shot)":
                    processed_prompt = "Here are some examples:\n"
                    processed_prompt += f"Input: {example1_input}\nOutput: {example1_output}\n"
                    # The second example is optional; include it only
                    # when both halves were provided.
                    if example2_input and example2_output:
                        processed_prompt += f"Input: {example2_input}\nOutput: {example2_output}\n"
                    processed_prompt += f"\nNow, answer the following:\nInput: {prompt_input}"
                    response = _generate(processed_prompt)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Chain-of-Thought Prompting":
                    # Classic zero-shot CoT trigger phrase prepended to
                    # the user's prompt.
                    cot_prompt = f"Let's think step by step. {prompt_input}"
                    response = _generate(cot_prompt)
                    st.subheader("Generated Response (Chain-of-Thought):")
                    st.markdown(response.text)

                else:
                    # "Simple Instruction" and "Temperature Control"
                    # both send the prompt unchanged; the temperature
                    # slider already applies to every technique.
                    response = _generate(prompt_input)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

            except Exception as e:
                # Top-level boundary for this Streamlit run: surface
                # API/SDK failures to the user instead of a stack trace.
                st.error(f"An error occurred: {e}")