ilsa15 committed on
Commit
65815f5
·
verified ·
1 Parent(s): 8062995

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +213 -0
app.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import os
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from dotenv import load_dotenv

# Load environment variables (e.g. GROQ_API_KEY) from a local .env file, if present.
load_dotenv()

# Configure Streamlit page — must run before any other st.* call renders output.
st.set_page_config(
    page_title="AI Research Assistant",
    page_icon="πŸ€–",
    layout="wide"
)

# App title and description
st.title("πŸ€– Agentic AI Research Assistant")
st.markdown("Enter a topic and get a structured research summary with key subtopics!")

# Sidebar for API key input and model selection.
# NOTE: groq_api_key and model_choice are read later by main() as module-level names.
with st.sidebar:
    st.header("πŸ”‘ Configuration")
    # Try to get API key from environment variable first; the text box lets the
    # user override or supply one when no .env is configured.
    default_api_key = os.environ.get("GROQ_API_KEY", "")

    groq_api_key = st.text_input(
        "Enter your Groq API Key:",
        value=default_api_key,
        type="password",
        help="Get your free API key from https://console.groq.com/"
    )

    # Model selection
    # NOTE(review): the help text mentions "LLaMA3"/"Mixtral" while the option ids are
    # llama-3.1 / mixtral-8x7b — confirm these model ids are still served by Groq.
    model_choice = st.selectbox(
        "Choose Model:",
        ["llama-3.1-8b-instant", "mixtral-8x7b-32768"],
        help="LLaMA3 is faster, Mixtral is more capable"
    )
41
+
42
def initialize_agent(api_key, model_name):
    """Build a ChatGroq LLM client; on failure show a UI error and return None."""
    try:
        # Low temperature keeps the structured-summary output relatively stable.
        return ChatGroq(
            groq_api_key=api_key,
            model_name=model_name,
            temperature=0.3,
            max_tokens=1024,
        )
    except Exception as exc:
        st.error(f"Error initializing agent: {str(exc)}")
        return None
55
+
56
def create_research_prompt():
    """Return the PromptTemplate that structures a topic into 3 summarized subtopics."""
    research_template = """
You are an AI research assistant. Your task is to analyze a given topic and break it down into subtopics with summaries.

TOPIC: {topic}

INSTRUCTIONS:
1. Break the topic into exactly 3 relevant subtopics
2. For each subtopic, provide 3-5 bullet points summary
3. Keep summaries concise and informative
4. Focus on the most important and current aspects

FORMAT YOUR RESPONSE EXACTLY LIKE THIS:

## Subtopic 1: [Subtopic Name]
β€’ [Bullet point 1]
β€’ [Bullet point 2]
β€’ [Bullet point 3]
β€’ [Bullet point 4]
β€’ [Bullet point 5]

## Subtopic 2: [Subtopic Name]
β€’ [Bullet point 1]
β€’ [Bullet point 2]
β€’ [Bullet point 3]
β€’ [Bullet point 4]

## Subtopic 3: [Subtopic Name]
β€’ [Bullet point 1]
β€’ [Bullet point 2]
β€’ [Bullet point 3]
β€’ [Bullet point 4]
β€’ [Bullet point 5]

Topic to analyze: {topic}
"""
    # Single input variable; {topic} appears twice in the template on purpose.
    return PromptTemplate(template=research_template, input_variables=["topic"])
94
+
95
def process_research_query(agent, topic):
    """Run the research prompt for *topic* through *agent*; return the reply text or None."""
    try:
        # Build the fully formatted prompt text for this topic.
        prompt_text = create_research_prompt().format(topic=topic)

        # Spinner gives the user feedback while the (potentially slow) LLM call runs.
        with st.spinner("πŸ” Researching and analyzing..."):
            reply = agent.invoke([HumanMessage(content=prompt_text)])

        return reply.content

    except Exception as err:
        st.error(f"Error processing query: {str(err)}")
        return None
111
+
112
def display_results(results):
    """Render the research summary plus a markdown download button; no-op when empty."""
    if not results:
        return

    st.markdown("## πŸ“Š Research Summary")
    st.markdown(results)

    # Let the user save the summary locally as a markdown file.
    st.download_button(
        label="πŸ“₯ Download Summary",
        data=results,
        file_name="research_summary.md",
        mime="text/markdown",
    )
125
+
126
def main():
    """Drive the Streamlit UI: validate config, take a topic, run research, show results."""
    # Check if API key is provided (groq_api_key comes from the sidebar at module level).
    if not groq_api_key:
        st.warning("⚠️ Please enter your Groq API key in the sidebar to get started.")
        st.markdown("""
        ### How to get your Groq API key:
        1. Visit [Groq Console](https://console.groq.com/)
        2. Sign up for a free account
        3. Navigate to API Keys section
        4. Create a new API key
        5. Copy and paste it in the sidebar
        """)
        return

    # Initialize the agent; initialize_agent already reported any error to the UI.
    agent = initialize_agent(groq_api_key, model_choice)
    if not agent:
        return

    # Main interface: wide column for the topic input, narrow one for the button.
    col1, col2 = st.columns([2, 1])

    with col1:
        # Topic input
        topic = st.text_input(
            "🎯 Enter your research topic:",
            placeholder="e.g., Latest AI tools for teachers",
            help="Be specific for better results"
        )

    with col2:
        st.markdown("<br>", unsafe_allow_html=True)  # Add space to align with the input
        research_button = st.button("πŸš€ Start Research", type="primary")

    # Process query when button is clicked
    if research_button and topic:
        if len(topic.strip()) < 3:
            st.error("Please enter a more specific topic (at least 3 characters)")
            return

        # Process the research query
        results = process_research_query(agent, topic.strip())

        if results:
            display_results(results)

    elif research_button and not topic:
        st.error("Please enter a research topic first!")

    # Example topics
    st.markdown("---")
    st.markdown("### πŸ’‘ Example Topics:")
    example_topics = [
        "Latest AI tools for teachers",
        "Sustainable energy solutions 2024",
        "Remote work productivity strategies",
        "Cybersecurity trends for small businesses",
        "Digital marketing for startups"
    ]

    cols = st.columns(len(example_topics))
    for i, example in enumerate(example_topics):
        with cols[i]:
            if st.button(f"πŸ“ {example}", key=f"example_{i}"):
                # Store the example topic in session state and rerun
                # NOTE(review): the topic text_input above never reads this value, so the
                # example is NOT prefilled — the user must click the extra button below.
                st.session_state.example_topic = example
                st.rerun()

    # Handle example topic selection (present on the rerun triggered above)
    if 'example_topic' in st.session_state:
        st.info(f"Example topic selected: {st.session_state.example_topic}")
        if st.button("Use this example topic"):
            # Process the example topic
            results = process_research_query(agent, st.session_state.example_topic)
            if results:
                display_results(results)
            # Clear the session state so the info banner disappears on the next rerun.
            # NOTE(review): if this button is never clicked, example_topic lingers and
            # the banner stays visible indefinitely — confirm that is intended.
            del st.session_state.example_topic

    # Footer
    st.markdown("---")
    st.markdown(
        "Built with ❀️ using [Streamlit](https://streamlit.io) and [LangChain](https://langchain.com) | "
        "Powered by [Groq](https://groq.com)"
    )
211
+
212
# Script entry point — Streamlit re-executes this module on every interaction.
if __name__ == "__main__":
    main()