simar007 committed on
Commit
f916d1b
Β·
verified Β·
1 Parent(s): 5ba69d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -67
app.py CHANGED
@@ -1,24 +1,19 @@
1
  import os
2
  import gradio as gr
3
  from langchain_community.document_loaders import YoutubeLoader, PyPDFLoader
4
- from langchain_text_splitters import RecursiveCharacterTextSplitter
5
  from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
6
  from langchain_community.vectorstores import FAISS
7
  from langchain.chains import RetrievalQA
8
 
9
  # --- CONFIGURATION ---
10
  hf_token = os.getenv("HF_TOKEN")
11
-
12
- # Global variable
13
- vector_db_state = None
14
-
15
- # Using Mistral-7B-Instruct-v0.3
16
  repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
17
 
 
18
  def get_llm():
19
  if not hf_token:
20
  raise ValueError("HF_TOKEN not found in secrets.")
21
-
22
  return HuggingFaceEndpoint(
23
  repo_id=repo_id,
24
  max_new_tokens=512,
@@ -26,6 +21,9 @@ def get_llm():
26
  huggingfacehub_api_token=hf_token
27
  )
28
 
 
 
 
29
  # --- 1. PROCESSING ENGINE ---
30
  def process_content(url, file_obj):
31
  global vector_db_state
@@ -84,14 +82,9 @@ def generate_summary():
84
  docs = retriever.invoke("Summary")
85
  context = "\n\n".join([d.page_content for d in docs])
86
 
87
- prompt = f"""
88
- [INST] You are an expert tutor. Summarize the following content into concise bullet points.
89
-
90
- Content:
91
  {context}
92
-
93
- Summary: [/INST]
94
- """
95
  return llm.invoke(prompt)
96
  except Exception as e:
97
  return f"Error: {str(e)}"
@@ -103,18 +96,12 @@ def generate_quiz():
103
  try:
104
  llm = get_llm()
105
  retriever = vector_db_state.as_retriever(search_kwargs={"k": 3})
106
- docs = retriever.invoke("Quiz concepts")
107
  context = "\n\n".join([d.page_content for d in docs])
108
 
109
- prompt = f"""
110
- [INST] Generate 3 multiple-choice questions based on this text.
111
- Format: Question, Options, Correct Answer.
112
-
113
- Text:
114
  {context}
115
-
116
- Questions: [/INST]
117
- """
118
  return llm.invoke(prompt)
119
  except Exception as e:
120
  return f"Error: {str(e)}"
@@ -129,64 +116,37 @@ def generate_mindmap():
129
  docs = retriever.invoke("Structure")
130
  context = "\n\n".join([d.page_content for d in docs])
131
 
132
- prompt = f"""
133
- [INST] Create a simple Mermaid.js mindmap (graph TD) based on this text.
134
- Output ONLY the code inside.
135
-
136
- Text:
137
  {context}
138
-
139
- Mermaid Code: [/INST]
140
- """
141
  return llm.invoke(prompt)
142
  except Exception as e:
143
  return f"Error: {str(e)}"
144
 
145
- # --- 3. GRADIO INTERFACE ---
146
- custom_css = """
147
- #component-0 {max-width: 1200px; margin: auto;}
148
- .chat-window {height: 400px; overflow-y: scroll;}
149
- """
150
-
151
- with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
152
- gr.Markdown("# 🧠 OpenLearn AI (Free Edition)\n*Powered by Mistral-7B & Hugging Face*")
153
 
154
  with gr.Row():
155
  with gr.Column(scale=1):
156
- gr.Markdown("### 1. Upload Source Material")
157
- yt_input = gr.Textbox(label="YouTube URL", placeholder="Paste video link here...")
158
- pdf_input = gr.File(label="Or Upload PDF", file_types=[".pdf"])
159
- process_btn = gr.Button("πŸš€ Process Content", variant="primary")
160
  status_output = gr.Textbox(label="Status", interactive=False)
161
 
162
- gr.Markdown("### 2. Smart Tools")
163
- with gr.Accordion("Generate Study Aids", open=True):
164
- summ_btn = gr.Button("πŸ“ Generate Summary")
165
- quiz_btn = gr.Button("❓ Create Quiz")
166
- map_btn = gr.Button("πŸ—ΊοΈ Generate Mind Map")
167
 
168
  with gr.Column(scale=2):
169
- gr.Markdown("### 3. Chat with your Content")
170
- chatbot = gr.ChatInterface(
171
- fn=chat_engine,
172
- type="messages",
173
- examples=["What is the main idea?", "Explain the key concepts."]
174
- )
175
 
176
  with gr.Row():
177
- with gr.Tab("Summary"):
178
- summ_output = gr.Markdown("Summary will appear here...")
179
- with gr.Tab("Quiz"):
180
- quiz_output = gr.Markdown("Quiz will appear here...")
181
- with gr.Tab("Mind Map"):
182
- map_output = gr.Code(language="mermaid", label="Mermaid Code")
183
-
184
- process_btn.click(
185
- process_content,
186
- inputs=[yt_input, pdf_input],
187
- outputs=[status_output]
188
- )
189
-
190
  summ_btn.click(generate_summary, inputs=None, outputs=summ_output)
191
  quiz_btn.click(generate_quiz, inputs=None, outputs=quiz_output)
192
  map_btn.click(generate_mindmap, inputs=None, outputs=map_output)
 
1
  import os
2
  import gradio as gr
3
  from langchain_community.document_loaders import YoutubeLoader, PyPDFLoader
4
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
5
  from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
6
  from langchain_community.vectorstores import FAISS
7
  from langchain.chains import RetrievalQA
8
 
9
  # --- CONFIGURATION ---
10
  hf_token = os.getenv("HF_TOKEN")
 
 
 
 
 
11
  repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
12
 
13
+ # --- HELPER: LLM SETUP ---
14
  def get_llm():
15
  if not hf_token:
16
  raise ValueError("HF_TOKEN not found in secrets.")
 
17
  return HuggingFaceEndpoint(
18
  repo_id=repo_id,
19
  max_new_tokens=512,
 
21
  huggingfacehub_api_token=hf_token
22
  )
23
 
24
+ # --- STATE ---
25
+ vector_db_state = None
26
+
27
  # --- 1. PROCESSING ENGINE ---
28
  def process_content(url, file_obj):
29
  global vector_db_state
 
82
  docs = retriever.invoke("Summary")
83
  context = "\n\n".join([d.page_content for d in docs])
84
 
85
+ prompt = f"""[INST] Summarize this content into bullet points:
 
 
 
86
  {context}
87
+ [/INST]"""
 
 
88
  return llm.invoke(prompt)
89
  except Exception as e:
90
  return f"Error: {str(e)}"
 
96
  try:
97
  llm = get_llm()
98
  retriever = vector_db_state.as_retriever(search_kwargs={"k": 3})
99
+ docs = retriever.invoke("Key Concepts")
100
  context = "\n\n".join([d.page_content for d in docs])
101
 
102
+ prompt = f"""[INST] Create 3 multiple choice questions (with answers) based on this:
 
 
 
 
103
  {context}
104
+ [/INST]"""
 
 
105
  return llm.invoke(prompt)
106
  except Exception as e:
107
  return f"Error: {str(e)}"
 
116
  docs = retriever.invoke("Structure")
117
  context = "\n\n".join([d.page_content for d in docs])
118
 
119
+ prompt = f"""[INST] Create a Mermaid.js mindmap (graph TD) code block based on this:
 
 
 
 
120
  {context}
121
+ [/INST]"""
 
 
122
  return llm.invoke(prompt)
123
  except Exception as e:
124
  return f"Error: {str(e)}"
125
 
126
+ # --- 3. UI ---
127
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
128
+ gr.Markdown("# 🧠 OpenLearn AI (Stable Build)")
 
 
 
 
 
129
 
130
  with gr.Row():
131
  with gr.Column(scale=1):
132
+ yt_input = gr.Textbox(label="YouTube URL")
133
+ pdf_input = gr.File(label="Upload PDF")
134
+ process_btn = gr.Button("πŸš€ Process", variant="primary")
 
135
  status_output = gr.Textbox(label="Status", interactive=False)
136
 
137
+ summ_btn = gr.Button("πŸ“ Summary")
138
+ quiz_btn = gr.Button("❓ Quiz")
139
+ map_btn = gr.Button("πŸ—ΊοΈ Mind Map")
 
 
140
 
141
  with gr.Column(scale=2):
142
+ chatbot = gr.ChatInterface(fn=chat_engine)
 
 
 
 
 
143
 
144
  with gr.Row():
145
+ summ_output = gr.Markdown(label="Summary")
146
+ quiz_output = gr.Markdown(label="Quiz")
147
+ map_output = gr.Code(label="Mind Map Code")
148
+
149
+ process_btn.click(process_content, inputs=[yt_input, pdf_input], outputs=[status_output])
 
 
 
 
 
 
 
 
150
  summ_btn.click(generate_summary, inputs=None, outputs=summ_output)
151
  quiz_btn.click(generate_quiz, inputs=None, outputs=quiz_output)
152
  map_btn.click(generate_mindmap, inputs=None, outputs=map_output)