Engineer786 committed on
Commit
438e917
·
verified ·
1 Parent(s): 9523bcc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -2
app.py CHANGED
@@ -1,14 +1,71 @@
 
1
  import streamlit as st
2
  from PyPDF2 import PdfReader
3
  from sentence_transformers import SentenceTransformer
4
  import faiss
5
- import os
6
  from groq import Groq
7
 
8
- # Initialize Groq client
9
  GROQ_API_KEY = os.environ.get('GroqApi')
10
  client = Groq(api_key=GROQ_API_KEY)
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  # Initialize embedding model
13
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
14
 
 
1
"""RAG-based quiz app: upload a PDF, embed its text with sentence-transformers,
index the chunks in FAISS, and have a Groq-hosted LLM generate quiz questions."""

import os

import faiss
import numpy as np
import streamlit as st
from groq import Groq
from PyPDF2 import PdfReader
from sentence_transformers import SentenceTransformer

# --- Groq client ------------------------------------------------------------
# The API key comes from the 'GroqApi' environment variable (a Spaces secret).
# Surface a visible warning when it is absent instead of failing deep inside
# the first chat-completion request.
GROQ_API_KEY = os.environ.get('GroqApi')
if not GROQ_API_KEY:
    st.warning("Groq API key not found in environment variable 'GroqApi'.")
client = Groq(api_key=GROQ_API_KEY)

# --- Embedding model --------------------------------------------------------
# Load the model exactly once.  The previous revision constructed two
# SentenceTransformer instances ('distilbert-base-uncased' here and a leftover
# 'all-MiniLM-L6-v2' at the bottom of the file); the second load was dead code
# that only doubled startup time.  'all-MiniLM-L6-v2' is the one kept because
# it is actually trained for sentence embeddings.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

# --- Streamlit UI -----------------------------------------------------------
st.title("RAG-based Quiz App")

uploaded_file = st.file_uploader("Upload a PDF", type="pdf")
if uploaded_file is not None:
    # Extract text from the PDF.  extract_text() can return None for pages
    # without a text layer (e.g. scanned images), so coerce those to "".
    pdf_reader = PdfReader(uploaded_file)
    text = " ".join(page.extract_text() or "" for page in pdf_reader.pages)

    # Fixed-size character chunking (500 chars per chunk).
    st.write("Processing the PDF...")
    chunks = [text[i:i + 500] for i in range(0, len(text), 500)]

    if not chunks:
        # Without this guard, embeddings.shape[1] below raises IndexError
        # on a PDF that yields no extractable text.
        st.error("No extractable text found in the uploaded PDF.")
        st.stop()

    # Embed every chunk and build an in-memory FAISS L2 index.
    embeddings = np.asarray(embedding_model.encode(chunks), dtype="float32")
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)

    st.success("PDF Processed! Embeddings Created.")

    # Ask the LLM for one multiple-choice question per chunk, limited to the
    # first five chunks to bound latency and token cost.
    st.write("Generating Quiz Questions...")
    questions = []
    for chunk in chunks[:5]:
        response = client.chat.completions.create(
            messages=[{
                "role": "user",
                "content": f"Create a multiple-choice quiz question from this text: {chunk}",
            }],
            model="llama3-8b-8192",
        )
        questions.append(response.choices[0].message.content)

    st.success("Quiz Questions Generated!")

    # Render the quiz.  NOTE(review): answer checking is placeholder logic —
    # "Option A" is always treated as correct and the generated question text
    # is not parsed into real options yet.
    for idx, question in enumerate(questions):
        st.write(f"**Question {idx+1}:** {question}")
        options = ["Option A", "Option B", "Option C", "Option D"]  # Placeholder
        selected_option = st.radio(
            f"Select your answer for Question {idx+1}", options, key=idx
        )
        if st.button(f"Submit Answer for Question {idx+1}", key=f"submit_{idx}"):
            # Dummy Logic: Assume Option A is correct for demonstration
            correct_option = "Option A"
            if selected_option == correct_option:
                st.success("Correct Answer!")
            else:
                st.error(f"Wrong Answer! Correct Answer: {correct_option}")

# Footer — shown on every run, whether or not a file has been uploaded.
st.write("App developed and deployed using Hugging Face Spaces.")
71