Maira-ghaffar committed on
Commit
e41f4a0
·
verified ·
1 Parent(s): 2e89057

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -32
app.py CHANGED
@@ -1,37 +1,92 @@
1
  import streamlit as st
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
- import torch
 
 
 
4
 
5
- st.title("📚 AI Adaptive Learning")
 
6
 
7
- MODEL_ID = "microsoft/phi-2"
 
 
 
 
 
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  @st.cache_resource
10
- def load_model():
11
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
12
- model = AutoModelForCausalLM.from_pretrained(
13
- MODEL_ID,
14
- torch_dtype=torch.float32,
15
- device_map="auto"
16
- )
17
- return tokenizer, model
18
-
19
- tokenizer, model = load_model()
20
-
21
- user_input = st.text_input("Ask a question:")
22
-
23
- if st.button("Submit") and user_input:
24
- inputs = tokenizer(user_input, return_tensors="pt")
25
-
26
- with torch.no_grad():
27
- outputs = model.generate(
28
- **inputs,
29
- max_new_tokens=150,
30
- do_sample=True,
31
- temperature=0.7
32
- )
33
-
34
- answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
35
-
36
- st.subheader("AI Answer:")
37
- st.write(answer)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import requests
3
+ import os
4
+ from sentence_transformers import SentenceTransformer
5
+ import faiss
6
+ import numpy as np
7
 
8
+ st.set_page_config(page_title="📚 AI Adaptive Learning (Ultimate)", layout="wide")
9
+ st.title("📚 AI Adaptive Learning & Smart Revision System (Mistral 7B)")
10
 
11
+ # -----------------------------
12
+ # 1️⃣ Hugging Face API Setup
13
+ # -----------------------------
14
+ HF_API_TOKEN = st.secrets["HF_API_TOKEN"]
15
+ MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.2"
16
+ API_URL = f"https://api-inference.huggingface.co/models/{MODEL_ID}"
17
 
18
+ HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
19
+
20
+ def query_hf_api(prompt):
21
+ payload = {
22
+ "inputs": prompt,
23
+ "parameters": {"max_new_tokens": 250, "temperature": 0.7, "top_p":0.9}
24
+ }
25
+ response = requests.post(API_URL, headers=HEADERS, json=payload)
26
+ if response.status_code == 200:
27
+ output = response.json()
28
+ return output[0]["generated_text"]
29
+ else:
30
+ return f"Error: {response.status_code} - {response.text}"
31
+
32
+ # -----------------------------
33
+ # 2️⃣ Build RAG (Context Retrieval)
34
+ # -----------------------------
35
  @st.cache_resource
36
+ def build_rag_index(doc_folder="docs"):
37
+ embeddings = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
38
+ corpus = []
39
+ corpus_texts = []
40
+
41
+ for file_name in os.listdir(doc_folder):
42
+ if file_name.endswith(".txt"):
43
+ path = os.path.join(doc_folder, file_name)
44
+ with open(path, "r", encoding="utf-8") as f:
45
+ text = f.read()
46
+ sentences = text.split("\n")
47
+ for sent in sentences:
48
+ if sent.strip():
49
+ corpus.append(embeddings.encode(sent))
50
+ corpus_texts.append(sent)
51
+
52
+ dim = corpus[0].shape[0]
53
+ index = faiss.IndexFlatL2(dim)
54
+ index.add(np.array(corpus).astype("float32"))
55
+ return index, corpus_texts, embeddings
56
+
57
+ index, corpus_texts, embedder = build_rag_index()
58
+
59
+ # -----------------------------
60
+ # 3️⃣ Retrieve Context
61
+ # -----------------------------
62
+ def retrieve_context(query, k=3):
63
+ query_vec = embedder.encode(query).astype("float32")
64
+ D, I = index.search(np.array([query_vec]), k)
65
+ context = "\n".join([corpus_texts[i] for i in I[0]])
66
+ return context
67
+
68
+ # -----------------------------
69
+ # 4️⃣ Streamlit UI
70
+ # -----------------------------
71
+ difficulty = st.selectbox("Select explanation style:", ["Explain Like I'm 10", "Detailed", "Step-by-Step"])
72
+
73
+ user_question = st.text_input("Ask a question:")
74
+
75
+ if st.button("Submit") and user_question:
76
+ with st.spinner("Generating high-quality answer..."):
77
+ context = retrieve_context(user_question, k=3)
78
+ prompt = f"""
79
+ You are a knowledgeable teacher and explain concepts clearly.
80
+
81
+ Context:
82
+ {context}
83
+
84
+ Question:
85
+ {user_question}
86
+
87
+ Explain the answer in a {difficulty} manner.
88
+ Answer:
89
+ """
90
+ answer = query_hf_api(prompt)
91
+ st.subheader("📖 AI Answer:")
92
+ st.write(answer)