Update app.py
Browse files
app.py
CHANGED
|
@@ -1,25 +1,36 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
import streamlit as st
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
-
import wikipedia
|
| 5 |
from xhtml2pdf import pisa
|
| 6 |
import io
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
st.set_page_config(page_title="Ask Wikipedia", page_icon="π", layout="wide")
|
| 12 |
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
except Exception as e:
|
| 22 |
-
return f"An error occurred: {e}"
|
| 23 |
|
| 24 |
# --- PDF Generation ---
|
| 25 |
def generate_pdf(convo, topic):
|
|
@@ -153,10 +164,7 @@ if st.session_state.current_conversation:
|
|
| 153 |
with col1:
|
| 154 |
if st.button("β
Save", key=f"save_{idx}"):
|
| 155 |
msg["text"] = new_text
|
| 156 |
-
|
| 157 |
-
new_response = get_wikipedia_summary(new_text)
|
| 158 |
-
except:
|
| 159 |
-
new_response = "Failed to retrieve summary."
|
| 160 |
if idx + 1 < len(convo) and convo[idx + 1]["role"] == "assistant":
|
| 161 |
convo[idx + 1]["text"] = new_response
|
| 162 |
st.session_state.edit_mode[idx] = False
|
|
@@ -191,9 +199,9 @@ if st.session_state.current_conversation:
|
|
| 191 |
|
| 192 |
# --- Export PDF ---
|
| 193 |
if st.button("π₯ Export Conversation as PDF"):
|
| 194 |
-
pdf_bytes = generate_pdf(convo,st.session_state.current_conversation)
|
| 195 |
if pdf_bytes:
|
| 196 |
-
st.download_button("Download PDF", pdf_bytes, file_name="
|
| 197 |
else:
|
| 198 |
st.error("β Failed to generate PDF.")
|
| 199 |
|
|
@@ -201,9 +209,18 @@ if st.session_state.current_conversation:
|
|
| 201 |
user_input = st.chat_input("Ask Wikipedia...")
|
| 202 |
if user_input:
|
| 203 |
convo.append({"role": "user", "text": user_input})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
try:
|
| 205 |
-
|
| 206 |
-
except:
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
+
import subprocess
|
| 3 |
import streamlit as st
|
| 4 |
from dotenv import load_dotenv
|
|
|
|
| 5 |
from xhtml2pdf import pisa
|
| 6 |
import io
|
| 7 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM # for loading llama tokenizer
|
| 8 |
+
|
| 9 |
+
# --- Load Model Resources ---
def load_resources():
    """Load the fine-tuned TinyLlama model and tokenizer from the Hugging Face Hub.

    Reads HUGGINGFACE_TOKEN from the environment (populated via .env) and
    passes it directly to `from_pretrained` instead of shelling out to
    `huggingface-cli login`, which leaked the token into the process list
    and raised TypeError when the variable was unset.

    Returns:
        tuple: (model, tokenizer) for "istiak101/TinyLlama-1.1B-Finetuned".
    """
    load_dotenv()
    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")  # may be None for public repos
    tokenizer = AutoTokenizer.from_pretrained(
        "istiak101/TinyLlama-1.1B-Finetuned", token=huggingface_token
    )
    model = AutoModelForCausalLM.from_pretrained(
        "istiak101/TinyLlama-1.1B-Finetuned", token=huggingface_token
    )
    return model, tokenizer
|
| 17 |
+
|
| 18 |
+
# --- Store model and tokenizer in session state ---
# Load once per session; subsequent Streamlit reruns reuse the cached objects.
_required_keys = ("llama_model", "llama_tokenizer")
if any(key not in st.session_state for key in _required_keys):
    loaded_model, loaded_tokenizer = load_resources()
    st.session_state.llama_model = loaded_model
    st.session_state.llama_tokenizer = loaded_tokenizer
|
| 23 |
|
| 24 |
st.set_page_config(page_title="Ask Wikipedia", page_icon="π", layout="wide")
|
| 25 |
|
| 26 |
+
def get_llama_response(query):
    """Generate a reply to `query` with the session-cached TinyLlama model.

    For decoder-only (causal) models, `generate` returns the prompt tokens
    followed by the new tokens, so decoding `outputs[0]` whole would echo
    the user's question back at the start of every answer. We slice off the
    prompt before decoding so only the generated continuation is returned.

    Args:
        query: The user's input text.

    Returns:
        str: The model's generated continuation (prompt removed).
    """
    model = st.session_state.llama_model
    tokenizer = st.session_state.llama_tokenizer

    inputs = tokenizer(query, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=300)
    # Keep only tokens produced after the prompt.
    prompt_len = inputs["input_ids"].shape[-1]
    generated_tokens = outputs[0][prompt_len:]
    response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    return response
|
|
|
|
|
|
|
| 34 |
|
| 35 |
# --- PDF Generation ---
|
| 36 |
def generate_pdf(convo, topic):
|
|
|
|
| 164 |
with col1:
|
| 165 |
if st.button("β
Save", key=f"save_{idx}"):
|
| 166 |
msg["text"] = new_text
|
| 167 |
+
new_response = get_llama_response(new_text)
|
|
|
|
|
|
|
|
|
|
| 168 |
if idx + 1 < len(convo) and convo[idx + 1]["role"] == "assistant":
|
| 169 |
convo[idx + 1]["text"] = new_response
|
| 170 |
st.session_state.edit_mode[idx] = False
|
|
|
|
| 199 |
|
| 200 |
# --- Export PDF ---
|
| 201 |
if st.button("π₯ Export Conversation as PDF"):
|
| 202 |
+
pdf_bytes = generate_pdf(convo, st.session_state.current_conversation)
|
| 203 |
if pdf_bytes:
|
| 204 |
+
st.download_button("Download PDF", pdf_bytes, file_name=f"{st.session_state.current_conversation}_Conversation.pdf", mime="application/pdf")
|
| 205 |
else:
|
| 206 |
st.error("β Failed to generate PDF.")
|
| 207 |
|
|
|
|
| 209 |
user_input = st.chat_input("Ask Wikipedia...")
if user_input:
    convo.append({"role": "user", "text": user_input})
    st.rerun()

# Display assistant response after rerun
if st.session_state.current_conversation:
    convo = st.session_state.chat_sessions[st.session_state.current_conversation]
    # Reply only when the last message is an unanswered user turn. This is
    # more robust than the original odd-length heuristic, which breaks if the
    # history ever contains consecutive messages from the same role.
    if convo and convo[-1]["role"] == "user":
        last_user_msg = convo[-1]["text"]
        with st.spinner("Generating response..."):
            try:
                assistant_reply = get_llama_response(last_user_msg)
            except Exception as e:
                # Surface the failure reason instead of discarding `e`
                # (the original f-string had no placeholder at all).
                assistant_reply = f"⚠️ Failed to generate response: {e}"
        convo.append({"role": "assistant", "text": assistant_reply})
        st.rerun()
|