import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

st.title("📚 AI Adaptive Learning")

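# TinyLlama-1.1B-Chat is a small chat-tuned model that can run on CPU; any
# other Hugging Face causal-LM checkpoint ID could be swapped in here.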
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

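# st.cache_resource caches the loaded tokenizer and model across Streamlit
# reruns, so the weights are read from disk only once per process.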
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

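    # Like many causal-LM tokenizers, this one may not define a pad token;
    # fall back to the EOS token so generation can pad safely.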
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,  # full precision; a safe default for CPU inference
    )

    model.eval()  # inference mode: disables dropout and other train-time behavior
    return tokenizer, model

tokenizer, model = load_model()

user_input = st.text_input("Ask a question:")

if st.button("Submit") and user_input:
    with st.spinner("Generating answer..."):
        # Note: a chat-tuned model usually answers better when the prompt is
        # built with tokenizer.apply_chat_template(); the raw question is used
        # here to keep the example minimal.
        inputs = tokenizer(user_input, return_tensors="pt")

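        # torch.no_grad() disables gradient tracking, cutting memory use and
        # overhead during generation.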
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=150,  # cap on newly generated tokens
                temperature=0.7,     # moderate randomness
                do_sample=True,      # sample rather than greedy-decode
                pad_token_id=tokenizer.eos_token_id,
            )

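        # generate() returns the prompt tokens followed by the completion, so
        # slice off the prompt before decoding to show only the model's answer.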
        answer = tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[-1]:],
            skip_special_tokens=True,
        )

    st.subheader("AI Answer:")
    st.write(answer)