|
|
|
|
|
import os

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer


@st.cache_resource
def load_model():
    """Load and cache the Indian Legal Assistant tokenizer and model.

    Decorated with ``st.cache_resource`` so the (expensive) download and
    load happen only once per Streamlit server process, not on every rerun.

    Returns:
        tuple: ``(tokenizer, model)`` ready for tokenization and generation.
    """
    model_name = "mradermacher/Indian_Legal_Assistant-GGUF"
    # Hugging Face access token, read from the environment rather than a
    # hard-coded (previously undefined) variable; None is fine for public
    # repos. `token=` replaces the deprecated `use_auth_token=` argument.
    token = os.environ.get("HF_TOKEN")
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    # AutoModelForCausalLM (not bare AutoModel) is required because the app
    # later calls model.generate(), which AutoModel does not expose.
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
    return tokenizer, model
|
|
|
|
|
# Load once at startup (the original called load_model() twice);
# @st.cache_resource makes repeated calls cheap, but one call is enough.
tokenizer, model = load_model()
|
|
|
|
|
|
|
|
# --- Page header -----------------------------------------------------------
st.title("Indian Legal Assistant - Hugging Face Spaces Deployment")
st.write("This app provides answers to legal questions using the Indian Legal Assistant model.")

# Free-form question box; empty string until the user types something.
user_input = st.text_area("Enter your legal question:")

if st.button("Generate Response"):
    if not user_input:
        # Button pressed with an empty box: ask for input instead of
        # running the model on an empty prompt.
        st.write("Please enter a question to get a response.")
    else:
        # Tokenize the question into PyTorch tensors for the model.
        encoded = tokenizer(user_input, return_tensors="pt")
        # Generate up to 150 tokens total (prompt included) and decode
        # back to plain text, dropping special tokens.
        generated = model.generate(**encoded, max_length=150)
        response = tokenizer.decode(generated[0], skip_special_tokens=True)
        st.write("### Response:")
        st.write(response)
|
|
|