```python
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model once and cache them across reruns;
# Streamlit re-executes this script on every interaction, and
# reloading the weights each time would make the app unusably slow.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("varma007ut/Indian_Legal_Assistant")
    model = AutoModelForCausalLM.from_pretrained("varma007ut/Indian_Legal_Assistant")
    return tokenizer, model

tokenizer, model = load_model()

def chatbot_response(prompt):
    # Tokenize the question, generate up to 200 new tokens
    # (max_new_tokens caps the reply length regardless of how long
    # the prompt is), and decode the result back into plain text.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=200)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

st.title("Indian Legal Assistant Chatbot")
st.write("Ask questions related to Indian law.")

user_input = st.text_input("You:", placeholder="Type your question here...")

if st.button("Send"):
    if user_input:
        response = chatbot_response(user_input)
        st.write("Chatbot:", response)
    else:
        st.write("Please enter a question.")
```
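To try the app, save the script to a file (say, `app.py`; the filename is arbitrary) and launch it with `streamlit run app.py`. The `st.cache_resource` wrapper is the one design choice worth calling out: Streamlit reruns the entire script on every button click, so without caching, the model and tokenizer would be reloaded from disk on each question.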