import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig

# Load the model and tokenizer from your Hugging Face Hub repository
model_name = "abdulllah01/outputs"  # Replace with your Hugging Face repo name

# Load the model configuration first and modify it if necessary
config = AutoConfig.from_pretrained(model_name)
if hasattr(config, 'quantization_config'):
    config.quantization_config = None  # Disable any quantization settings

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, config=config)
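
# Note (optional, not part of the original script): Streamlit reruns this file
# top to bottom on every interaction, so the model would be reloaded on each
# question. A minimal sketch of how one might cache the heavy objects instead,
# assuming a Streamlit version that provides st.cache_resource (>= 1.18):
#
#   @st.cache_resource
#   def load_model():
#       tok = AutoTokenizer.from_pretrained(model_name)
#       mdl = AutoModelForCausalLM.from_pretrained(model_name, config=config)
#       return tok, mdl
#
#   tokenizer, model = load_model()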
# Streamlit interface
st.title("Tech Support Chatbot")
st.write("Ask your technical support questions below:")

# Text input for the question
user_input = st.text_input("Your question:", "")
if user_input:
    # Generate a response using the model
    inputs = tokenizer.encode(user_input, return_tensors="pt")
    response = model.generate(inputs, max_length=100)
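    # Optional tweak (an assumption, not in the original Space): for GPT-style
    # checkpoints, passing an explicit pad token silences a generation warning,
    # and max_new_tokens bounds only the reply rather than prompt + reply:
    #   response = model.generate(inputs, max_new_tokens=100,
    #                             pad_token_id=tokenizer.eos_token_id)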
    answer = tokenizer.decode(response[0], skip_special_tokens=True)
    st.write("**Answer:**", answer)
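
# To try this outside the Space, save the script and launch it with Streamlit's
# CLI (app.py is the conventional entry-point name for Streamlit Spaces):
#   streamlit run app.py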