import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource
def load_model_and_tokenizer():
    # Note: the repo id below is a substitute. The original snippet pointed at a
    # model id that does not exist on the Hugging Face Hub;
    # mistralai/Mistral-7B-Instruct-v0.1 is the official instruction-tuned
    # Mistral 7B checkpoint.
    model_name_or_path = "mistralai/Mistral-7B-Instruct-v0.1"
    # device_map="auto" lets transformers place the weights across available
    # GPUs/CPU. It requires the `accelerate` package to be installed, but
    # `accelerate` does not need to be imported or instantiated directly.
    model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        device_map="auto",
        torch_dtype=torch.float16,  # halves memory vs. the default float32
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    return model, tokenizer

@st.cache_data
def generate_response(_model, _tokenizer, prompt):
    # The leading underscores tell Streamlit not to hash the (unhashable) model
    # and tokenizer arguments; the cache is keyed on `prompt` alone.
    # apply_chat_template renders the prompt in the chat format the model was
    # fine-tuned with ([INST] ... [/INST] for Mistral Instruct), instead of the
    # OpenAssistant-style <|prompter|>/<|assistant|> tokens in the original,
    # which this model was never trained on.
    messages = [{"role": "user", "content": prompt}]
    input_ids = _tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(_model.device)
    with torch.no_grad():
        output = _model.generate(
            input_ids,
            temperature=0.7,
            do_sample=True,
            eos_token_id=_tokenizer.eos_token_id,
            # Mistral's tokenizer defines no pad token; reuse EOS to avoid warnings.
            pad_token_id=_tokenizer.eos_token_id,
            max_new_tokens=512,
        )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    response = _tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response

def main():
    st.title("Mistral 7B Language Model")
    model, tokenizer = load_model_and_tokenizer()

    prompt = st.text_area("Enter your query:")
    if st.button("Submit"):
        with st.spinner("Generating response..."):
            response = generate_response(model, tokenizer, prompt)
            st.write(response)


if __name__ == "__main__":
    main()
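
# Usage sketch (assumptions: the file is saved as app.py and the file name is
# arbitrary; package versions are illustrative, not from the original source):
#
#   pip install streamlit torch "transformers>=4.34" accelerate
#   streamlit run app.py
#
# Loading the full 7B checkpoint in float16 needs roughly 14 GB of GPU memory;
# with device_map="auto", layers that don't fit are offloaded to CPU at the
# cost of much slower generation.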