# First time only: on the command line, in the directory where app.py will live, run: git clone https://huggingface.co/spaces/brdemorin/chat
# This creates a "chat" directory; this app.py file needs to be saved into that chat directory
# (on my machine: C:\Users\brian.morin\Documents\HuggingFace\chat)
# Then do the below every time I make changes to app.py:
# 1. cd C:\Users\brian.morin\Documents\HuggingFace\chat
# 2. git add app.py
# 3. git commit -m "Add application file"
# 4. git push
# 5. (optional, local testing only) streamlit run app.py -- the Space rebuilds automatically after the push, so this step isn't needed for deployment
# 6. then navigate here to see the running app: https://huggingface.co/spaces/brdemorin/chat
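# Note (my addition, not in the original steps): a Streamlit Space also needs a
# requirements.txt file in the repo listing transformers and torch (streamlit itself
# comes preinstalled with the Streamlit SDK), or the build fails on import.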
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Demo widget left over from the Streamlit starter template
x = st.slider('Select a value')
st.write(x, 'squared is', x * x)
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("brdemorin/Phi3_80_steps")
model = AutoModelForCausalLM.from_pretrained("brdemorin/Phi3_80_steps")
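# Improvement sketch (my assumption, not in the original): Streamlit reruns this whole
# script on every widget interaction, so the lines above reload the full model each time.
# Wrapping the loads in a cached function avoids that; uncomment to try:
# @st.cache_resource
# def load_model():
#     tok = AutoTokenizer.from_pretrained("brdemorin/Phi3_80_steps")
#     mdl = AutoModelForCausalLM.from_pretrained("brdemorin/Phi3_80_steps")
#     return tok, mdl
# tokenizer, model = load_model()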
# Create a text input for the user to enter their message
user_input = st.text_input("Enter your message:")

# When the user enters a message and presses Enter, generate a response
if user_input:
    # Encode the user's message and pass it to the model
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
    generated_response_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens (everything after the prompt) and display them
    generated_response = tokenizer.decode(generated_response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    st.write(generated_response)
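# A possible variation (my sketch, not tested here): sampled decoding with a cap on new
# tokens often reads more naturally than greedy generation to a fixed total max_length.
# generated_response_ids = model.generate(
#     input_ids,
#     max_new_tokens=256,          # limit counts generated tokens only, not the prompt
#     do_sample=True,              # sample from the distribution instead of greedy decoding
#     temperature=0.7,             # soften the distribution for livelier replies
#     pad_token_id=tokenizer.eos_token_id,
# )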