import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import os
import logging
import sys
from dotenv import load_dotenv
from huggingface_hub import login
# For local development you can load the token from a .env file instead:
#load_dotenv()
#HF_TOKEN = os.environ.get("HF_API_TOKEN")
# On Streamlit Cloud, read the Hugging Face token from st.secrets
HF_TOKEN = st.secrets["HF_API_TOKEN"]
login(token=HF_TOKEN)
# Set up logging to stdout (basicConfig already attaches a stdout handler;
# adding a second StreamHandler would duplicate every log line)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
# Alternative: Meta-Llama-3-8B (a gated repo; request access on the Hub first,
# which is why login() is called above)
#tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
#model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
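# A minimal sketch (not in the original script): cache the load with
# st.cache_resource so Streamlit reruns don't reload the 7B weights on every
# widget interaction. The helper name load_model is hypothetical.
#@st.cache_resource
#def load_model(model_id="mistralai/Mistral-7B-v0.1"):
#    tok = AutoTokenizer.from_pretrained(model_id)
#    mdl = AutoModelForCausalLM.from_pretrained(model_id)
#    return tok, mdl
#tokenizer, model = load_model()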
# Optional: save the tokenizer and model to a local directory (run once)
#save_directory = "models"
#model.save_pretrained(save_directory)
#tokenizer.save_pretrained(save_directory)
# ...then reload them from disk. Note that load_in_8bit is a model-quantization
# argument, not a tokenizer argument, so it is dropped from the tokenizer call.
#tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True)
#model = AutoModelForCausalLM.from_pretrained(save_directory, local_files_only=True, load_in_8bit=True)
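# A hedged aside (assumption, not from the original script): newer transformers
# releases prefer BitsAndBytesConfig over the bare load_in_8bit flag, and 8-bit
# loading needs the bitsandbytes package plus a CUDA GPU.
#from transformers import BitsAndBytesConfig
#model = AutoModelForCausalLM.from_pretrained(
#    "mistralai/Mistral-7B-v0.1",
#    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#    device_map="auto",
#)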
pipe = pipeline("text-generation",
                model=model,  # or pass the checkpoint id directly: "mistralai/Mistral-7B-v0.1"
                tokenizer=tokenizer,
                )
# Sanity check: generate once with a fixed prompt. max_new_tokens caps only the
# generated tokens, whereas max_length would also count the prompt.
result = pipe("Tell me about transformers.", max_new_tokens=50, truncation=True)
print(result)
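# Optional (an assumption, not in the original): on a GPU machine the pipeline
# can be pinned to a device; device=0 is the first CUDA device and device=-1
# (the default) runs on CPU.
#pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)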
# Alternative: the instruction-tuned variant mistralai/Mistral-7B-Instruct-v0.2
#save_directory = 'Mistral-7B-Instruct-v0.2'
#tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
#model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
# (as above, load_in_8bit belongs on the model, not the tokenizer)
#tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True)
#model = AutoModelForCausalLM.from_pretrained(save_directory, local_files_only=True, load_in_8bit=True)
pipe = pipeline("text-generation",
                model=model,  # 'Mistral-7B-Instruct-v0.2' if the block above is enabled
                tokenizer=tokenizer,
                )
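# A minimal sketch, assuming the Instruct checkpoint is loaded: instruction-tuned
# Mistral models expect their chat template rather than a raw prompt. The
# messages list below is illustrative, not from the original script.
#messages = [{"role": "user", "content": "Tell me about transformers."}]
#prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
#result = pipe(prompt, max_new_tokens=50)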
question = st.text_input("Enter your question", "Tell me about transformers.")
# Generate text and display it in the app; print() only reaches the server logs
result = pipe(question, max_new_tokens=50, truncation=True)
print(result)
st.write(result[0]["generated_text"])
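# Optional tuning (a sketch, not in the original): sampling parameters are
# forwarded to model.generate for more varied output.
#result = pipe(question,
#              max_new_tokens=100,
#              do_sample=True,
#              temperature=0.7,
#              top_p=0.9,
#              truncation=True)
#st.write(result[0]["generated_text"])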