namantjeaswi commited on
Commit
3605144
·
1 Parent(s): e132e5c

add hf auth

Browse files
Files changed (1) hide show
  1. app.py +23 -5
app.py CHANGED
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Hub repo id of the causal LM to serve. Kept in ONE constant so the
# tokenizer and model can never silently diverge (previously the repo id
# was hard-coded twice, once per from_pretrained call).
MODEL_ID = "meta-llama/Meta-Llama-3-8B"

# Load the tokenizer and model weights from the Hugging Face Hub.
# NOTE(review): this runs at import time and downloads several GB on a
# cold start — confirm the Space has enough disk/RAM for this model.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# Create text generation pipeline reusing the already-loaded objects so
# nothing is downloaded or instantiated twice.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
import logging
import sys

import streamlit as st
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Authenticate with the Hugging Face Hub so gated/private models can be
# downloaded. The token must be configured in Streamlit secrets under
# "HF_API_TOKEN"; st.secrets raises KeyError if it is missing, which is
# the desired fail-fast behavior for a misconfigured deployment.
HF_TOKEN = st.secrets["HF_API_TOKEN"]
login(token=HF_TOKEN)

# Route INFO-level logs to stdout so they appear in the Space's log viewer.
# basicConfig() already installs a stdout StreamHandler on the root logger;
# the previous code additionally called root.addHandler(StreamHandler(...)),
# which made every log record print twice. One call is sufficient.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Hub repo id of the causal LM to serve.
# Alternative previously used: "meta-llama/Meta-Llama-3-8B".
model_id = "mistralai/Mistral-7B-v0.1"

# Load tokenizer and weights from the Hub.
# NOTE(review): runs at import time and downloads several GB on a cold
# start — confirm the Space has enough disk/RAM for this model.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Create text generation pipeline reusing the already-loaded objects so
# nothing is downloaded or instantiated twice.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)