Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,8 +8,8 @@ torch.set_num_threads(2)
|
|
| 8 |
HF_TOKEN = os.environ.get("HF_TOKEN")
|
| 9 |
|
| 10 |
# Loading the tokenizer and model from Hugging Face's model hub.
|
| 11 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
| 12 |
-
model = AutoModelForCausalLM.from_pretrained("
|
| 13 |
|
| 14 |
def count_tokens(text):
    """Count how many tokens the module-level tokenizer produces for *text*."""
    token_list = tokenizer.tokenize(text)
    return len(token_list)
|
|
@@ -17,7 +17,7 @@ def count_tokens(text):
|
|
| 17 |
# Function to generate model predictions.
|
| 18 |
def predict(message, history):
|
| 19 |
|
| 20 |
-
formatted_prompt = f"<start_of_turn>user\
|
| 21 |
model_inputs = tokenizer(formatted_prompt, return_tensors="pt")
|
| 22 |
|
| 23 |
streamer = TextIteratorStreamer(tokenizer, timeout=120., skip_prompt=True, skip_special_tokens=True)
|
|
|
|
| 8 |
# Hugging Face access token, provided as a Space secret; None if unset
# (from_pretrained then falls back to anonymous access).
HF_TOKEN = os.environ.get("HF_TOKEN")

# Loading the tokenizer and model from Hugging Face's model hub.
# NOTE: `use_auth_token=` is deprecated in recent transformers releases;
# `token=` is the supported parameter with identical behavior.
tokenizer = AutoTokenizer.from_pretrained("oofnan/stegBot2", token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained("oofnan/stegBot2", token=HF_TOKEN)
|
| 13 |
|
| 14 |
def count_tokens(text):
    """Return the length of the token sequence the global tokenizer yields for *text*."""
    return len(tokenizer.tokenize(text))
|
|
|
|
| 17 |
# Function to generate model predictions.
|
| 18 |
def predict(message, history):
|
| 19 |
|
| 20 |
+
formatted_prompt = f"<start_of_turn>user\nYou are a helpful assistant that provides answers to user questions;{message}<end_of_turn>\n<start_of_turn>model\n"
|
| 21 |
model_inputs = tokenizer(formatted_prompt, return_tensors="pt")
|
| 22 |
|
| 23 |
streamer = TextIteratorStreamer(tokenizer, timeout=120., skip_prompt=True, skip_special_tokens=True)
|