IST199655 committed · Commit 474adaa · 1 Parent(s): 3c72036
Update app.py
app.py CHANGED

@@ -5,7 +5,7 @@ from huggingface_hub import InferenceClient
 Copied from inference in colab notebook
 """
 
-from transformers import AutoModel, AutoTokenizer
+from transformers import AutoModel, AutoTokenizer , AutoModelForCausalLM
 import torch
 
 # Load model and tokenizer globally to avoid reloading for every request
@@ -15,7 +15,7 @@ model_path = "llama_lora_model_1"
 tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True, legacy=False)
 
 # Load model
-model =
+model = AutoModelForCausalLM.from_pretrained("Heit39/llama_lora_model_1")
 
 # Define the response function
 def respond(
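For context, a minimal sketch of how the tokenizer and causal LM loaded above could back the respond function; the signature, generation settings, and decoding step here are assumptions for illustration, not part of this commit:

# Sketch only: assumes the tokenizer and model objects created above;
# generation parameters are illustrative, not taken from this commit.
def respond(message: str, max_new_tokens: int = 128) -> str:
    # Encode the user message for the causal LM
    inputs = tokenizer(message, return_tensors="pt")
    # Generate a continuation without tracking gradients
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Return only the newly generated text, skipping special tokens
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)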