Commit 46ed7eb
1 Parent(s): e6bbd2f

Update handler.py

explicitly setting dtype to torch.float32

handler.py  +4 -3
handler.py CHANGED
@@ -4,15 +4,16 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 # testing changes
 
 # get dtype
-
+dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
+dtype = torch.float32
 
 
 class EndpointHandler:
     def __init__(self, path=""):
         # load the model
         tokenizer = AutoTokenizer.from_pretrained(path)
-
-
+        model = AutoModelForCausalLM.from_pretrained(path, device_map="auto", torch_dtype=dtype, trust_remote_code=True)
+
         # create inference pipeline
         self.pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device='cuda:0')
 
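For context, here is a sketch of handler.py as it stands after this commit, reconstructed from the hunk above. Two things in it are assumptions rather than visible in the diff: the `import torch` line (required by the dtype assignments) and the handler's `__call__(self, data)` entry point, which Hugging Face Inference Endpoints handlers conventionally define but which lies outside this hunk. Note that the unconditional `dtype = torch.float32` assignment makes the capability check on the line above it dead code (compute capability major version 8 is Ampere, the first GPU generation with bfloat16 support); that is what "explicitly setting dtype to torch.float32" amounts to.

import torch  # assumed: not visible in this hunk, but required by the dtype lines
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# testing changes

# get dtype
dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
dtype = torch.float32  # explicit override per this commit; the line above no longer has any effect


class EndpointHandler:
    def __init__(self, path=""):
        # load the model
        tokenizer = AutoTokenizer.from_pretrained(path)
        model = AutoModelForCausalLM.from_pretrained(
            path, device_map="auto", torch_dtype=dtype, trust_remote_code=True
        )

        # create inference pipeline
        self.pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device="cuda:0")

    # A __call__(self, data) method is presumably defined further down in the
    # file; it is outside this hunk and therefore not reconstructed here.

One caveat worth flagging when reading this code: in recent transformers releases, passing device= to pipeline() for a model that was already placed by accelerate via device_map="auto" raises a ValueError asking you to drop the device argument. Whether that applies to this handler depends on the transformers version pinned for the endpoint. Separately, loading in float32 roughly doubles GPU memory use compared to the float16/bfloat16 path this commit overrides.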