Update README.md
Browse files
README.md
CHANGED
|
@@ -40,6 +40,7 @@ model_id = "malhajar/phi-2-meditron"
|
|
| 40 |
model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
|
| 41 |
device_map="auto",
|
| 42 |
torch_dtype=torch.float16,
|
|
|
|
| 43 |
revision="main")
|
| 44 |
|
| 45 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
|
@@ -52,7 +53,7 @@ prompt = '''
|
|
| 52 |
|
| 53 |
### Response:'''
|
| 54 |
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
|
| 55 |
-
output = model.generate(inputs=input_ids,max_new_tokens=512,pad_token_id=tokenizer.eos_token_id,top_k=50, do_sample=True,
|
| 56 |
top_p=0.95)
|
| 57 |
response = tokenizer.decode(output[0])
|
| 58 |
|
|
|
|
| 40 |
model = AutoModelForCausalLM.from_pretrained(model_id,
|
| 41 |
device_map="auto",
|
| 42 |
torch_dtype=torch.float16,
|
| 43 |
+
trust_remote_code=True,
|
| 44 |
revision="main")
|
| 45 |
|
| 46 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
|
|
|
| 53 |
|
| 54 |
### Response:'''
|
| 55 |
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
|
| 56 |
+
output = model.generate(inputs=input_ids, max_new_tokens=512, pad_token_id=tokenizer.eos_token_id, top_k=50, do_sample=True,
|
| 57 |
top_p=0.95)
|
| 58 |
response = tokenizer.decode(output[0])
|
| 59 |
|