# -*- coding: utf-8 -*-
"""Getting-Started-with-Mistral-7b-Instruct.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ckGJcooxH_jiohgmb7PIKrFsDxHoUPq2
"""
# Install transformers from source so the (then newly added) Mistral architecture is available.
!pip install -q git+https://github.com/huggingface/transformers
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Local snapshot of Mistral-7B-Instruct-v0.1 (Kaggle dataset path).
model_path = "/kaggle/input/mistral/pytorch/7b-instruct-v0.1-hf/1"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,  # half-precision weights so the 7B model fits in GPU memory
    device_map="auto",           # let accelerate place layers on the available devices
    trust_remote_code=True,
)
messages = [{
    "role": "user",
    "content": "Can you tell us 3 cities to visit in Turkey?"
}]
# Render the chat template as a string to inspect the prompt format.
print(tokenizer.apply_chat_template(messages, tokenize=False))
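# For Mistral-7B-Instruct-v0.1 the rendered prompt should look roughly like
# "<s>[INST] Can you tell us 3 cities to visit in Turkey? [/INST]": the chat
# template wraps each user turn in [INST] ... [/INST] tags.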
# Tokenize the chat and move the ids onto the model's device before generating.
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
print(model_inputs)
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
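"""`batch_decode` returns the prompt and the completion together. To print only
the newly generated text, slice off the prompt tokens before decoding. A small
sketch using the tensors from the cell above (not part of the original notebook):"""

new_tokens = generated_ids[0][model_inputs.shape[-1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))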
messages = [{
    "role": "user",
    "content": "Act as a gourmet chef. I have a friend coming over who is a vegetarian. "
               "I want to impress my friend with a special vegetarian dish. "
               "What do you recommend? "
               "Give me two options, along with the whole recipe for each."
}]
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
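"""The tokenize -> generate -> decode steps repeat for every prompt, so they can
be folded into one helper. This is a convenience sketch, not part of the original
notebook; the `chat` name and its defaults are our own choices:"""

def chat(messages, max_new_tokens=1000):
    # Render the conversation with the chat template and move it to the model's device.
    input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
    # Sample a completion and decode the full sequence (prompt + reply).
    output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens, do_sample=True)
    return tokenizer.batch_decode(output_ids)[0]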
messages = [{
    "role": "user",
    "content": "How many helicopters can a human eat in one sitting?"
}]
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
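"""`do_sample=True` draws each token stochastically, which is why rerunning the
cells above gives different answers. Generation can be steered with the standard
sampling knobs; the values below are illustrative, not from the original:"""

generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
    temperature=0.7,  # below 1.0 sharpens the distribution (more deterministic)
    top_p=0.95,       # nucleus sampling: keep the smallest token set with 95% mass
)
print(tokenizer.batch_decode(generated_ids)[0])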
messages = [
    {"role": "user", "content": "What fascinates you about AI?"},
    {"role": "assistant", "content": "I'm fascinated by AI's data analysis and "
     "prediction abilities. It has the potential to revolutionize industries "
     "and improve problem-solving."},
    {"role": "user", "content": "Should people be afraid of AI?"}
]
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
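"""Multi-turn chat has no hidden state: each call replays the whole message
history through the chat template. To continue the conversation, append the
model's reply as an "assistant" turn plus a new "user" turn and generate again.
A sketch reusing the slicing trick and the `chat` helper defined above:"""

new_tokens = generated_ids[0][model_inputs.shape[-1]:]
reply_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
messages.append({"role": "assistant", "content": reply_text})
messages.append({"role": "user", "content": "Why or why not?"})
print(chat(messages))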
messages = [
    # Turkish: "Which 3 cities in Türkiye are the most visited?"
    {"role": "user", "content": "Türkiye'de en fazla ziyaret edilen 3 şehir hangileridir?"},
]
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
"""Let's connect [YouTube](http://youtube.com/tirendazacademy) | [Medium](http://tirendazacademy.medium.com) | [X](http://x.com/tirendazacademy) | [Linkedin](https://www.linkedin.com/in/tirendaz-academy)""" |