|
|
|
|
|
"""Getting-Started-with-Mistral-7b-Instruct.ipynb |
|
|
|
|
|
Automatically generated by Colaboratory. |
|
|
|
|
|
Original file is located at |
|
|
https://colab.research.google.com/drive/1ckGJcooxH_jiohgmb7PIKrFsDxHoUPq2 |
|
|
""" |
|
|
|
|
|
# Install transformers from source; Mistral support was brand new at the time.
!pip install -q git+https://github.com/huggingface/transformers

# Path to the Mistral-7B-Instruct-v0.1 weights attached as a Kaggle input dataset.
model_path = "/kaggle/input/mistral/pytorch/7b-instruct-v0.1-hf/1"

from transformers import AutoTokenizer

# Load the tokenizer that ships with the model weights.
tokenizer = AutoTokenizer.from_pretrained(model_path)
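
# Note (an assumption, not part of the original notebook): this tokenizer has no
# padding token, so `generate` may warn about a missing `pad_token_id`; a common
# workaround is `tokenizer.pad_token = tokenizer.eos_token`.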
|
|
|
|
|
import torch
from transformers import AutoModelForCausalLM

# Load the model in bfloat16; device_map="auto" lets Accelerate spread the
# weights across the available device(s).
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
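
# Optional sketch, not in the original notebook: on GPUs with limited memory the
# same checkpoint can be loaded in 4-bit instead, assuming the `bitsandbytes`
# package is installed:
# from transformers import BitsAndBytesConfig
# quant_config = BitsAndBytesConfig(load_in_4bit=True,
#                                   bnb_4bit_compute_dtype=torch.bfloat16)
# model = AutoModelForCausalLM.from_pretrained(
#     model_path, quantization_config=quant_config, device_map="auto"
# )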
|
|
|
|
|
# A single-turn chat: one user message.
messages = [{
    "role": "user",
    "content": "Can you tell us 3 cities to visit in Turkey",
}]

# Render the chat template as a string to inspect the prompt format.
print(tokenizer.apply_chat_template(messages, tokenize=False))
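
# The rendered prompt should look roughly like this (exact formatting depends on
# the tokenizer version):
# <s>[INST] Can you tell us 3 cities to visit in Turkey [/INST]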
|
|
|
|
|
# Tokenize the templated chat; moving the tensor to the model's device follows
# the model card's example and avoids device-mismatch issues.
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

# Inspect the encoded prompt: a tensor of token ids with shape [1, seq_len].
print(model_inputs)

# Sample up to 1000 new tokens.
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)
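
# A hedged variant, not in the original notebook: sampling can be tuned
# explicitly, e.g.
# generated_ids = model.generate(model_inputs, max_new_tokens=1000,
#                                do_sample=True, temperature=0.7, top_p=0.95)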
|
|
|
|
|
# Decode the full sequences (prompt + completion) back to text.
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
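
# Sketch, not in the original notebook: the decoded string echoes the [INST]
# prompt before the answer; to print only the newly generated text, slice off
# the prompt tokens first.
response_only = tokenizer.batch_decode(
    generated_ids[:, model_inputs.shape[-1]:],  # keep tokens generated after the prompt
    skip_special_tokens=True,
)
print(response_only[0])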
|
|
|
|
|
# A role-play prompt: ask the model to act as a gourmet chef.
messages = [{
    "role": "user",
    "content": "Act as a gourmet chef. I have a friend coming over who is a "
               "vegetarian. I want to impress my friend with a special "
               "vegetarian dish. What do you recommend? "
               "Give me two options, along with the whole recipe for each",
}]

model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)

decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])

# A nonsense question, to see how the model handles an unanswerable prompt.
messages = [{
    "role": "user",
    "content": "How many helicopters can a human eat in one sitting?",
}]

model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)

decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])

# A multi-turn conversation: prior user/assistant turns are folded into the
# prompt by the chat template, giving the model context for the final question.
messages = [
    {"role": "user", "content": "What fascinates you about AI?"},
    {"role": "assistant", "content": "I'm fascinated by AI's data analysis and "
     "prediction abilities. It has the potential to revolutionize industries "
     "and improve problem-solving."},
    {"role": "user", "content": "Should people be afraid of AI?"},
]

model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)

decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])

# A Turkish prompt, to try the model in another language.
# (English: "Which 3 cities in Türkiye are visited the most?")
messages = [
    {"role": "user", "content": "Türkiye'de en fazla ziyaret edilen 3 şehir hangileridir?"},
]

model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

generated_ids = model.generate(
    model_inputs,
    max_new_tokens=1000,
    do_sample=True,
)

decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])

"""Let's connect [YouTube](http://youtube.com/tirendazacademy) | [Medium](http://tirendazacademy.medium.com) | [X](http://x.com/tirendazacademy) | [Linkedin](https://www.linkedin.com/in/tirendaz-academy)""" |