# -*- coding: utf-8 -*-
"""Model Pull and Prompt.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1Ap0yRsMk8on-NcFPSYay6W3Oble43kyi
"""
#!pip install -q -U peft bitsandbytes
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
# Hugging Face Hub id of the fine-tuned LoRA adapter; its config records
# which base model it was trained on.
peft_model_id = "vhs01/mistral-7b-dolly"
config = PeftConfig.from_pretrained(peft_model_id)
# Runtime dependencies (uncomment when running in Colab):
#!pip install accelerate
#!pip install -i https://pypi.org/simple/ bitsandbytes
from transformers import BitsAndBytesConfig

# Load the base model quantized to 4-bit. Passing a BitsAndBytesConfig via
# `quantization_config` is the supported API — the bare `load_in_4bit=True`
# kwarg is deprecated in recent transformers releases (and the import above
# was previously unused).
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    quantization_config=bnb_config,
    device_map='auto'
)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path,
                                          padding_side = "right",
                                          add_eos_token = True)
# The base model ships no dedicated pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token
# Attach the fine-tuned LoRA adapter weights on top of the quantized base.
fine_tuned_model = PeftModel.from_pretrained(model, peft_model_id)
from transformers import pipeline, logging

# Silence transformers warnings so only the generated text is printed.
logging.set_verbosity(logging.CRITICAL)

# Text-generation pipeline over the adapter-augmented model, capped at 500
# new tokens and stopping on the base model's EOS token.
pipe = pipeline(
    "text-generation",
    model=fine_tuned_model,
    tokenizer=tokenizer,
    eos_token_id=model.config.eos_token_id,
    max_new_tokens=500,
)
# --- Example 1: instruction-style Q&A with supplied context ---------------
prompt = """
What is a Python? Here is some context: Python is a high-level, general-purpose programming language.
"""
# Reuse the pipeline built above — the original code re-instantiated an
# identical pipeline for every prompt, which wastes time and memory.
result = pipe(f"<s>[INST] {prompt} [/INST]")
generated = result[0]['generated_text']
# Print only the completion: skip everything up to and including the
# '[/INST]' tag plus the single space that follows it.
print(generated[generated.find('[/INST]') + len('[/INST]') + 1:])
# --- Example 2: summarization with supplied context -----------------------
prompt = """
Please summarize what Linkedin does. Here is some context: LinkedIn is a business and employment-focused social media platform
"""
# Reuse the already-constructed pipeline — rebuilding an identical one per
# prompt (as the original did) adds no value and costs setup time.
result = pipe(f"<s>[INST] {prompt} [/INST]")
generated = result[0]['generated_text']
# Emit only the model's answer: drop the prompt echo up to and including
# '[/INST]' and the following space.
print(generated[generated.find('[/INST]') + len('[/INST]') + 1:])
#!pip install -q gradio
import gradio as gr

def generate_response(user_prompt):
    """Run the fine-tuned model on *user_prompt* and return only the
    completion text (everything after the '[/INST]' tag)."""
    output = pipe(f"<s>[INST] {user_prompt} [/INST]")
    text = output[0]['generated_text']
    return text[text.find('[/INST]') + len('[/INST]') + 1:]

# Wrap the pipeline in a function so the UI displays just the answer
# string. The original code (a) ran a throwaway generation whose result
# was discarded, (b) passed the raw pipeline to gr.Interface (which
# returns a list of dicts, not a string), and (c) passed stale generated
# text as the output Textbox's *value* instead of a label.
demo = gr.Interface(
    generate_response,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Response"),
)
demo.launch(share=True)