# Fine-tune Meta-Llama-3-8B on paired customer support tweets using 4-bit
# quantization (bitsandbytes) plus LoRA adapters (peft).
import transformers
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, pipeline
from huggingface_hub import notebook_login
from datasets import Dataset
import pandas as pd
from IPython.display import display
from google.colab import userdata

# Clear any stale Hugging Face cache before downloading the model
!rm -rf ~/.cache/huggingface/

# Authenticate with Hugging Face (Meta-Llama-3-8B is a gated model)
notebook_login()




# Load the customer support tweets sample
df = pd.read_csv("/content/sample.csv")
print("Shape of dataset:", df.shape)
display(df.head(5))


# Split into customer tweets (inbound) and brand replies (outbound)
inbound_df = df[df["inbound"] == True]
outbound_df = df[df["inbound"] == False]

# Pair each customer tweet with the brand reply that responds to it
merged_df = pd.merge(
    inbound_df,
    outbound_df,
    left_on="tweet_id",
    right_on="in_response_to_tweet_id",
    suffixes=("_customer", "_brand")
)

merged_df = merged_df[["tweet_id_customer", "text_customer",
                       "tweet_id_brand", "text_brand"]]
display(merged_df.head())

# Format each pair as a simple single-turn prompt/response example
def build_chat_example(row):
    return {
        "prompt": f"User: {row['text_customer']}\nAssistant:",
        "response": row["text_brand"]
    }

paired_data = merged_df.apply(build_chat_example, axis=1).to_list()


# Build a Hugging Face Dataset and hold out 10% for evaluation
dataset = Dataset.from_list(paired_data)
dataset = dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]

print(train_dataset[0])

model_id = "meta-llama/Meta-Llama-3-8B"

# 4-bit NF4 quantization (QLoRA-style) so the 8B model fits on a single Colab GPU
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)

# Llama 3 ships without a pad token, so reuse EOS for padding
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=bnb_config,
)
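
# Rough sanity check that 4-bit loading worked (a minimal sketch, not in the original
# notebook): get_memory_footprint() reports the loaded size in bytes; an 8B model in
# 4-bit should land around 5-6 GB instead of roughly 16 GB in fp16.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")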


# Optional: a generation pipeline around the quantized model for quick sanity checks
llama_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    device_map="auto"
)
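
# Optional baseline (a sketch; the prompt below is illustrative, not from the dataset):
# generate once before fine-tuning so the LoRA-trained replies can be compared against it.
baseline = llama_pipeline(
    "User: My order arrived damaged, what should I do?\nAssistant:",
    max_new_tokens=64,
    do_sample=True,
    top_p=0.9,
)
print(baseline[0]["generated_text"])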


from peft import LoraConfig, get_peft_model, TaskType

# Low-rank adapters on the attention query/value projections
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "v_proj"]
)

model = get_peft_model(model, lora_config)
print("LoRA layers added to the model!")


def tokenize_function(examples):
    # Join prompt and reply into one training sequence per example; labels are
    # produced later by the causal-LM data collator, so only input ids are needed here.
    full_texts = [
        f"{p}\n{r}" for p, r in zip(examples["prompt"], examples["response"])
    ]
    return tokenizer(full_texts, truncation=True, max_length=512)

# Drop the raw text columns so only token fields reach the data collator
train_tokenized = train_dataset.map(tokenize_function, batched=True, remove_columns=["prompt", "response"])
eval_tokenized = eval_dataset.map(tokenize_function, batched=True, remove_columns=["prompt", "response"])


from transformers import TrainingArguments, Trainer

training_args = TrainingArguments(
    output_dir="./results",
    overwrite_output_dir=True,
    num_train_epochs=3,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    eval_strategy="epoch",
    save_strategy="epoch",
    logging_steps=50,
    # fp16 mixed precision; on GPUs with bfloat16 support, bf16=True would match the
    # 4-bit compute dtype configured above
    fp16=True,
    report_to="none"
)

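# TrainingArguments are defined above but no Trainer is ever built, so nothing is
# fine-tuned before the generation test below. A minimal sketch of the missing step,
# assuming the standard Trainer + causal-LM collator setup (the collator pads each
# batch dynamically and builds labels by copying input_ids, masking padding with -100):
from transformers import Trainer, DataCollatorForLanguageModeling

data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_tokenized,
    eval_dataset=eval_tokenized,
    data_collator=data_collator,
)

trainer.train()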

# Quick qualitative check of the fine-tuned model
model.eval()

test_prompt = "User: What is an iPhone?\nAssistant:"
inputs = tokenizer(test_prompt, return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        do_sample=True,
        top_k=50,
        top_p=0.9
    )

print("=== Model Reply ===")
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
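
# If the replies look reasonable, only the LoRA adapter (small compared with the full
# 8B weights) needs to be saved; the output path here is a placeholder.
model.save_pretrained("./llama3-support-lora")
tokenizer.save_pretrained("./llama3-support-lora")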