AnnaMathews commited on
Commit
0b3aa28
·
verified ·
1 Parent(s): 252f723

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -0
app.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import torch
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Base checkpoint to fine-tune.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# 4-bit NF4 quantization with double quantization (QLoRA-style) so the
# 1.1B model fits in limited VRAM; matmuls run in float16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)

tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
# LLaMA-family tokenizers ship without a pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

# The KV cache is useless during training and conflicts with gradient
# checkpointing, so it is disabled here.
model.config.use_cache = False
model.gradient_checkpointing_enable()
model = prepare_model_for_kbit_training(model)

# Low-rank adapters on the attention query/value projections only —
# the quantized base weights stay frozen.
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

model = get_peft_model(model, lora_config)
import pandas as pd

# Load the support-ticket CSV and normalize it to instruction/output pairs.
# (Dataset is already imported at the top of the file; the duplicate
# `from datasets import Dataset` has been dropped.)
df = pd.read_csv("Customer-Support.csv")

# The raw file uses "query"/"response" column names; the prompt template
# below expects "instruction"/"output".
df = df.rename(columns={"query": "instruction", "response": "output"})

# Fail fast with a clear message if the CSV schema is not what we expect —
# otherwise the column selection below raises an opaque KeyError.
missing = [c for c in ("instruction", "output") if c not in df.columns]
if missing:
    raise ValueError(f"Customer-Support.csv is missing expected columns: {missing}")

# Keep only the two fields we train on; blank out NaNs so the prompt
# formatting never produces the literal string "nan".
data = df[["instruction", "output"]].fillna("")

# Convert DataFrame rows to a list of dicts, then build the HF Dataset.
data = data.to_dict(orient="records")
dataset = Dataset.from_list(data)
def format_instruction(example):
    """Render one record into the prompt template shared by training and inference.

    `example` must carry "instruction" and "output" keys; the result is a
    single string in the Alpaca-style "### Instruction / ### Response" layout.
    """
    question = example["instruction"]
    answer = example["output"]
    return (
        "### Instruction:\n"
        f"{question}\n\n"
        "### Response:\n"
        f"{answer}"
    )
# Attach the rendered prompt text to every row.
dataset = dataset.map(lambda row: {"text": format_instruction(row)})


def tokenize_function(batch):
    """Tokenize a batch of "text" fields and build causal-LM labels.

    Fix: the original copied input_ids verbatim into labels. Because
    pad_token is set to the EOS token and every sequence is padded to
    max_length=512, that trains the model on long runs of padding.
    Here positions where attention_mask == 0 are masked with -100, the
    ignore_index of the loss, so padding contributes nothing; real tokens
    (including a genuine EOS, which has attention_mask == 1) still count.
    """
    tokenized = tokenizer(
        batch["text"], truncation=True, padding="max_length", max_length=512
    )
    tokenized["labels"] = [
        [tok if keep == 1 else -100 for tok, keep in zip(ids, mask)]
        for ids, mask in zip(tokenized["input_ids"], tokenized["attention_mask"])
    ]
    return tokenized


tokenized_dataset = dataset.map(tokenize_function, batched=True)
# Hyper-parameters for the QLoRA fine-tune. Effective batch size is
# 2 * 4 = 8 via gradient accumulation; checkpoints are kept per epoch
# with at most two retained on disk.
training_args = TrainingArguments(
    output_dir="./tinyllama-qlora-support-bot",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,
    logging_dir="./logs",
    num_train_epochs=3,
    logging_steps=10,
    save_total_limit=2,
    save_strategy="epoch",
    # NOTE(review): bf16 training while the 4-bit compute dtype above is
    # torch.float16 mixes precisions, and bf16 requires an Ampere+ GPU —
    # confirm this is intended for the target hardware.
    bf16=True,
    # Paged 8-bit AdamW keeps optimizer state memory-friendly alongside
    # the quantized model.
    optim="paged_adamw_8bit",
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    tokenizer=tokenizer,
)
# Run the fine-tune, then persist the LoRA adapter weights and tokenizer.
trainer.train()
model.save_pretrained("tinyllama-qlora-support-bot")
tokenizer.save_pretrained("tinyllama-qlora-support-bot")

from transformers import pipeline

# Quick smoke test of the freshly tuned model with a sample question.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

instruction = "How do I update the app?"
prompt = f"### Instruction:\n{instruction}\n\n### Response:\n"

output = pipe(prompt, max_new_tokens=100)
print(output[0]['generated_text'])
import gradio as gr


def generate_response(instruction):
    """Answer *instruction* with the fine-tuned pipeline, sampling at T=0.7."""
    prompt = f"### Instruction:\n{instruction}\n\n### Response:\n"
    result = pipe(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    generated = result[0]["generated_text"]
    # The pipeline echoes the prompt; keep only the text after the marker.
    return generated.split("### Response:\n")[-1].strip()


gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=3, label="Enter your question"),
    outputs=gr.Textbox(lines=5, label="Support Bot's Response"),
    title="📞 Customer Support Chatbot",
    description="Ask a question and get a response from your fine-tuned TinyLLaMA model.",
).launch()