# --- Previous version (kept for reference): single-turn gr.Interface, greedy decoding ---

# import torch
# import gradio as gr
# from transformers import AutoTokenizer, AutoModelForCausalLM
# from peft import PeftModel
# from transformers import BitsAndBytesConfig

# device = "cuda" if torch.cuda.is_available() else "cpu"

# base_model = "unsloth/Phi-3-mini-4k-instruct-bnb-4bit"
# finetuned_model = "saadkhi/SQL_Chat_finetuned_model"

# tokenizer = AutoTokenizer.from_pretrained(base_model)

# bnb = BitsAndBytesConfig(load_in_4bit=True)

# model = AutoModelForCausalLM.from_pretrained(
#     base_model,
#     quantization_config=bnb,
#     torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
#     device_map="auto"
# )

# model = PeftModel.from_pretrained(model, finetuned_model).to(device)
# model.eval()

# def chat(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt").to(device)

#     with torch.inference_mode():
#         output = model.generate(
#             **inputs,
#             max_new_tokens=60,
#             temperature=0.1,
#             do_sample=False
#         )

#     return tokenizer.decode(output[0], skip_special_tokens=True)

# iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="SQL Chatbot")
# iface.launch()

# --- Current version: gr.ChatInterface with Phi-3 chat-template prompting ---

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
from transformers import BitsAndBytesConfig

device = "cuda" if torch.cuda.is_available() else "cpu"

base_model = "unsloth/Phi-3-mini-4k-instruct-bnb-4bit"
finetuned_model = "saadkhi/SQL_Chat_finetuned_model"

tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)

# Load the already-4-bit-quantized checkpoint through bitsandbytes
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
    device_map="auto",
    trust_remote_code=True,
)
model = PeftModel.from_pretrained(model, finetuned_model)
model.eval()
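
# Note: device_map="auto" already placed the weights during loading, so no
# explicit .to(device) call is needed here (unlike the earlier version above;
# .to() is not supported for 4-bit bitsandbytes models and raises an error).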

def chat(user_prompt, history):
    # gr.ChatInterface calls fn with (message, history); history is ignored
    # here because each request is answered single-turn.
    # Build the prompt with the Phi-3 chat template.
    messages = [{"role": "user", "content": user_prompt}]

    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,        # also returns the attention mask
        return_tensors="pt",
    ).to(device)
    
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,      # enough headroom for a complete SQL statement
            temperature=0.7,
            do_sample=True,          # sampled decoding: more varied phrasing, less deterministic
            top_p=0.9,
            repetition_penalty=1.1,
        )
    
    # Decode with special tokens kept so the assistant turn can be sliced out,
    # then keep only the text between <|assistant|> and <|end|>.
    response = tokenizer.decode(outputs[0], skip_special_tokens=False)
    response = response.split("<|assistant|>")[-1].split("<|end|>")[0].strip()
    
    return response
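
# Quick smoke test (commented out): call the handler directly before launching
# the UI. The prompt is just an illustrative example.
# print(chat("Delete duplicate rows based on email", []))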

iface = gr.ChatInterface(
    fn=chat,
    title="Fast SQL Chatbot",
    description="Ask SQL questions (e.g., 'delete duplicate rows based on email')"
)

iface.launch()
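
# --- Optional: querying the running app over the API ---
# A minimal sketch, assuming the default local address (http://127.0.0.1:7860)
# and ChatInterface's standard "/chat" endpoint; requires `pip install
# gradio_client`. Left commented out so launching this file stays single-process.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860/")
# result = client.predict("Delete duplicate rows based on email", api_name="/chat")
# print(result)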