File size: 1,133 Bytes
ed1eebe
ef89ab8
ed1eebe
43c048b
ed1eebe
ef89ab8
ed1eebe
43c048b
ed1eebe
 
979ad48
ed1eebe
43c048b
ed1eebe
979ad48
ed1eebe
 
 
 
 
 
0ddc005
ed1eebe
 
0ddc005
ed1eebe
 
0ddc005
ed1eebe
 
 
 
cdd8e55
ed1eebe
 
979ad48
ed1eebe
ef89ab8
ed1eebe
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
from transformers import BitsAndBytesConfig

# --- Model setup -------------------------------------------------------------
# Loads the 4-bit quantized Phi-3 base model and applies the fine-tuned
# SQL-chat LoRA adapter on top of it.

device = "cuda" if torch.cuda.is_available() else "cpu"

base_model = "unsloth/Phi-3-mini-4k-instruct-bnb-4bit"
finetuned_model = "saadkhi/SQL_Chat_finetuned_model"

tokenizer = AutoTokenizer.from_pretrained(base_model)

# 4-bit quantization keeps memory usage low enough for consumer GPUs.
bnb = BitsAndBytesConfig(load_in_4bit=True)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb,
    torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
    device_map="auto",  # accelerate places the quantized weights automatically
)

# NOTE: do NOT call .to(device) here — transformers raises a ValueError when
# .to() is used on a 4-bit quantized model, and device_map="auto" has already
# dispatched the weights to the right device(s).
model = PeftModel.from_pretrained(model, finetuned_model)
model.eval()

def chat(prompt: str) -> str:
    """Generate a model response for *prompt*.

    Tokenizes the prompt, runs greedy decoding (up to 60 new tokens), and
    returns the full decoded sequence — note this includes the prompt text
    itself, since the output tokens are decoded from position 0.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # inference_mode disables autograd bookkeeping for a faster forward pass.
    with torch.inference_mode():
        output = model.generate(
            **inputs,
            max_new_tokens=60,
            # do_sample=False => greedy decoding. The original also passed
            # temperature=0.8, which is ignored under greedy decoding and
            # only triggered a transformers UserWarning, so it was removed.
            do_sample=False,
        )

    return tokenizer.decode(output[0], skip_special_tokens=True)

# Wire the chat function into a minimal text-in / text-out Gradio UI.
demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="SQL Chatbot",
)
iface = demo
iface.launch()