import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

device = "cuda" if torch.cuda.is_available() else "cpu"

base_model = "unsloth/Phi-3-mini-4k-instruct-bnb-4bit"
finetuned_model = "saadkhi/SQL_Chat_finetuned_model"

tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)

bnb_config = BitsAndBytesConfig(load_in_4bit=True)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
    device_map="auto",
    trust_remote_code=True,
)
model = PeftModel.from_pretrained(model, finetuned_model)
model.eval()


def chat(user_prompt, history):
    # gr.ChatInterface passes (message, history); history is unused here.
    # Build the prompt with the proper Phi-3 chat template.
    messages = [{"role": "user", "content": user_prompt}]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(device)

    with torch.inference_mode():
        outputs = model.generate(
            inputs,
            max_new_tokens=256,      # enough room for a full SQL statement
            temperature=0.7,
            do_sample=True,          # sampled decoding for more varied answers
            top_p=0.9,
            repetition_penalty=1.1,
        )

    # Decode with special tokens kept so the assistant span can be sliced out,
    # then strip everything outside <|assistant|> ... <|end|>.
    response = tokenizer.decode(outputs[0], skip_special_tokens=False)
    response = response.split("<|assistant|>")[-1].split("<|end|>")[0].strip()
    return response


iface = gr.ChatInterface(
    fn=chat,
    title="Fast SQL Chatbot",
    description="Ask SQL questions (e.g., 'delete duplicate rows based on email')",
)
iface.launch()
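
# Optional smoke test (a sketch): call chat() directly once to verify the model
# loads and generates before relying on the Gradio UI. The prompt below is only
# an example, and the empty list stands in for the history argument that
# gr.ChatInterface would normally pass. Uncomment to run from a terminal
# (and comment out iface.launch() above, since launch() blocks):
# print(chat("Write SQL to delete duplicate rows based on email", []))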