lora_model / app.py
import gradio as gr
from unsloth import FastLanguageModel
import torch

# Load the fine-tuned LoRA model in 4-bit and enable Unsloth's fast inference mode.
# "yourusername/lora_model" is a placeholder; replace it with the actual Hub repo id.
model, tokenizer = FastLanguageModel.from_pretrained("yourusername/lora_model", load_in_4bit=True)
FastLanguageModel.for_inference(model)

def chat(input_text):
    # Wrap the user input as a single-turn chat and apply the model's chat template.
    messages = [{"role": "user", "content": input_text}]
    inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")
    # do_sample=True so temperature actually takes effect; greedy decoding ignores it.
    outputs = model.generate(inputs, max_new_tokens=128, temperature=1.0, do_sample=True)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

gr.Interface(fn=chat, inputs="text", outputs="text").launch()
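
Once the Space is live, the endpoint can also be queried programmatically. A minimal usage sketch with gradio_client, assuming the Space keeps the placeholder id yourusername/lora_model and the default /predict endpoint that gr.Interface exposes:

from gradio_client import Client

# Hypothetical Space id, matching the placeholder used in app.py above.
client = Client("yourusername/lora_model")
reply = client.predict("Summarize LoRA fine-tuning in one sentence.", api_name="/predict")
print(reply)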