File size: 1,427 Bytes
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Base model name and the location of your adapter on Hugging Face
base_model_id = "microsoft/phi-2"
adapter_model_id = "username_kamu/Deeper-Logic-Phi2"  # Replace with your own repo
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
tokenizer.pad_token = tokenizer.eos_token  # Phi-2 has no dedicated pad token, so reuse EOS
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True
)
# Attach your fine-tuned adapter on top of the base model
model = PeftModel.from_pretrained(model, adapter_model_id)
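# Optional (not part of the original app): if you no longer need to swap adapters,
# PEFT's merge_and_unload() folds the LoRA weights into the base model, which can
# make inference slightly faster. Uncomment to enable:
# model = model.merge_and_unload()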
def predict(message, history):
    # Phi-2 instruction format: "Instruct: ...\nOutput:"
    prompt = f"Instruct: {message}\nOutput:"
    # Move inputs to wherever device_map="auto" placed the model (GPU or CPU)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Return only the text after the "Output:" marker
    return response.split("Output:")[-1].strip()
# Build the chat interface with Gradio
demo = gr.ChatInterface(
    fn=predict,
    title="Deeper-Logic AI",
    description="Research & Productivity Assistant based on Phi-2 (fine-tuned)",
    theme="soft"
)
if __name__ == "__main__":
    demo.launch()