import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
# -----------------------------
# Load tokenizer
# -----------------------------
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID,
    use_fast=True
)
# -----------------------------
# Load model (CPU, non-quantized)
# -----------------------------
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,
    device_map="cpu"
)
model.eval()
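# Optional: on multi-core CPUs, pinning torch's thread count to the number of
# physical cores can speed up generation. The value below is an assumption;
# tune it for your hardware.
# torch.set_num_threads(4)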
# -----------------------------
# Generation function
# -----------------------------
def generate(
    prompt,
    max_new_tokens=512,
    temperature=0.7,
    top_p=0.9
):
    # Tokenize the prompt, truncating to the model's 2048-token context window
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=2048
    )
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),  # Gradio sliders pass floats
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id  # avoid the missing-pad-token warning
        )
    # Decode only the newly generated tokens so the prompt is not echoed back
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(
        new_tokens,
        skip_special_tokens=True
    )
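# TinyLlama-1.1B-Chat is instruction-tuned, so raw prompts work best when
# wrapped in the model's chat template. A minimal sketch of a helper for this
# (the helper name and system message are assumptions, not part of the app):
def build_chat_prompt(user_message):
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_message},
    ]
    # apply_chat_template renders the messages into the format the model
    # was fine-tuned on and appends the assistant turn marker
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
# Example: generate(build_chat_prompt("What is a tokenizer?"))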
# -----------------------------
# Gradio Interface (API enabled)
# -----------------------------
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt", lines=6),
        gr.Slider(64, 1024, value=512, step=64, label="Max New Tokens"),
        gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p"),
    ],
    outputs=gr.Textbox(label="Response", lines=10),
    title="TinyLlama-1.1B-Chat (Non-Quantized, CPU)"
)
demo.launch()
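# With the API enabled, the endpoint can also be called programmatically.
# A minimal sketch using gradio_client, run from a separate process (the
# local URL assumes default launch settings; for a hosted Space, pass the
# Space name instead):
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict(
#       "Explain what a tokenizer does.",  # prompt
#       256,                               # max_new_tokens
#       0.7,                               # temperature
#       0.9,                               # top_p
#       api_name="/predict",
#   )
#   print(result)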