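"""Gradio demo: text generation with the IBM Granite 3.0 2B base model."""
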
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
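
# Hugging Face model ID for the IBM Granite 3.0 2B base checkpoint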
MODEL_NAME = "ibm-granite/granite-3.0-2b-base"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    # Half precision on GPU to cut memory use; full precision on CPU
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
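
# Place the model on GPU if one is available and switch to inference mode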
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()
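
# Generate a completion for the prompt; sampling is controlled by the
# temperature and top-p parameters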
def generate_text(prompt, max_new_tokens=100, temperature=0.7):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),  # slider values may arrive as floats
            do_sample=True,
            temperature=temperature,
            top_p=0.9,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
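
# Wire the function into a simple Gradio UI: a prompt box plus sliders for
# output length and sampling temperature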
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=5, label="Input Prompt"),
        gr.Slider(10, 300, value=100, step=10, label="Max New Tokens"),
        gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature"),
    ],
    outputs=gr.Textbox(lines=10, label="Generated Output"),
    title="IBM Granite 3.0 – 2B Base",
    description="Text generation using IBM Granite 3.0 2B Base model",
)
demo.launch()