import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_NAME = "ibm-granite/granite-3.0-2b-base"

# Load tokenizer and model; use half precision on GPU to cut memory use,
# full precision on CPU.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()


def generate_text(prompt, max_new_tokens=100, temperature=0.7):
    # Tokenize the prompt and move the tensors to the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Sample a continuation with nucleus (top-p) filtering; no gradients are
    # needed for inference.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=0.9,
        )
    # Note: the decoded text includes the original prompt followed by the
    # generated continuation.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Build a simple Gradio UI: a prompt box plus sliders for the two
# generation parameters exposed by generate_text.
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=5, label="Input Prompt"),
        gr.Slider(10, 300, value=100, step=10, label="Max New Tokens"),
        gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature"),
    ],
    outputs=gr.Textbox(lines=10, label="Generated Output"),
    title="IBM Granite 3.0 – 2B Base",
    description="Text generation using IBM Granite 3.0 2B Base model",
)

demo.launch()
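
# A minimal way to try this demo, assuming the three dependencies are
# installed and the script is saved locally (the filename app.py below is
# only an illustrative assumption, not part of the original):
#
#   pip install torch transformers gradio
#   python app.py
#
# Gradio then serves the interface locally, by default at http://127.0.0.1:7860.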