import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# -----------------------------
# Load tokenizer
# -----------------------------
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID,
    use_fast=True
)

# -----------------------------
# Load model (CPU, non-quantized)
# -----------------------------
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,
    device_map="cpu"
)
model.eval()

# -----------------------------
# Generation function
# -----------------------------
def generate(
    prompt,
    max_new_tokens=512,
    temperature=0.7,
    top_p=0.9
):
    # Tokenize, truncating long prompts to the model's 2048-token context.
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=2048
    )
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # Gradio sliders deliver floats; generate expects an int count.
            max_new_tokens=int(max_new_tokens),
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            # Llama-family tokenizers define no pad token; reuse EOS to
            # avoid a warning during generation.
            pad_token_id=tokenizer.eos_token_id
        )
    # Decode the full sequence (echoed prompt plus completion).
    return tokenizer.decode(
        outputs[0],
        skip_special_tokens=True
    )

# -----------------------------
# Gradio Interface (API enabled)
# -----------------------------
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt", lines=6),
        gr.Slider(64, 1024, value=512, step=64, label="Max New Tokens"),
        gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p"),
    ],
    outputs=gr.Textbox(label="Response", lines=10),
    title="TinyLlama-1.1B-Chat (Non-Quantized, CPU)"
)

demo.launch()
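
# -----------------------------
# Example programmatic usage (sketch)
# -----------------------------
# While the app is running, the Interface above is also reachable over
# Gradio's HTTP API. The snippet below is a minimal sketch using the
# official gradio_client package; it is kept commented out because
# demo.launch() blocks. The URL assumes the default local launch address,
# and "/predict" is the default api_name for a single gr.Interface.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict(
#       "Explain what a tokenizer does.",  # Prompt
#       256,                               # Max New Tokens
#       0.7,                               # Temperature
#       0.9,                               # Top-p
#       api_name="/predict"
#   )
#   print(result)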