livecoder / app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Use lighter model for CPU
#model_name = "microsoft/phi-2" # 2.7B - TOO HEAVY
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" # 1.1B - much lighter
try:
    print(f"Loading {model_name}...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float32,
        device_map="cpu",
        low_cpu_mem_usage=True  # Critical for CPU
    )
    print("Model loaded successfully")
except Exception as e:
    print(f"Failed to load model: {e}")
    # Fall back to None so the UI can still be tested without the model
    model, tokenizer = None, None
def generate_response(message):
    """Generate a response to a user programming question."""
    if not message.strip():
        return "Please enter a question."
    if model is None or tokenizer is None:
        return f"Model not loaded. Testing UI with: {message}"
    try:
        # Format the prompt with TinyLlama-Chat's user/assistant tags
        prompt = f"<|user|>\n{message}\n<|assistant|>\n"
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=384)
        # Cap the number of new tokens to keep CPU generation time manageable
        with torch.no_grad():
            outputs = model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,  # FIX: pass the attention mask explicitly
                max_new_tokens=600,
                temperature=0.8,
                do_sample=True,
                top_p=0.9,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id
            )
        # Decode only the newly generated tokens, not the prompt
        response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
        return response.strip()
    except Exception as e:
        return f"Error: {str(e)[:100]}"
# Create interface
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Input", placeholder="Enter programming question...", lines=3),
    outputs=gr.Textbox(label="Output", lines=10),
    title="LiveCoder API",
    description="LLM programming assistant",
    allow_flagging="never"
)
# API endpoint info
USERNAME = "sarekuwa"
SPACE_NAME = "livecoder"
print(f"API Endpoint: https://{USERNAME}-{SPACE_NAME}.hf.space/api/predict")
# CRITICAL: Enable queue for request processing
interface.queue(default_concurrency_limit=1)
# Launch application
interface.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
    debug=True
)