File size: 799 Bytes
f6fe981
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer at import time (downloads weights on first run).
# trust_remote_code=True is required for Replit's custom architecture; it executes
# model code shipped with the checkpoint, so only use it with trusted repos.
model_id = "replit/replit-code-v1_5-3b"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
# NOTE(review): loads in the checkpoint's default dtype on CPU unless a
# device_map/torch_dtype is supplied — confirm the target host has enough RAM.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

def generate_code(prompt):
    """Generate a code completion for *prompt* with the Replit model.

    Args:
        prompt: Source-code prefix to complete.

    Returns:
        The decoded model output (prompt plus completion, special tokens
        stripped).
    """
    # Move the tokenized inputs to wherever the model lives; the original
    # left them on CPU, which fails when the model is on GPU.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # inference_mode disables autograd bookkeeping — generation never needs
    # gradients, and this cuts memory use noticeably on a 3B model.
    with torch.inference_mode():
        # max_new_tokens bounds only the *generated* tokens. The original
        # max_length=100 counted the prompt too, so prompts near 100 tokens
        # got little or no completion.
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.2,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Build a simple web UI: one text box in, a syntax-highlighted code pane out.
# "text" / "code" are Gradio shorthand for Textbox and Code components.
demo = gr.Interface(fn=generate_code, inputs="text", outputs="code", title="Replit Code AI")
# Launches the local Gradio server at import time (blocking call).
demo.launch()