# app.py — CodeT5-small code-generation demo (Gradio)
# Update app.py — commit 5f10640, author: raj-vir-singh
import gradio as gr
import os
import torch
from transformers import RobertaTokenizer, T5ForConditionalGeneration
# Checkpoint to serve, and the device to run it on (GPU when available).
model_name = "ThoughtFocusAI/CodeGeneration-CodeT5-small"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the fine-tuned CodeT5 model and its (RoBERTa-based) tokenizer once,
# at startup, so every request reuses the same weights.
model = T5ForConditionalGeneration.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
def generate_code(user_input):
    """Generate Python code from a natural-language description.

    Args:
        user_input: Plain-text task description (e.g. "Add two numbers").

    Returns:
        The model's decoded code suggestion as a string.
    """
    # Prompt prefix matching the format the checkpoint was fine-tuned with.
    query = "Generate Python: " + user_input
    # Truncate over-long prompts. No fixed-length padding: for a single
    # input it only wastes compute, and we forward the attention mask so
    # any padding would be masked out anyway.
    encoded = tokenizer(query, return_tensors='pt',
                        truncation=True, max_length=512)
    input_ids = encoded.input_ids.to(device)
    attention_mask = encoded.attention_mask.to(device)
    # Inference only — no_grad avoids building an unused autograd graph.
    with torch.no_grad():
        generated = model.generate(input_ids,
                                   attention_mask=attention_mask,
                                   max_length=512)
    # Decode the first (only) sequence. Passing the tensor row directly to
    # tokenizer.decode avoids the original `.numpy()` call, which raises a
    # TypeError on CUDA tensors (they must be moved to CPU first).
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Build the web UI. The gr.inputs / gr.outputs namespaces were deprecated
# and removed in modern Gradio (4.x) — components live directly on `gr`.
interface = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(lines=3, label="Enter Text",
                      placeholder="Ex-Add two numbers"),
    outputs=gr.Textbox(label="Generated Code"),
)

# Guard the launch so importing this module (e.g. for tests) does not
# start a server; running the script directly still works as before.
if __name__ == "__main__":
    interface.launch()