import gradio as gr # web interface
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Small (350M-parameter) multi-language CodeGen checkpoint from Salesforce.
model_name = "Salesforce/codegen-350M-multi"
# initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Use the GPU when one is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
def generate_code(prompt, max_length=100, temperature=0.7, top_p=0.95):
    """Generate a code continuation for *prompt* with the CodeGen model.

    Args:
        prompt: Source-code / natural-language prompt to continue.
        max_length: Total token budget (prompt + generated tokens). Gradio
            sliders deliver floats, so the value is cast to int here.
        temperature: Sampling temperature; higher values are more random.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The decoded generated text, which includes the original prompt.
    """
    inputs = tokenizer(prompt, return_tensors='pt').to(device)
    output = model.generate(
        **inputs,
        # generate() expects an integer token count; the UI slider yields a float.
        max_length=int(max_length),
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        # CodeGen's tokenizer defines no pad token; reuse EOS to avoid the
        # "Setting pad_token_id to eos_token_id" warning / padding errors.
        pad_token_id=tokenizer.eos_token_id,
    )
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_code
# ---- Gradio web interface -------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## CODE GENERATION WITH CODEGEN MODEL")

    # Prompt input plus the three sampling hyper-parameter controls.
    prompt_box = gr.Textbox(lines=10, label="Enter your prompt for CodeGen")
    length_slider = gr.Slider(50, 500, value=100, label="Max Length")
    temp_slider = gr.Slider(0.1, 0.9, value=0.7, label="Choose Temperature")
    nucleus_slider = gr.Slider(0.1, 1.0, value=0.95, label="Top P Value")

    # Result display and the button that triggers generation.
    result_box = gr.Textbox(lines=20, label="Generated Code")
    run_button = gr.Button("Generate Code")

    run_button.click(
        fn=generate_code,
        inputs=[prompt_box, length_slider, temp_slider, nucleus_slider],
        outputs=result_box,
    )

demo.launch()