# Hugging Face Space — status at time of capture: "Runtime error"
# (the app crashed at startup; see the API-key fix below)
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import openai
import os
import secrets  # NOTE(review): unused after the fix below; kept in case a local secrets module was intended

# Set up the OpenAI API credentials.
# FIX: the original read `secrets.OPENAI_API_KEY`, but the stdlib `secrets`
# module has no such attribute, so this raised AttributeError at startup
# (the Space's "Runtime error"). Hugging Face Spaces expose repository
# secrets as environment variables, so read the key from the environment.
openai.api_key = os.environ.get("OPENAI_API_KEY")

# Load the Hugging Face model and tokenizer.
# NOTE(review): confirm "Helsinki-NLP/opus-mt-python-en" exists on the Hub —
# opus-mt checkpoints are natural-language translation models, so using one
# to "naturalize" code output is unusual; verify this is the intended model.
model_name = "Helsinki-NLP/opus-mt-python-en"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
def correct_code(prompt):
    """Return a corrected version of the user's input code.

    Two-stage pipeline: first the OpenAI completion endpoint is asked to
    fix syntax errors in the code, then the locally loaded seq2seq model
    rewrites that result.

    Args:
        prompt: The user's input code as a string.

    Returns:
        The corrected code as a string.
    """
    # Stage 1: ask OpenAI for a fixed version of the code.
    # NOTE(review): the `Completion` endpoint and the "davinci-codex"
    # engine are deprecated/retired in current OpenAI releases — confirm
    # the pinned `openai` package version still supports this call.
    response = openai.Completion.create(
        engine="davinci-codex",
        prompt=prompt,
        max_tokens=1024,
        n=1,           # single completion; we only read choices[0]
        stop=None,
        temperature=0.5,
    )
    # Extract the (only) completion and drop surrounding whitespace.
    corrected_code = response.choices[0].text.strip()

    # Stage 2: pass the result through the Hugging Face model for a
    # final rewrite, decoding without special tokens.
    input_ids = tokenizer.encode(corrected_code, return_tensors="pt")
    outputs = model.generate(input_ids)
    corrected_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return corrected_code
# Define a Gradio interface for the code assistant.
# FIX: `gr.inputs.Textbox` / `gr.outputs.Textbox` were deprecated in
# Gradio 3.x and removed in 4.x; the top-level `gr.Textbox` component
# works in both and is the supported spelling.
input_text = gr.Textbox(lines=10, label="Input Code")
output_text = gr.Textbox(label="Corrected Code")


def generate_code(input_text):
    """Gradio callback: return the corrected version of the input code."""
    return correct_code(input_text)


interface = gr.Interface(
    fn=generate_code,
    inputs=input_text,
    outputs=output_text,
    title="AI Code Assistant",
    description="Enter your code and click submit to generate a corrected version.",
)

# Run the Gradio interface
interface.launch()