# NOTE: extraction artifact — "Spaces: / Runtime error" is the hosting page's
# status banner, not part of the program source.
| from langchain_openai import ChatOpenAI | |
| from langchain_core.prompts import ChatPromptTemplate | |
| import os | |
| from langchain_core.pydantic_v1 import BaseModel, Field | |
| import gradio as gr | |
class code(BaseModel):
    """Code output"""

    # Structured output schema for the code-generation chain: the LLM must fill
    # these four sections (see code_gen_prompt's instructions).
    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Code block import statements")
    code: str = Field(description="Code block not including import statements")
    possible_errors: str = Field(description="Description of potential error and vulnerabilities in the code")
    # NOTE(review): unannotated class attribute — pydantic v1 treats this as a
    # plain class variable, not a model field.
    description = "Schema for code solutions to questions about Code."
class revision(BaseModel):
    """Revision output"""

    # Structured output schema for the revisor chain, mirroring the three
    # sections revisor_prompt asks for.
    imports_revision: str = Field(description="Revision of imports")
    code_revision: str = Field(description="Revision of code")
    overall_evaluation: str = Field(description="Thorough evaluation of the imports and of the code")
    # NOTE(review): class variable, not a field; text looks copy-pasted from
    # the `code` model above.
    description = "Schema for code solutions to questions about Code."
def dict_to_string(d):
    """Render a mapping as newline-separated ``key:\\n\\tvalue`` entries."""
    lines = []
    for key, value in d.items():
        lines.append(f"{key}:\n\t{value}")
    return "\n".join(lines)
def coder_reply_to_string(solution: code):
    """Flatten a structured LLM reply into a printable "field: value" string."""
    # vars(x) is equivalent to x.__dict__ for these model instances.
    return dict_to_string(vars(solution))
class CodeGenerator:
    """Iteratively produces a solution with a coder chain and feeds the
    revisor chain's feedback back in as context for the next round."""

    def __init__(self, coder, revisor, maxiters):
        # coder/revisor are LangChain runnables exposing .invoke(dict).
        self.coder = coder
        self.revisor = revisor
        self.maxiters = maxiters

    def generate(self, prompt, context):
        """Run up to ``maxiters`` generate→revise rounds and return the last
        coder solution.

        Raises ValueError when ``maxiters`` < 1 (the original code raised an
        opaque UnboundLocalError in that case).
        """
        if self.maxiters < 1:
            raise ValueError("maxiters must be at least 1")
        solution = None
        for _ in range(self.maxiters):
            solution = self.coder.invoke({"context": context, "messages": [("user", prompt)]})
            # Renamed from `revision` to avoid shadowing the module-level
            # `revision` model class.
            review = self.revisor.invoke({"messages": [("user", coder_reply_to_string(solution))]})
            # The revisor's feedback becomes the next round's context.
            context = coder_reply_to_string(review)
        return solution
class modularized_code(BaseModel):
    """Structured output for the modularization pass: description, imports,
    and the refactored (modularized) code."""

    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Code block import statements")
    code: str = Field(description="Modularized code block not including import statements")
    # NOTE(review): class variable, not a pydantic field.
    description = "Schema for code solutions to questions about Code."
def check_code(solution):
    """Smoke-test a generated solution by executing its imports, then its
    imports + code, and report the outcome as a human-readable string.

    Returns "Code ran succesfully" on success (typo kept byte-for-byte —
    callers compare against this exact literal), otherwise an error message
    naming the failing phase.
    """
    # SECURITY: exec() of LLM-generated text is arbitrary code execution on
    # untrusted input. Acceptable for a local demo only; sandbox in production.
    try:
        # Phase 1: imports alone, in an isolated namespace so the generated
        # code cannot read or clobber this module's globals.
        exec(solution.imports, {})
    except Exception as e:
        return f"An error occurred during import phase: {e}"
    try:
        # Phase 2: imports + code together in a fresh namespace (imports are
        # re-run because the namespace from phase 1 is discarded).
        exec(solution.imports + "\n" + solution.code, {})
        return "Code ran succesfully"
    except Exception as e:
        return f"An error occurred during code execution phase: {e}"
class CodeChecker:
    """Executes a generated solution and asks the checker chain to refactor
    it until it runs cleanly or the iteration budget is exhausted."""

    # Success literal returned by check_code (its typo included). BUG FIX:
    # the original compared against "Code run succesfully" ("run" vs "ran"),
    # which never matched, so even working code was refactored on every
    # iteration and the early return was unreachable.
    _SUCCESS = "Code ran succesfully"

    def __init__(self, solution, checker):
        # solution: object with .imports and .code; checker: LangChain runnable.
        self.solution = solution
        self.checker = checker

    def check_n_refactor(self, iterations):
        """Run check→refactor rounds; return the first passing solution, or
        the last refactored attempt if none passes."""
        for _ in range(iterations):
            try:
                success_status = check_code(self.solution)
            except Exception as e:
                # Defensive: check_code already catches its own errors, but a
                # failure while checking shouldn't abort the loop.
                success_status = f"An error occurred while checking the code: {e}"
            print("Success status: " + success_status)
            if success_status == self._SUCCESS:
                return self.solution
            # Feed the error report back to the checker chain for a refactor.
            self.solution = self.checker.invoke({"code": self.solution.imports + "\n" + self.solution.code, "errors": success_status})
        return self.solution
# Code-generation prompt: asks the model for a structured answer
# (description, imports, code, potential errors) seeded with {context},
# which is either the user's context or prior revision suggestions.
# NOTE(review): the original comment said "Grader prompt" — mislabeled.
code_gen_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a coding assistant with expertise in python. Based on the following context (which can be code or revision suggestions): \n ------- \n {context} \n ------- \n Answer the user
question. Ensure any code you provide can be executed \n
with all required imports and variables defined. First, structure your answer with a description of the code solution. \n
Secondly list the imports. Thirdly list the functioning code block. Finally, describe potential errors one might encounter while executing the code. Here is the user question:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Revision prompt: the {messages} placeholder receives the stringified coder
# solution (via coder_reply_to_string) for the model to critique.
revisor_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a coding revisor with expertise in python. From the prefix, import, code and potential errors produced by a programmer, you should provide a thorough review with:
- First what would you improve in the imports
- Second what would you improve in the code block
- Third an overall evaluation of the whole code solution produced by the programmer. Here is the programmer solution you should revise:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Modularization prompt: {context} carries the original user request (or
# revision notes); {messages} carries the stringified solution to refactor.
modules_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """Your job is to refactor code in a modularized way, and you have expertise in python. From the prefix, import, code and potential errors produced by a programmer, you should provide a refactored and modularized code with:
- A description of what you did
- The imports
- The refactored and modularized code.
Orient code generation on this context (which may be user's request or some revision suggestions):
\n\n-----------\n{context}\n-------------\n\n
Here is the programmer solution you should refactor and modularize:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Checker prompt: {errors} is the status string from check_code, {code} is
# the imports + code that failed. No messages placeholder — the system
# message carries everything.
checker_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """Your job is to refactor code based on these errors:
----------
{errors}
----------
This is the code to refactor:
----------
{code}
----------
You should then reply with:
- A description of what you did
- The imports
- The refactored code""",
        ),
    ]
)
| import time | |
def reply(message, history, api_key, context):
    """Gradio chat handler: generate → revise → modularize → check code,
    then stream the final answer back one character at a time."""
    os.environ["OPENAI_API_KEY"] = api_key
    llm = ChatOpenAI(temperature=0, model="gpt-4o")

    # Pass 1: generate a solution, refining it against the revisor's feedback.
    generator_chain = code_gen_prompt | llm.with_structured_output(code)
    revision_chain = revisor_prompt | llm.with_structured_output(revision)
    first_pass = CodeGenerator(generator_chain, revision_chain, 5).generate(prompt=message, context=context)

    # Pass 2: modularize the solution, again with revisor feedback.
    modularizer_chain = modules_prompt | llm.with_structured_output(modularized_code)
    modular_solution = CodeGenerator(modularizer_chain, revision_chain, 5).generate(prompt=coder_reply_to_string(first_pass), context=message)

    # Pass 3: execute the code and refactor until it runs (or budget runs out).
    checker_chain = checker_prompt | llm.with_structured_output(code)
    final_solution = CodeChecker(modular_solution, checker_chain).check_n_refactor(5)

    response = "The final solution for your code is:\n\n```python" + final_solution.imports + "\n" + final_solution.code + "\n```"
    # Stream growing prefixes of the response to simulate typing.
    for end in range(1, len(response) + 1):
        time.sleep(0.001)
        yield response[:end]
# --- Gradio UI -------------------------------------------------------------
# Extra inputs passed through to reply() on every chat turn.
api_key_user = gr.Textbox(label="OpenAI API key", type="password")
context_user = gr.Textbox(label="Context", info="Add some contextual instructions for the model to know how to generate code", value="def hello_world():\n\tprint('Hello world!')\n\nhello_world()")
chatbot = gr.Chatbot(height=400)
additional_accordion = gr.Accordion(label="Parameters to be set before you start chatting", open=True)
with gr.Blocks() as demo:
    gr.HTML("<h1 align='center'>Self-Reviewing Coding Assistant🤖💻</h1>")
    # assumes coding_assistant.png ships next to the script — TODO confirm
    gr.Image('coding_assistant.png')
    gr.ChatInterface(fn=reply, additional_inputs=[api_key_user, context_user], additional_inputs_accordion=additional_accordion, chatbot=chatbot)
# Binds on all interfaces — typical for containerized (e.g. HF Spaces) deploys.
demo.launch(server_name="0.0.0.0", server_port=7860)