# Streamlit app: AI-assisted Python code reviewer with suggested learning examples.
# Standard library
import json
import os

# Third-party
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from streamlit_ace import st_ace
from typing_extensions import TypedDict
# ------------------------------------------
# 1. Define a TypedDict for review state
# ------------------------------------------
class CodeReviewState(TypedDict):
    """Shape of the per-session review record kept in st.session_state."""

    # Raw code the user submitted for review.
    code_content: str
    # Analyzer verdict text (expected: "Approved" or "NeedsRevision").
    review_outcome: str
    # Improved code from the proposer; empty when no revision was produced.
    revised_code: str
    # Educational example snippets generated for the user.
    similar_examples: str
# ------------------------------------------
# 2. LLM Classes for Analyzing & Proposing
# ------------------------------------------
class LLMAnalyzer:
    """Analyzes Python code snippet: 'Approved' or 'NeedsRevision'."""

    # String annotation (forward-ref style) so the class definition does not
    # evaluate the ChatOpenAI name at def time.
    def __init__(self, llm: "ChatOpenAI") -> None:
        self.llm = llm

    @staticmethod
    def _normalize_outcome(raw_text: str) -> str:
        """Map a raw LLM reply onto exactly one of the two expected labels.

        LLMs often wrap the verdict in quotes, punctuation, or extra prose
        (e.g. '"NeedsRevision" — because ...'). Downstream code compares the
        outcome with strict equality, so normalize here. Check "NeedsRevision"
        first; an unrecognized reply is treated conservatively as needing
        revision rather than silently approving.
        """
        if "NeedsRevision" in raw_text:
            return "NeedsRevision"
        if "Approved" in raw_text:
            return "Approved"
        return "NeedsRevision"

    def analyze(self, code_content: str) -> str:
        """Ask the LLM to review the snippet; return a canonical verdict string."""
        prompt_text = f"""You are a senior Python reviewer.
Analyze the following code:
{code_content}
Respond with:
- "Approved" if the code is correct or good enough.
- "NeedsRevision" if changes or improvements are needed.
"""
        # Use 'generate()' with a HumanMessage.
        response = self.llm.generate([[HumanMessage(content=prompt_text)]])
        raw_outcome = response.generations[0][0].text.strip()
        # Normalize so callers can rely on exact equality checks.
        return self._normalize_outcome(raw_outcome)
class LLMProposer:
    """Asks the LLM for an improved rewrite of a given code snippet."""

    def __init__(self, llm: ChatOpenAI) -> None:
        self.llm = llm

    def propose(self, code_content: str) -> str:
        """Return the LLM's improved version of `code_content`, stripped of
        surrounding whitespace."""
        review_prompt = f"""You are a senior Python reviewer.
Review the following code and provide an improved version, with any necessary fixes or best practices:
{code_content}
"""
        # Single-turn chat request via generate(); one conversation, one message.
        messages = [[HumanMessage(content=review_prompt)]]
        result = self.llm.generate(messages)
        first_generation = result.generations[0][0]
        return first_generation.text.strip()
class LLMExampleGenerator:
    """Produces related example snippets so the user can study variations."""

    def __init__(self, llm: ChatOpenAI) -> None:
        self.llm = llm

    def generate_examples(self, code_content: str) -> str:
        """Return a few instructive Python examples related to `code_content`."""
        mentor_prompt = f"""You are a senior Python mentor.
The user learned this Python snippet:
{code_content}
Now provide a few similar Python code examples or patterns with short explanations.
Show best practices and different variations so the user can learn and improve.
"""
        llm_result = self.llm.generate([[HumanMessage(content=mentor_prompt)]])
        return llm_result.generations[0][0].text.strip()
# ------------------------------------------
# 3. Streamlit Page Layout
# ------------------------------------------
# NOTE: set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Code Reviewer with AI", layout="wide")
st.title("Code Reviewer with suggested Examples")

# ------------------------------------------
# 4. Check for OpenAI API Key
# ------------------------------------------
# The key is read from the environment only; without it the app halts here.
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    st.error("Error: OPENAI_API_KEY not set. Please provide a valid API key.")
    st.stop()  # aborts this script run; nothing below executes

# ------------------------------------------
# 5. Initialize ChatOpenAI
# ------------------------------------------
# Make sure to have a compatible version: langchain>=0.0.300, openai>=1.0.0
llm = ChatOpenAI(
    openai_api_key=openai_api_key,
    model_name="gpt-4o-mini",
    temperature=0.7  # moderately creative reviews and examples
)

# One shared LLM instance backs all three reviewer roles.
analyzer = LLMAnalyzer(llm)
proposer = LLMProposer(llm)
example_gen = LLMExampleGenerator(llm)
# ------------------------------------------
# 6. Initialize Session State
# ------------------------------------------
# Streamlit reruns the whole script on every interaction; keep the review
# results in session state so they survive reruns.
if "review_state" not in st.session_state:
    st.session_state["review_state"] = CodeReviewState(
        code_content="",
        review_outcome="",
        revised_code="",
        similar_examples=""
    )

# ------------------------------------------
# 7. ACE Editor for User Code
# ------------------------------------------
st.write("### Enter or modify your Python code:")
code_input = st_ace(
    language="python",
    #theme="monokai",
    auto_update=True,  # propagate editor content on every change, not only on blur
    key="ace-editor",
    height=250,
    #value="def greet(name):\n print(f'Hello, {name}!')"
)
# ------------------------------------------
# 8. On Submit: Analyze, Propose (if needed), & Provide Examples
# ------------------------------------------
if st.button("Submit for Review"):
    # st_ace can return None before the editor has produced a value;
    # guard so .strip() cannot raise on first interaction.
    if not (code_input or "").strip():
        st.error("Please enter some code.")
    else:
        raw_outcome = analyzer.analyze(code_input)
        # Canonicalize the verdict: LLM replies often wrap it in quotes or
        # extra prose, and a strict equality check here would silently skip
        # the revision step while the display section still reports
        # "needs revision" with an empty proposed fix.
        outcome = "NeedsRevision" if "NeedsRevision" in raw_outcome else "Approved"
        revised_snippet = ""
        if outcome == "NeedsRevision":
            revised_snippet = proposer.propose(code_input)
        examples = example_gen.generate_examples(code_input)

        # Persist results so the display section (and later reruns) see them.
        st.session_state["review_state"]["code_content"] = code_input
        st.session_state["review_state"]["review_outcome"] = outcome
        st.session_state["review_state"]["revised_code"] = revised_snippet
        st.session_state["review_state"]["similar_examples"] = examples
# ------------------------------------------
# 9. Display Output
# ------------------------------------------
review_state = st.session_state["review_state"]
if review_state["review_outcome"]:
    if review_state["review_outcome"] == "Approved":
        st.success("Your code is approved! Great job.")
    else:
        st.warning("Your code needs some revision based on best practices.")
        # Only render the proposal when one exists; the original showed an
        # empty code block whenever the outcome was a non-canonical
        # "needs revision" verdict and no revision had been generated.
        if review_state["revised_code"]:
            st.markdown("**Proposed Fixes / Improved Version:**")
            st.code(review_state["revised_code"], language="python")
    st.markdown("---")
    st.markdown("### Similar Examples to Learn & Improve")
    st.write(review_state["similar_examples"])