# Code Review Assistant — FastAPI webhook service that connects GitHub PR
# comments to an LLM-based reviewer.
| import httpx | |
| import logging | |
| import openai | |
| from openai import AsyncOpenAI | |
| import sys | |
| import yaml | |
| import string | |
| import os | |
| from fastapi import FastAPI, Request | |
| from fastapi.responses import JSONResponse | |
| from typing import Tuple | |
| from utils import JWTGenerator | |
| from utils import ( | |
| get_installation_access_token, | |
| get_diff_url, | |
| ) | |
# Log to stdout at INFO so container/platform log collectors capture output.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger("Code Review Assistant")
class GitHubService:
    """GitHub-facing operations for the code review app.

    Authenticates as the app installation, greets freshly opened PRs,
    detects review requests in issue comments, and posts the analysis
    result back to the PR conversation.
    """

    # Instruction comment posted on newly opened / reopened pull requests.
    GREETING = """
👋 Hi, I'm @code-review-assistant, a LLM-powered GitHub app powered by
[Open AI API Endpoints](https://api.openai.com/v1/chat/completions)
that gives you actionable feedback on your code.
Simply create a new comment in this PR that says:
`@code-review-assistant review`
and I will start my analysis. I only look at what you changed in this PR. All good? Let's get started!
"""

    def __init__(self, jwt_generator: JWTGenerator):
        # Generates short-lived app JWTs used to mint installation tokens.
        self.jwt_generator = jwt_generator

    async def get_headers(self, data):
        """Build authenticated headers for GitHub API calls.

        Args:
            data: Webhook payload; must carry ``installation.id``.

        Returns:
            Headers dict. ``Accept`` requests the diff media type so GET
            requests against PR URLs return the raw diff text.

        Raises:
            ValueError: when the payload carries no app installation.
        """
        installation_data = data.get("installation")
        if installation_data and installation_data.get("id"):
            installation_id = installation_data.get("id")
            jwt_token = self.jwt_generator.generate_jwt()
            installation_access_token = await get_installation_access_token(jwt_token, installation_id)
            # SECURITY: log only the installation id — the access token is a
            # live credential and must never appear in log output.
            logger.info("Obtained installation access token for installation %s", installation_id)
            return {
                "Authorization": f"token {installation_access_token}",
                "User-Agent": "code-review-assistant",
                "Accept": "application/vnd.github.diff",
            }
        raise ValueError("No app installation found.")

    async def send_greetings(self, data):
        """Post the GREETING comment on a newly opened pull request."""
        pr = data["pull_request"]
        headers = await self.get_headers(data)
        # PRs are issues under the hood; comments go through the issue API.
        async with httpx.AsyncClient() as client:
            await client.post(
                f"{pr['issue_url']}/comments",
                json={"body": self.GREETING},
                headers=headers,
            )
        return JSONResponse(content={}, status_code=200)

    async def handle_issue(self, data):
        """React to an issue-comment event.

        Returns:
            ``(comments_url, diff_text)`` when the comment is a review
            request on a pull request, otherwise ``None``.
        """
        issue = data["issue"]
        headers = await self.get_headers(data)
        # Only issue events that are actually pull requests are relevant.
        if "/pull/" in issue["html_url"]:
            pr = issue["pull_request"]
            comment = data["comment"]
            comment_body = comment["body"]
            # Strip every whitespace character except regular spaces
            # (newlines, tabs, ...) so the trigger phrase matches reliably.
            comment_body = comment_body.translate(
                str.maketrans("", "", string.whitespace.replace(" ", ""))
            )
            author_handle = comment["user"]["login"]
            # Ignore the bot's own comments to avoid a self-trigger loop.
            if (author_handle != "code-review-assistant[bot]"
                    and "@code-review-assistant review" in comment_body):
                url = get_diff_url(pr)
                async with httpx.AsyncClient() as client:
                    response = await client.get(url, headers=headers)
                diff = response.text
                return (f"{comment['issue_url']}/comments", diff)
        # Not a PR comment, or no trigger phrase: nothing to do.
        return None

    async def post_code_review_analysis(self, data, comment_url, analysis_result, model_used):
        """Publish the LLM analysis as a comment on the pull request."""
        async with httpx.AsyncClient() as client:
            await client.post(
                comment_url,
                json={
                    "body": ":rocket: Code Review Assistant Analysis finished "
                    + "analysing your PR! :rocket:\n\n"
                    + "Take a look at your results:\n"
                    + f"{analysis_result}\n\n"
                    + "This bot is proudly powered by "
                    + "[Open AI API Endpoints](https://api.openai.com/v1/chat/completions).\n"
                    + f"It used the model {model_used}"
                },
                headers=await self.get_headers(data),
            )
class CodeReviewAssistant:
    """Generates code review feedback for a PR diff via the OpenAI API."""

    # System persona for the chat completion.
    SYSTEM_CONTENT = """You’ll act as a code review assistant.
Provide informative, constructive feedback on code quality, identify potential bugs, suggests improvements in coding style and offer explanation for suggested changes.
You should be able to analyze code written in popular programming languages.
Prioritize recommendations based on severity, testability and impact on maintainability.
Prioritize suggestions that address major problems, issues and bugs in the PR code. As a second priority, suggestions should focus on enhancement, best practice, performance, maintainability, and other aspects.
Consider incorporating features like highlighting specific lines of code, providing inline comments, and generating a summary report.
When quoting variables or names from the code, use backticks (`) instead of single quote (').
Ensure that the assistant promotes collaboration and learning among engineers while adhering to best practices in software development.
If the content is good, don’t comment on it.
You can use GitHub-flavored markdown syntax in your answer.
If you encounter several files, give very concise feedback per file.
"""

    # Per-request instructions appended after the diff content.
    PROMPT = """Review the below code difference and give concise actionable code review comments only
for the changed code. If code change looks good overall, just say \"change looks good\" and stop commenting more.
Don’t try to make up comments. Don’t comment on file names or other meta data, just the actual text.
The <content> will be in JSON format and contains file name keys and text values.
"""

    MODEL = "codellama-34b-instruct"

    def __init__(self, github_service: GitHubService):
        # Client is created lazily on the first review() call, so building
        # the assistant needs no OPENAI_API_KEY and opens no connections.
        self.client = None
        self.github_service = github_service

    async def review(
            self,
            content) -> Tuple[str, str]:
        """Run the LLM review over *content* (JSON of file name -> diff text).

        Returns:
            ``(analysis_text, model_name)`` — the model's review comment and
            the model identifier that produced it.
        """
        # Reuse one client across calls instead of leaking a new one per call.
        if self.client is None:
            self.client = AsyncOpenAI(
                base_url="https://api.openai.com/v1",
                api_key=os.getenv("OPENAI_API_KEY"),
            )
        response = await self.client.chat.completions.create(
            messages=[
                {"role": "system", "content": self.SYSTEM_CONTENT},
                {"role": "user", "content": f"This is the content: {content}. {self.PROMPT}"},
            ],
            temperature=0.7,
            model=self.MODEL,
        )
        logger.info("Result from Model analysis = %s", response)
        return response.choices[0].message.content, self.MODEL
app = FastAPI()

try:
    jwt_generator = JWTGenerator(os.getenv("APP_ID"), os.getenv("PRIVATE_KEY"))
except FileNotFoundError as e:
    # Fail fast: without credentials every GitHub call would fail later, and
    # silently continuing caused a NameError on the GitHubService line below.
    logger.error(f"Error in secret manager: {e}")
    raise

github_service = GitHubService(jwt_generator)
code_review_assistant = CodeReviewAssistant(github_service)
# NOTE(review): SOURCE showed no route decorator — presumably lost in
# extraction; without it the handler is never registered. Confirm the path.
@app.get("/")
async def root():
    """Liveness / landing endpoint."""
    return {"message": "Code review assistant reporting for duty!"}
# NOTE(review): SOURCE showed no route decorator — presumably lost in
# extraction. "/webhook" is assumed; confirm against the app's webhook URL.
@app.post("/webhook")
async def handle_webhook_route(request: Request):
    """Entry point for GitHub webhook deliveries.

    Greets newly opened/reopened PRs, and runs the LLM review when an
    issue comment on a PR contains the trigger phrase.
    """
    data = await request.json()
    # New / reopened PR: post the greeting with usage instructions.
    # .get() guards payloads that carry "pull_request" without "action".
    if "pull_request" in data and data.get("action") in ["opened", "reopened"]:
        await github_service.send_greetings(data)
    # New or edited issue comment: maybe trigger a review.
    if "issue" in data and data.get("action") in ["created", "edited"]:
        try:
            result = await github_service.handle_issue(data)
            if result is not None:
                issue_comment_url, content_to_review = result
                analysis_result, model_used = await code_review_assistant.review(content_to_review)
                logger.info(f"Result from LLM Analysis = {analysis_result}")
                await github_service.post_code_review_analysis(data, issue_comment_url, analysis_result, model_used)
        except Exception as e:
            # Top-level boundary: log with traceback so failures are diagnosable,
            # but return normally so GitHub does not retry-storm the endpoint.
            logger.exception(f"Exception occurred while calling github_service.handle_issue: {e}")