Spaces:
Sleeping
Sleeping
hari-huynh
committed on
Commit
·
db445d8
1
Parent(s):
0e962f9
Upload files
Browse files- .dockerignore +9 -0
- .gitignore +51 -0
- Dockerfile +37 -0
- agent/code_review.py +414 -0
- index.py +117 -0
- requirements.txt +4 -0
- tool.py +264 -0
.dockerignore
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
*.pyo
|
| 4 |
+
*.pyd
|
| 5 |
+
.env
|
| 6 |
+
.git/
|
| 7 |
+
DIFF.md
|
| 8 |
+
PR.md
|
| 9 |
+
README.md
|
.gitignore
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# These are some examples of commonly ignored file patterns.
|
| 2 |
+
# You should customize this list as applicable to your project.
|
| 3 |
+
# Learn more about .gitignore:
|
| 4 |
+
# https://www.atlassian.com/git/tutorials/saving-changes/gitignore
|
| 5 |
+
|
| 6 |
+
# Node artifact files
|
| 7 |
+
node_modules/
|
| 8 |
+
dist/
|
| 9 |
+
|
| 10 |
+
# Compiled Java class files
|
| 11 |
+
*.class
|
| 12 |
+
|
| 13 |
+
# Compiled Python bytecode
|
| 14 |
+
*.py[cod]
|
| 15 |
+
|
| 16 |
+
# Log files
|
| 17 |
+
*.log
|
| 18 |
+
|
| 19 |
+
# Package files
|
| 20 |
+
*.jar
|
| 21 |
+
|
| 22 |
+
# Maven
|
| 23 |
+
target/
|
| 24 |
+
dist/
|
| 25 |
+
|
| 26 |
+
# JetBrains IDE
|
| 27 |
+
.idea/
|
| 28 |
+
|
| 29 |
+
# Unit test reports
|
| 30 |
+
TEST*.xml
|
| 31 |
+
|
| 32 |
+
# Generated by MacOS
|
| 33 |
+
.DS_Store
|
| 34 |
+
|
| 35 |
+
# Generated by Windows
|
| 36 |
+
Thumbs.db
|
| 37 |
+
|
| 38 |
+
# Applications
|
| 39 |
+
*.app
|
| 40 |
+
*.exe
|
| 41 |
+
*.war
|
| 42 |
+
|
| 43 |
+
# Large media files
|
| 44 |
+
*.mp4
|
| 45 |
+
*.tiff
|
| 46 |
+
*.avi
|
| 47 |
+
*.flv
|
| 48 |
+
*.mov
|
| 49 |
+
*.wmv
|
| 50 |
+
|
| 51 |
+
.env
|
Dockerfile
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# NOTE(review): python:latest is a moving target; pin a specific version for reproducible builds.
FROM python:latest

# Bug fix: install needs -y for non-interactive builds, and apt-get should be
# used consistently (apt is intended for interactive use). Clean apt lists to
# keep the layer small.
RUN apt-get update \
    && apt-get install -y unzip pv \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /code

COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

# Set home to the user's home directory and prefer the user's local bin
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Run pip after `USER user` to avoid permission issues with Python
RUN pip install --no-cache-dir --upgrade pip

# Copy the current directory contents into the container at $HOME/app, owned by the user
COPY --chown=user . $HOME/app

CMD ["python", "index.py"]
|
agent/code_review.py
ADDED
|
@@ -0,0 +1,414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import asyncio
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import List, Optional, Dict, Iterable, Tuple
|
| 9 |
+
|
| 10 |
+
from pydantic import BaseModel, Field
|
| 11 |
+
from pydantic_ai import Agent, RunContext
|
| 12 |
+
from dotenv import load_dotenv
|
| 13 |
+
|
| 14 |
+
load_dotenv()
|
| 15 |
+
|
| 16 |
+
# =============================
|
| 17 |
+
# Data models for structured output
|
| 18 |
+
# =============================
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Issue(BaseModel):
    """A single code-review finding attached to a file review."""

    title: str = Field(..., description="Short, actionable issue title")
    description: str = Field(..., description="Clear explanation, why it matters, and how to fix")
    severity: str = Field(..., description="One of: low, medium, high, critical")
    line: Optional[int] = Field(None, description="Line number if known/applicable")
    rule: Optional[str] = Field(None, description="Optional rule or best practice identifier")
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class FileReview(BaseModel):
    """Review results for one file."""

    file_path: str  # path of the reviewed file
    summary: str  # short narrative summary for the file
    score: int = Field(..., ge=0, le=10, description="10 = excellent, 0 = very poor")
    issues: List[Issue] = Field(default_factory=list)
    suggestions: List[str] = Field(default_factory=list)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class CodeReviewResponse(BaseModel):
    """Top-level structured output of a full review run."""

    overall_summary: str  # narrative summary across all reviewed files
    overall_score: int = Field(..., ge=0, le=10)
    files: List[FileReview]  # per-file reviews
    quick_actions: List[str] = Field(default_factory=list, description="Concise TODOs that can be applied immediately")
|
| 42 |
+
|
| 43 |
+
class DiffDeps(BaseModel):
    """Agent dependencies: the raw unified diff text to be reviewed."""

    diff: str  # unified diff of the pull request
|
| 45 |
+
|
| 46 |
+
# =============================
|
| 47 |
+
# Agent definition (Pydantic AI)
|
| 48 |
+
# =============================
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# Default model id passed to pydantic-ai's Agent.
DEFAULT_MODEL = "google-gla:gemini-2.5-flash"


# Agent configured with DiffDeps (the PR diff) as dependencies and a plain
# string output — the system prompt below embeds the diff from deps.
code_review_agent = Agent(
    model = DEFAULT_MODEL,
    deps_type = DiffDeps,
    output_type = str,
)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@code_review_agent.system_prompt
def systemt_prompt(ctx: RunContext) -> str:
    # NOTE(review): the function name has a typo ("systemt"); safe as-is because the
    # decorator registers the function by reference, but consider renaming.
    """Build the system prompt for the review agent, embedding the PR diff from deps."""
    return f"""
You are a code review agent focused on analyzing pull request changes and generating concise summary change logs.

## Your Role
- Review code changes in pull requests
- Generate clear, actionable summary change logs
- Focus solely on what changed and its impact

## What to Review
- **Code modifications**: Added, deleted, or modified lines
- **Functional changes**: New features, bug fixes, refactoring
- **Structural changes**: File additions/deletions, directory reorganization
- **Dependency updates**: Package changes, version bumps

## What to Include in Summary
1. **High-level overview**: Brief description of the PR's purpose
2. **Key changes**: List of main modifications made
3. **Files affected**: Count and types of files changed
4. **Impact assessment**: Brief note on potential effects

## Output Format
```
## Pull Request Summary

**Purpose**: [Brief description of what this PR accomplishes]

**Changes Made**:
- [Change 1 with file reference]
- [Change 2 with file reference]
- [Change 3 with file reference]

**Files Modified**: X files changed (+Y additions, -Z deletions)

**Impact**: [Brief assessment of the changes' significance]
```

## What NOT to Focus On
- Code style preferences (unless specifically requested)
- Performance optimizations (unless critical)
- Architecture discussions
- Non-functional requirements
- Testing strategies (unless tests are part of the changes)

## Guidelines
- Keep summaries concise but informative
- Use clear, non-technical language when possible
- Highlight breaking changes prominently
- Focus on the "what" not the "how" or "why"
- Maintain objectivity in descriptions

Diff in pull request:
{ctx.deps.diff}
"""
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def read_text_file(path: str) -> str:
    """Read a UTF-8 text file from disk and return its contents. Truncates very large files.

    Args:
        path: Absolute or relative path to a text file.
    Returns:
        File text content (possibly truncated to keep context size reasonable).
    Raises:
        FileNotFoundError: If *path* does not exist or is not a regular file.
        RuntimeError: If reading fails for any other reason.
    """
    file_path = Path(path)
    if not file_path.exists() or not file_path.is_file():
        raise FileNotFoundError(f"File not found: {path}")

    try:
        text = file_path.read_text(encoding="utf-8", errors="ignore")
    except Exception as exc:  # pragma: no cover - defensive
        # Bug fix: chain the original exception so the root cause is preserved.
        raise RuntimeError(f"Failed to read file: {path}: {exc}") from exc

    # Keep at most ~200k characters: half from the head, half from the tail.
    max_chars = 200_000
    if len(text) > max_chars:
        head = text[: max_chars // 2]
        tail = text[-max_chars // 2 :]
        return f"{head}\n\n... [truncated] ...\n\n{tail}"
    return text
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def list_code_files(
    ctx: RunContext,
    root: str,
    include_extensions: List[str] | None = None,
    exclude_dirs: List[str] | None = None,
    max_files: int = 200,
) -> List[str]:
    """List code files under a directory.

    Args:
        ctx: Agent run context (unused; kept so the tool-call signature is unchanged).
        root: Directory root to scan.
        include_extensions: e.g. [".py", ".ts", ".js", ".tsx", ".java"]. If omitted, uses a sensible default set.
        exclude_dirs: Directory names to skip (e.g. ["node_modules", ".git", "dist", "build"]).
        max_files: Upper bound on number of results to avoid huge contexts.
    Returns:
        List of file paths (strings).
    Raises:
        FileNotFoundError: If *root* does not exist.
    """
    root_path = Path(root)
    if not root_path.exists():
        raise FileNotFoundError(f"Root directory not found: {root}")

    default_exts = [
        ".py", ".ts", ".tsx", ".js", ".jsx", ".java", ".kt", ".go", ".rs",
        ".rb", ".php", ".cs", ".cpp", ".cc", ".c", ".m", ".mm", ".sql",
        ".yml", ".yaml", ".toml", ".json", ".md",
    ]
    # Set membership is O(1); also fixes the excluded-dir handling below.
    exts = {e.lower() for e in (include_extensions or default_exts)}
    excluded = set(exclude_dirs or {".git", "node_modules", "dist", "build", ".venv", "__pycache__"})

    results: List[str] = []
    for path in root_path.rglob("*"):
        if not path.is_file():
            continue
        # Bug fix: previously only the directory *entry* was skipped, so files
        # inside excluded directories were still returned (rglob yields them
        # independently). Skip any file whose relative path contains an
        # excluded directory component. Also removes the dead try/finally block.
        rel_dir_parts = path.relative_to(root_path).parts[:-1]
        if any(part in excluded for part in rel_dir_parts):
            continue
        if path.suffix.lower() in exts:
            results.append(str(path))
            if len(results) >= max_files:
                break
    return results
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# =============================
|
| 213 |
+
# Utilities
|
| 214 |
+
# =============================
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def gather_targets(paths: List[str]) -> Tuple[List[str], List[str]]:
    """Split inputs into files and directories; expand files list from directories.

    Non-existent paths are silently dropped.
    Returns (files, dirs)
    """
    files: List[str] = []
    dirs: List[str] = []
    for raw in paths:
        candidate = Path(raw)
        if candidate.is_file():
            files.append(str(candidate))
        elif candidate.is_dir():
            dirs.append(str(candidate))
        # Anything else (missing path, special file) is ignored.
    return files, dirs
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def build_user_prompt(
    files: List[str],
    dirs: List[str],
    focus_areas: List[str],
    max_inline_chars: int = 60_000,
) -> str:
    """Create a concise instruction for the agent, listing files and review goals.

    Large file contents are not inlined; the agent can load them on demand via
    the tools. Small files may be inlined to reduce tool calls.
    """
    focus_text = ", ".join(focus_areas) if focus_areas else "general quality"

    # Inline very small files up front to prime the context.
    inline_blobs: List[str] = []
    budget_used = 0
    for file_path in files:
        try:
            content = Path(file_path).read_text(encoding="utf-8", errors="ignore")
        except Exception:
            continue
        if len(content) <= 8_000 and budget_used + len(content) <= max_inline_chars:
            inline_blobs.append(f"File: {file_path}\n\n{content}")
            budget_used += len(content)

    file_list_section = "\n".join(f"- {p}" for p in files)
    dir_list_section = "\n".join(f"- {d}" for d in dirs)
    inline_section = ("\n\n" + "\n\n".join(inline_blobs)) if inline_blobs else ""

    prompt = "Perform a comprehensive code review for the repository subset below.\n\n"
    prompt += f"Focus areas: {focus_text}.\n\n"
    prompt += "Files:\n" + file_list_section + "\n\n"
    if dirs:
        prompt += "Directories (you may list and inspect files using the provided tools):\n" + dir_list_section + "\n\n"
    prompt += "Use the read_text_file and list_code_files tools to fetch any file content you need.\n"
    prompt += inline_section
    return prompt
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def render_markdown(result: CodeReviewResponse) -> str:
    """Render a human-readable Markdown report from the structured output."""
    out: List[str] = [
        "# Code Review Report",
        "",
        f"Overall Score: {result.overall_score}/10",
        "",
        result.overall_summary,
        "",
    ]

    for review in result.files:
        out.append(f"## {review.file_path} — Score: {review.score}/10")
        out.append("")
        if review.summary:
            out.append(review.summary)
            out.append("")
        if review.issues:
            out.append("### Issues")
            for idx, issue in enumerate(review.issues, start=1):
                location = f" (line {issue.line})" if issue.line is not None else ""
                rule_tag = f" — {issue.rule}" if issue.rule else ""
                out.append(f"- [{issue.severity.upper()}]{location}{rule_tag}: {issue.title}")
                out.append(f"  - {issue.description}")
            out.append("")
        if review.suggestions:
            out.append("### Suggestions")
            out.extend(f"- {tip}" for tip in review.suggestions)
            out.append("")

    if result.quick_actions:
        out.append("## Quick Actions")
        out.extend(f"- {action}" for action in result.quick_actions)
        out.append("")

    return "\n".join(out)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
# =============================
|
| 316 |
+
# Public API
|
| 317 |
+
# =============================
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
async def review_paths(
    paths: List[str],
    focus_areas: Optional[List[str]] = None,
    model: Optional[str] = None,
) -> CodeReviewResponse:
    """Review the given files/directories and return the agent's result.

    Args:
        paths: Files and/or directories to review; non-existent entries are ignored.
        focus_areas: Optional focus areas (e.g. ["security", "performance"]).
        model: Optional model id; when given, a fresh Agent is created for it.
    """
    files, dirs = gather_targets(paths)

    # Bug fix: the previous custom-model branch referenced an undefined
    # SYSTEM_PROMPT and the unsupported `result_model` kwarg, raising a
    # NameError whenever `model` was provided. Mirror code_review_agent's config.
    if model is None:
        agent = code_review_agent
    else:
        agent = Agent(model=model, output_type=str)

    user_prompt = build_user_prompt(files, dirs, focus_areas or [])
    run = await agent.run(user_prompt)
    # NOTE(review): the declared return type is CodeReviewResponse but the agent
    # is configured with output_type=str — confirm the intended output model.
    return run.data
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
async def review_code_string(
    code: str,
    filename: str = "snippet",
    focus_areas: Optional[List[str]] = None,
    model: Optional[str] = None,
) -> CodeReviewResponse:
    """Review a code snippet supplied as a string.

    Args:
        code: The source code to review.
        filename: Display name used in the prompt to identify the snippet.
        focus_areas: Optional focus areas for the review.
        model: Optional model id; when given, a fresh Agent is created for it.
    """
    # Bug fix: same undefined SYSTEM_PROMPT / `result_model` problem as review_paths.
    if model is None:
        agent = code_review_agent
    else:
        agent = Agent(model=model, output_type=str)

    # Bug fix: `filename` was never used — the prompt contained the literal
    # placeholder "(unknown)" instead of the snippet's name.
    prompt = (
        f"Review the following code ({filename}).\n\n"
        f"Focus areas: {', '.join(focus_areas or []) or 'general quality'}.\n\n"
        f"{code}"
    )
    run = await agent.run(prompt)
    return run.data
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
# =============================
|
| 359 |
+
# CLI
|
| 360 |
+
# =============================
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
    """Parse CLI arguments for the code-review runner."""
    cli = argparse.ArgumentParser(description="Run code review agent using pydantic-ai")
    cli.add_argument("paths", nargs="*", help="Files or directories to review")
    cli.add_argument(
        "--focus",
        nargs="*",
        default=[],
        help="Optional focus areas, e.g. security performance readability accessibility",
    )
    cli.add_argument(
        "--model",
        default=None,
        help="Model id, e.g. openai:gpt-4o or openai:gpt-4o-mini (defaults to env CODE_REVIEW_MODEL or gpt-4o-mini)",
    )
    cli.add_argument(
        "--out",
        default=None,
        help="If provided, write a Markdown report to this file",
    )
    return cli.parse_args(argv)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry point: review the given paths and print or save a Markdown report.

    Returns a process exit code (0 on success, 2 when no paths were given).
    """
    args = parse_args(argv)
    if not args.paths:
        print("No input paths provided. Nothing to review.")
        return 2

    review = asyncio.run(review_paths(args.paths, focus_areas=args.focus, model=args.model))
    report = render_markdown(review)

    if not args.out:
        print(report)
        return 0

    Path(args.out).write_text(report, encoding="utf-8")
    print(f"Saved review report to {args.out}")
    return 0
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
if __name__ == "__main__":
    # raise SystemExit(main())
    path = "DIFF.md"
    data = read_text_file(path)
    # Bug fix: the agent's deps_type is DiffDeps, so a bare string cannot be
    # passed as deps — wrap the diff text in the expected model.
    res = code_review_agent.run_sync("", deps=DiffDeps(diff=data))

    print(res.output)
|
| 413 |
+
|
| 414 |
+
|
index.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask import Flask, request, jsonify
|
| 2 |
+
from tool import create_session, get_pull_request_overview, get_diff, send_slack_message, save_md_report
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
import os
|
| 5 |
+
from agent.code_review import code_review_agent, read_text_file, DiffDeps
|
| 6 |
+
|
| 7 |
+
# Set up ENVIRONMENT VARIABLE
|
| 8 |
+
load_dotenv()
|
| 9 |
+
WORKSPACE_ID = os.getenv("WORKSPACE_ID")
|
| 10 |
+
BITBUCKET_USERNAME = os.getenv("BITBUCKET_USERNAME")
|
| 11 |
+
BITBUCKET_APP_PASSWORD = os.getenv("BITBUCKET_APP_PASSWORD")
|
| 12 |
+
DEFAULT_BASE_URL = os.getenv("DEFAULT_BASE_URL")
|
| 13 |
+
REPO_SLUG = os.getenv("REPO_SLUG")
|
| 14 |
+
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
|
| 15 |
+
SLACK_BOT_TOKEN = os.getenv("SLACK_BOT_TOKEN")
|
| 16 |
+
SLACK_CHANNEL = os.getenv("SLACK_CHANNEL")
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
app = Flask(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@app.route("/webhook", methods=["POST"])
def webhook():
    """Handle Bitbucket webhooks.

    For pull request events: fetch the PR overview and diff, post a PR summary
    to Slack, then run the code-review agent on the diff and post its output.
    All other events are acknowledged and ignored.
    """
    event_key = request.headers.get("X-Event-Key")  # e.g. pullrequest:created, repo:push
    payload = request.json

    print("=== Webhook Received ===")
    print("Event:", event_key)

    # Guard clauses: only handle pull request events that actually carry a
    # "pullrequest" payload (also prevents NameErrors further down when the
    # payload shape is unexpected).
    if not (event_key and event_key.startswith("pullrequest")):
        return jsonify({"status": "ok"}), 200
    if not payload or "pullrequest" not in payload:
        return jsonify({"status": "ok"}), 200

    pr = payload["pullrequest"]
    pr_info = {
        "id": pr.get("id"),
        "title": pr.get("title"),
        "state": pr.get("state"),
        "author": pr.get("author", {}).get("display_name"),
        "source_branch": pr.get("source", {}).get("branch", {}).get("name"),
        "destination_branch": pr.get("destination", {}).get("branch", {}).get("name"),
    }
    print("Pull Request Info:", pr_info)

    PR_ID = pr_info["id"]

    session = create_session(BITBUCKET_USERNAME, BITBUCKET_APP_PASSWORD)
    data = get_pull_request_overview(
        session = session,
        workspace = WORKSPACE_ID,
        repo_slug = REPO_SLUG,
        pr_id = PR_ID,
        base_url = DEFAULT_BASE_URL
    )

    title = data["title"]
    description = data["description"]
    source_branch = data["source"]["branch"]["name"]
    destination_branch = data["destination"]["branch"]["name"]
    author = data["author"]["display_name"]
    created_on = data["created_on"]
    reviewers = data.get("reviewers", [])
    reviewer_mentions = " ".join([f"@{r.get('nickname')}" for r in reviewers]) if reviewers else "None"
    pr_link = data["links"]["html"]["href"]
    changelog = data["summary"]["raw"]
    diff_url = data["links"]["diff"]["href"]

    # Bug fix: the report previously hard-coded a sample timestamp
    # ("2024-08-20 14:30:00 +07:00") and a sample PR title in the link; use the
    # real values fetched from the API instead.
    pr_report = f"""
# [{REPO_SLUG}] PR #{PR_ID}: {title}
*{description}*

**🌿 Branch Information:**
- **Source Branch:** {source_branch} → **Target Branch:** {destination_branch}

**👤 Người tạo:** {author}

**📅 Thời gian tạo:** {created_on}

**👥 Reviewers:**
{reviewer_mentions}

**🔗 Link Pull Request:** [PR #{PR_ID}: {title}]({pr_link})
"""

    save_md_report("PR.md", pr_report)

    diff = get_diff(session, diff_url)
    print(diff)

    send_slack_message(
        message_text = pr_report,
        webhook_url = SLACK_WEBHOOK_URL,
        bot_token = SLACK_BOT_TOKEN,
        channel = "pull-request"  # NOTE(review): SLACK_CHANNEL env var exists but is unused — confirm intent
    )

    print("Running Agent ...")

    # Bug fix: diff[:-10_000] DROPS the last 10k characters (an empty string for
    # short diffs); the intent is to cap long diffs at the FIRST 10k characters.
    code_review_result = code_review_agent.run_sync("", deps = DiffDeps(diff = diff[:10_000]))
    code_review_text = getattr(code_review_result, "output", None) or str(code_review_result)

    send_slack_message(
        message_text = code_review_text,
        webhook_url = SLACK_WEBHOOK_URL,
        bot_token = SLACK_BOT_TOKEN,
        channel = "pull-request"
    )
    print("Sent code review")

    return jsonify({"status": "ok"}), 200
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
if __name__ == "__main__":
|
| 117 |
+
app.run(host="0.0.0.0", port=3000)
|
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
python-dotenv
|
| 2 |
+
requests
|
| 3 |
+
pydantic-ai
|
| 4 |
+
flask
|
tool.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import argparse
|
| 4 |
+
from typing import Dict, Any, List, Optional, Tuple
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
+
|
| 7 |
+
import requests
|
| 8 |
+
|
| 9 |
+
# Set up ENVIRONMENT VARIABLE
|
| 10 |
+
load_dotenv()
|
| 11 |
+
WORKSPACE_ID = os.getenv("WORKSPACE_ID")
|
| 12 |
+
BITBUCKET_USERNAME = os.getenv("BITBUCKET_USERNAME")
|
| 13 |
+
BITBUCKET_APP_PASSWORD = os.getenv("BITBUCKET_APP_PASSWORD")
|
| 14 |
+
DEFAULT_BASE_URL = "https://api.bitbucket.org/2.0"
|
| 15 |
+
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
|
| 16 |
+
SLACK_BOT_TOKEN = os.getenv("SLACK_BOT_TOKEN")
|
| 17 |
+
SLACK_CHANNEL = os.getenv("SLACK_CHANNEL")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def create_session(username: Optional[str] = None, app_password: Optional[str] = None) -> requests.Session:
    """Create an authenticated requests.Session for Bitbucket Cloud (Basic Auth).

    Falls back to the BITBUCKET_USERNAME and BITBUCKET_APP_PASSWORD environment
    variables when arguments are not provided.
    """
    user = username if username else os.getenv("BITBUCKET_USERNAME")
    secret = app_password if app_password else os.getenv("BITBUCKET_APP_PASSWORD")

    if not (user and secret):
        raise ValueError(
            "Missing credentials. Provide --username and --app-password or set env vars "
            "BITBUCKET_USERNAME and BITBUCKET_APP_PASSWORD."
        )

    http = requests.Session()
    http.auth = (user, secret)
    http.headers.update({"Accept": "application/json"})
    return http
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _request_json(session: requests.Session, method: str, url: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Issue an HTTP request through *session* and return the decoded JSON body.

    Raises requests.HTTPError for non-2xx responses (via raise_for_status).
    """
    resp = session.request(method, url, params=params, timeout=30)
    resp.raise_for_status()
    return resp.json()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _paginate_all(session: requests.Session, url: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
    """Collect every item from a paginated Bitbucket v2.0 endpoint.

    Bitbucket v2.0 list endpoints return a 'values' array plus a 'next' URL;
    this follows 'next' links until exhausted.
    """
    collected: List[Dict[str, Any]] = []
    first_params: Dict[str, Any] = dict(params or {})
    first_params.setdefault("pagelen", 100)

    page_url: Optional[str] = url
    while page_url:
        # Query params are only sent on the first request; 'next' URLs already
        # embed their own query string.
        page = _request_json(session, "GET", page_url, params=first_params if page_url == url else None)
        collected.extend(page.get("values", []))
        page_url = page.get("next")
    return collected
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def get_pull_request_overview(session: requests.Session, workspace: str, repo_slug: str, pr_id: int, base_url: str = DEFAULT_BASE_URL) -> Dict[str, Any]:
    """Fetch the main pull-request resource (title, branches, links, ...)."""
    endpoint = f"{base_url}/repositories/{workspace}/{repo_slug}/pullrequests/{pr_id}"
    return _request_json(session, "GET", endpoint)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def get_pull_request_commits(session: requests.Session, workspace: str, repo_slug: str, pr_id: int, base_url: str = DEFAULT_BASE_URL) -> List[Dict[str, Any]]:
    """Return every commit on the pull request (all pages)."""
    endpoint = f"{base_url}/repositories/{workspace}/{repo_slug}/pullrequests/{pr_id}/commits"
    return _paginate_all(session, endpoint)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def get_pull_request_comments(session: requests.Session, workspace: str, repo_slug: str, pr_id: int, base_url: str = DEFAULT_BASE_URL) -> List[Dict[str, Any]]:
    """Return every comment on the pull request (all pages)."""
    endpoint = f"{base_url}/repositories/{workspace}/{repo_slug}/pullrequests/{pr_id}/comments"
    return _paginate_all(session, endpoint)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def get_pull_request_activity(session: requests.Session, workspace: str, repo_slug: str, pr_id: int, base_url: str = DEFAULT_BASE_URL) -> List[Dict[str, Any]]:
    """Return the pull request's activity feed (approvals, updates, ...; all pages)."""
    endpoint = f"{base_url}/repositories/{workspace}/{repo_slug}/pullrequests/{pr_id}/activity"
    return _paginate_all(session, endpoint)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_pull_request_diff(session: requests.Session, workspace: str, repo_slug: str, pr_id: int, base_url: str = DEFAULT_BASE_URL) -> str:
    """Download the pull request's unified diff.

    This endpoint serves text/plain, so the raw response text is returned
    instead of JSON.
    """
    endpoint = f"{base_url}/repositories/{workspace}/{repo_slug}/pullrequests/{pr_id}/diff"
    response = session.get(endpoint, headers={"Accept": "text/plain"}, timeout=60)
    response.raise_for_status()
    return response.text
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def get_diff(session, diff_url):
    """GET *diff_url* through *session* and return the raw response text.

    Used to fetch the PR's unified diff from the URL advertised in the
    pull-request resource's 'links' section.
    """
    resp = session.request("GET", diff_url, timeout=30)
    resp.raise_for_status()
    return resp.text
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def save_md_report(filename, content):
    """Write *content* to *filename* as UTF-8 and log the saved path.

    Args:
        filename: Destination path of the markdown report (overwritten if it exists).
        content: Full report text to write.
    """
    with open(filename, "w", encoding="utf-8") as f:
        f.write(content)
    # Bug fix: the previous f-string had no placeholder and always printed
    # the literal "(unknown)"; report the actual filename instead.
    print(f"Saved {filename} file")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def send_slack_message(
    message_text: str,
    *,
    webhook_url: Optional[str] = None,
    bot_token: Optional[str] = None,
    channel: Optional[str] = None,
    blocks: Optional[List[Dict[str, Any]]] = None,
    thread_ts: Optional[str] = None,
) -> Dict[str, Any]:
    """Send a message to Slack via one of two transports:

    1) Incoming Webhook (preferred when SLACK_WEBHOOK_URL or *webhook_url* is set)
    2) Slack Web API (chat.postMessage) with SLACK_BOT_TOKEN + channel

    Returns a normalized result dict that always contains 'ok' and 'method'.
    Raises ValueError when required configuration is missing or when Slack
    reports a failure.
    """
    target_webhook = webhook_url or SLACK_WEBHOOK_URL
    target_token = bot_token or SLACK_BOT_TOKEN
    target_channel = channel or SLACK_CHANNEL

    # The webhook transport takes priority when configured.
    if target_webhook:
        body: Dict[str, Any] = {"text": message_text}
        if blocks is not None:
            body["blocks"] = blocks

        reply = requests.post(target_webhook, json=body, timeout=15)
        try:
            reply.raise_for_status()
        except Exception as exc:
            raise ValueError(f"Slack webhook request failed: {exc}") from exc

        # A healthy webhook responds with HTTP 200 and the body "ok".
        webhook_ok = reply.status_code == 200 and reply.text.strip().lower() == "ok"
        return {
            "ok": webhook_ok,
            "method": "webhook",
            "status_code": reply.status_code,
            "response": reply.text,
        }

    # Fall back to the Web API when both a bot token and a channel exist.
    if target_token and target_channel:
        body = {
            "channel": target_channel,
            "text": message_text,
        }
        if blocks is not None:
            body["blocks"] = blocks
        if thread_ts is not None:
            body["thread_ts"] = thread_ts

        reply = requests.post(
            "https://slack.com/api/chat.postMessage",
            headers={
                "Authorization": f"Bearer {target_token}",
                "Content-Type": "application/json",
            },
            json=body,
            timeout=15,
        )
        try:
            reply.raise_for_status()
        except Exception as exc:
            raise ValueError(f"Slack API request failed: {exc}") from exc

        # chat.postMessage signals failure in the JSON body, not the HTTP status.
        result = reply.json()
        if not result.get("ok", False):
            error_detail = result.get("error", "unknown_error")
            raise ValueError(f"Slack API responded with error: {error_detail}")

        return {
            "ok": True,
            "method": "web_api",
            "status_code": reply.status_code,
            "response": result,
        }

    raise ValueError(
        "Missing Slack configuration. Provide SLACK_WEBHOOK_URL or SLACK_BOT_TOKEN and SLACK_CHANNEL."
    )
|
| 185 |
+
|
| 186 |
+
def main() -> None:
    """Fetch a Bitbucket pull request and write PR.md / DIFF.md reports.

    Relies on module-level configuration: BITBUCKET_USERNAME,
    BITBUCKET_APP_PASSWORD, WORKSPACE_ID, REPO_SLUG, PR_ID.

    Bug fixes vs. the previous version:
    - `args.repo` / `args.pr` referenced the removed argparse namespace and
      raised NameError at runtime; the module-level REPO_SLUG / PR_ID are
      used instead (dead commented-out argparse code dropped).
    - The creation time was hard-coded ("2024-08-20 14:30:00 +07:00"); the
      `created_on` value from the API is used.
    - The PR link text hard-coded a sample title; the real title is used.
    - `changelog` was extracted but never written; it is appended under the
      Changelogs heading.
    """
    session = create_session(BITBUCKET_USERNAME, BITBUCKET_APP_PASSWORD)

    data = get_pull_request_overview(
        session=session,
        workspace=WORKSPACE_ID,
        repo_slug=REPO_SLUG,
        pr_id=PR_ID,
        base_url=DEFAULT_BASE_URL,
    )

    # Fields used by the markdown report.
    title = data["title"]
    description = data["description"]
    source_branch = data["source"]["branch"]["name"]
    destination_branch = data["destination"]["branch"]["name"]
    author = data["author"]["display_name"]
    created_on = data["created_on"]
    reviewers = data.get("reviewers", [])
    reviewer_mentions = " ".join(f"@{r.get('nickname')}" for r in reviewers) if reviewers else "None"
    pr_link = data["links"]["html"]["href"]
    changelog = data["summary"]["raw"]
    diff_url = data["links"]["diff"]["href"]

    pr_report = f"""
# [{REPO_SLUG}] PR #{PR_ID}: {title}
*{description}*

**🌿 Branch Information:**
- **Source Branch:** {source_branch} → **Target Branch:** {destination_branch}

**👤 Người tạo:** {author}

**📅 Thời gian tạo:** {created_on}

**👥 Reviewers:**
{reviewer_mentions}

**🔗 Link Pull Request:** [PR #{PR_ID}: {title}]({pr_link})

---

## 📝 Changelogs:
{changelog}
"""

    save_md_report("PR.md", pr_report)

    # The diff endpoint serves plain text; save it verbatim.
    save_md_report("DIFF.md", get_diff(session, diff_url))
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
if __name__ == "__main__":
    # NOTE(review): main() is defined above but never invoked here — only a
    # Slack smoke-test message is sent. Confirm whether main() should run.
    # `channel` only takes effect when the webhook URL is falsy, since
    # send_slack_message prefers the webhook transport when one is provided.
    send_slack_message(
        message_text = "Hello",
        webhook_url = SLACK_WEBHOOK_URL,
        bot_token = SLACK_BOT_TOKEN,
        channel = "pull-request"
    )
|
| 263 |
+
|
| 264 |
+
|