3v324v23 committed on
Commit e1d6e8a · 1 Parent(s): 07e0bc8

code agent

.env ADDED
@@ -0,0 +1,4 @@
+ OLLAMA_API_URL=http://127.0.0.1:11434
+ OLLAMA_MODEL=minimax-m2:cloud
+ DEFAULT_TEMPERATURE=0.2
+ MAX_TOKENS=1024
.env.example ADDED
@@ -0,0 +1,6 @@
+ # Local Ollama server URL
+ OLLAMA_API_URL=http://127.0.0.1:11434
+ OLLAMA_MODEL=minimax-m2:cloud
+ # Optional controls
+ DEFAULT_TEMPERATURE=0.2
+ MAX_TOKENS=1024
README.md CHANGED
@@ -1,3 +1,12 @@
---
license: mit
---
+ # Code Review Agent — v0.1 (MVP)
+
+ Run locally (a Python venv is recommended):
+
+ ```bash
+ pip install -r requirements.txt
+ # start the Ollama server separately and ensure the minimax-m2:cloud model is available
+ uvicorn src.app:app --reload --port 8080
+ ```
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ fastapi
+ uvicorn[standard]
+ python-multipart
+ pydantic
+ requests
+ pytest
+ python-dotenv
src/__pycache__/app.cpython-313.pyc ADDED
Binary file (2.84 kB)
 
src/app.py ADDED
@@ -0,0 +1,46 @@
+ import os
+ from fastapi import FastAPI, UploadFile, File, Form, HTTPException
+ from fastapi.responses import JSONResponse
+ from dotenv import load_dotenv
+
+ # Load .env before importing modules that read env vars at import time
+ load_dotenv()
+
+ from .core.analyzer import quick_analyze
+ from .core.reviewer import review_single_file
+ from .utils.file_utils import save_upload_temp, read_file_text
+ from .models.schemas import ReviewResponse
+
+ app = FastAPI(title="Code Review Agent v0.1", description="Single-file code review using MiniMax-M2 (Ollama).")
+
+ @app.post("/review/file", response_model=ReviewResponse)
+ async def review_file(file: UploadFile = File(...), persona: str = Form("general")):
+     if not file.filename:
+         raise HTTPException(status_code=400, detail="Missing filename")
+     temp_path = save_upload_temp(file)
+     code = read_file_text(temp_path)
+     ext = os.path.splitext(file.filename)[1].lower()
+     language = "python" if ext in (".py",) else "unknown"
+     analyzer_evidence = quick_analyze(temp_path, language)
+     review = review_single_file(temp_path, file.filename, code, analyzer_evidence, persona=persona)
+     try:
+         os.remove(temp_path)
+     except OSError:
+         pass
+     return JSONResponse(status_code=200, content=review.dict())
+
+ @app.get("/health")
+ async def health():
+     return {"status": "ok", "model": os.getenv("OLLAMA_MODEL", "minimax-m2:cloud"), "engine": "Ollama"}
+
+ @app.get("/")
+ async def root():
+     return {
+         "message": "Welcome to Code Review Agent v0.1",
+         "usage": {
+             "endpoint": "/review/file",
+             "method": "POST",
+             "params": {"file": "upload your source file", "persona": "general|security|performance|style"},
+             "example": "curl -F 'file=@example.py' -F 'persona=security' http://localhost:8080/review/file"
+         }
+     }
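For reference, a minimal Python client for the endpoint above might look like the following sketch; the port matches the README's uvicorn command, while `example.py` and the timeout are illustrative assumptions. The form fields and response shape come from `src/app.py` and `src/models/schemas.py`.

```python
# Hypothetical client sketch: POST a local file to the running service.
import requests

with open("example.py", "rb") as fh:  # example.py is an illustrative placeholder
    resp = requests.post(
        "http://localhost:8080/review/file",
        files={"file": ("example.py", fh)},
        data={"persona": "security"},  # general | security | performance | style
        timeout=300,
    )

resp.raise_for_status()
report = resp.json()  # {"summary": "...", "findings": [...]}
print(report["summary"])
for finding in report["findings"]:
    print(f"[{finding['severity']}] {finding['title']} (confidence {finding['confidence']}%)")
```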
src/core/__init__.py ADDED
File without changes
src/core/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (157 Bytes)
 
src/core/__pycache__/analyzer.cpython-313.pyc ADDED
Binary file (1.77 kB)
 
src/core/__pycache__/llm_client.cpython-313.pyc ADDED
Binary file (1.94 kB)
 
src/core/__pycache__/prompts.cpython-313.pyc ADDED
Binary file (1.51 kB)
 
src/core/__pycache__/reviewer.cpython-313.pyc ADDED
Binary file (2.36 kB)
 
src/core/analyzer.py ADDED
@@ -0,0 +1,34 @@
+ """
+ Very simple static analyzer (Python only for v0.1)
+ """
+ import subprocess
+ import shutil
+ from typing import List, Dict
+
+ def run_python_flake(path: str) -> List[Dict]:
+     if shutil.which("flake8") is None:
+         return []
+     try:
+         out = subprocess.check_output(["flake8", path, "--format=%(row)d:%(col)d:%(code)s:%(text)s"], stderr=subprocess.STDOUT)
+     except subprocess.CalledProcessError as exc:
+         # flake8 exits non-zero when it finds violations; its report is still on stdout
+         out = exc.output or b""
+     text = out.decode("utf-8", errors="ignore").strip()
+     findings = []
+     for line in text.splitlines():
+         parts = line.split(":", 3)
+         if len(parts) == 4:
+             row, col, code, msg = parts
+             findings.append({
+                 "tool": "flake8",
+                 "code": code,
+                 "line": int(row),
+                 "col": int(col),
+                 "message": msg.strip()
+             })
+     return findings
+
+ def quick_analyze(path: str, language: str) -> List[Dict]:
+     if language == "python":
+         return run_python_flake(path)
+     return []
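As a sketch of the evidence format: the `--format` option above makes flake8 print one `row:col:code:text` line per violation, which `run_python_flake` splits into dicts like the one below (the specific F401 finding is only an illustration).

```python
# Illustrative flake8 output line:  "3:1:F401:'os' imported but unused"
# run_python_flake would convert it into:
example_finding = {
    "tool": "flake8",
    "code": "F401",
    "line": 3,
    "col": 1,
    "message": "'os' imported but unused",
}
```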
src/core/llm_client.py ADDED
@@ -0,0 +1,44 @@
+ """
+ Ollama client wrapper for MiniMax-M2.
+ Compatible with the /api/generate streaming endpoint.
+ """
+ import os
+ import requests
+ import json
+ from typing import Dict, Optional
+
+ OLLAMA_API_URL = os.getenv("OLLAMA_API_URL", "http://127.0.0.1:11434")
+ OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "minimax-m2:cloud")
+ DEFAULT_TEMPERATURE = float(os.getenv("DEFAULT_TEMPERATURE", "0.2"))
+ MAX_TOKENS = int(os.getenv("MAX_TOKENS", "1024"))
+
+ def ask_ollama(prompt: str, extra: Optional[Dict] = None) -> str:
+     """Send a prompt to the local Ollama API and return the generated text."""
+     payload = {
+         "model": OLLAMA_MODEL,
+         "prompt": prompt,
+         "options": {
+             "temperature": DEFAULT_TEMPERATURE,
+             "num_predict": MAX_TOKENS
+         }
+     }
+
+     if extra:
+         payload.update(extra)
+
+     url = f"{OLLAMA_API_URL}/api/generate"
+     resp = requests.post(url, json=payload, stream=True, timeout=180)
+     resp.raise_for_status()
+
+     response_text = ""
+     for line in resp.iter_lines():
+         if not line:
+             continue
+         try:
+             data = json.loads(line.decode("utf-8"))
+             if "response" in data:
+                 response_text += data["response"]
+         except Exception:
+             continue
+
+     return response_text.strip()
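A minimal usage sketch, assuming an Ollama server is reachable at `OLLAMA_API_URL` with the configured model available. Each streamed line from `/api/generate` is a standalone JSON object, and `ask_ollama` concatenates the `response` fragments into one string.

```python
# Usage sketch: requires a running Ollama server with the configured model available.
# Each streamed line looks roughly like {"response": "...", "done": false}; ask_ollama
# joins the "response" fragments and returns the full text.
from src.core.llm_client import ask_ollama

answer = ask_ollama("Reply with 'ready' if you can review Python code.")
print(answer)
```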
src/core/prompts.py ADDED
@@ -0,0 +1,23 @@
+ BASE_SYSTEM = (
+     "You are a helpful code reviewer. For each finding provide: title, severity (blocker/high/medium/low/nit), "
+     "a short description, and a suggested fix if possible. Always include confidence percent (0-100). "
+     "If you are unsure, say 'confidence <60%'."
+ )
+
+ PERSONA_PROMPTS = {
+     "general": "Review for readability and correctness.",
+     "security": "Focus on security issues like injections, unsafe crypto, or secrets.",
+     "performance": "Focus on performance and complexity issues.",
+     "style": "Focus on code style, clarity, and idiomatic improvements."
+ }
+
+ def build_review_prompt(filename: str, code: str, analyzer_evidence: list, persona: str = "general") -> str:
+     prompt = BASE_SYSTEM + "\n\n"
+     prompt += f"Persona: {PERSONA_PROMPTS.get(persona, PERSONA_PROMPTS['general'])}\n\n"
+     prompt += f"Filename: {filename}\nCode:\n{code[:20000]}\n\n"
+     if analyzer_evidence:
+         prompt += "Static analyzer findings:\n"
+         for ev in analyzer_evidence[:10]:
+             prompt += str(ev) + "\n"
+     prompt += "\nReturn JSON array of findings."
+     return prompt
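The prompt asks for a JSON array but does not spell out the field names; the keys that `review_single_file` (in `src/core/reviewer.py`) actually reads correspond to an array like this illustrative one, where the values are made up and only the key names and types matter.

```python
# Illustrative model output that review_single_file can parse; unknown keys are ignored
# and missing ones fall back to defaults (see models/schemas.Finding).
EXAMPLE_FINDINGS_JSON = """
[
  {
    "title": "Unsanitised filename used in a filesystem path",
    "severity": "high",
    "line_range": [6, 7],
    "description": "The uploaded filename is joined into a path without validation.",
    "suggested_fix": "Apply os.path.basename() before joining.",
    "confidence": 80
  }
]
"""
```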
src/core/reviewer.py ADDED
@@ -0,0 +1,44 @@
+ import uuid
+ import json
+ from typing import List
+ from .llm_client import ask_ollama
+ from .prompts import build_review_prompt
+ from ..models.schemas import Finding, ReviewResponse
+
+ def make_id() -> str:
+     return str(uuid.uuid4())[:8]
+
+ def review_single_file(path: str, filename: str, code: str, analyzer_evidence: List[dict], persona: str = "general") -> ReviewResponse:
+     prompt = build_review_prompt(filename, code, analyzer_evidence, persona)
+     raw = ask_ollama(prompt)
+
+     findings = []
+     try:
+         arr = json.loads(raw)
+         for item in arr:
+             findings.append(Finding(
+                 id=item.get("id", make_id()),
+                 title=item.get("title", "Untitled"),
+                 severity=item.get("severity", "low"),
+                 file=filename,
+                 line_range=tuple(item.get("line_range")) if item.get("line_range") else None,
+                 description=item.get("description", ""),
+                 suggested_fix=item.get("suggested_fix"),
+                 confidence=int(item.get("confidence", 50)),
+                 evidence=item.get("evidence")
+             ))
+     except Exception:
+         findings.append(Finding(
+             id=make_id(),
+             title="Raw Review",
+             severity="low",
+             file=filename,
+             line_range=None,
+             description=(raw or "No response from LLM")[:3000],
+             suggested_fix=None,
+             confidence=50,
+             evidence=None
+         ))
+
+     summary = f"Found {len(findings)} issues (persona={persona})."
+     return ReviewResponse(summary=summary, findings=findings)
src/models/__pycache__/schemas.cpython-313.pyc ADDED
Binary file (1.17 kB)
 
src/models/schemas.py ADDED
@@ -0,0 +1,17 @@
+ from pydantic import BaseModel
+ from typing import List, Optional, Tuple
+
+ class Finding(BaseModel):
+     id: str
+     title: str
+     severity: str  # blocker, high, medium, low, nit
+     file: str
+     line_range: Optional[Tuple[int, int]] = None
+     description: str
+     suggested_fix: Optional[str] = None
+     confidence: int = 0
+     evidence: Optional[List[dict]] = None
+
+ class ReviewResponse(BaseModel):
+     summary: str
+     findings: List[Finding]
src/utils/__pycache__/file_utils.cpython-313.pyc ADDED
Binary file (1.29 kB)
 
src/utils/file_utils.py ADDED
@@ -0,0 +1,15 @@
+ import os
+ from pathlib import Path
+
+ def save_upload_temp(upload_file, dest_dir: str = ".tmp_uploads") -> str:
+     Path(dest_dir).mkdir(parents=True, exist_ok=True)
+     # basename guards against path traversal in the uploaded filename
+     filename = os.path.basename(upload_file.filename)
+     dest_path = os.path.join(dest_dir, filename)
+     with open(dest_path, "wb") as f:
+         f.write(upload_file.file.read())
+     return dest_path
+
+ def read_file_text(path: str) -> str:
+     with open(path, "r", encoding="utf-8", errors="ignore") as f:
+         return f.read()
tests/test_basic.py ADDED
@@ -0,0 +1,9 @@
+ from fastapi.testclient import TestClient
+ from src.app import app
+
+ client = TestClient(app)
+
+ def test_health():
+     r = client.get("/health")
+     assert r.status_code == 200
+     assert r.json()["status"] == "ok"
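A possible follow-up test, not part of this commit, could exercise `/review/file` without a live Ollama server by monkeypatching `ask_ollama` in the reviewer module; the canned JSON below is illustrative.

```python
# Sketch: stub the LLM call so the endpoint can be tested offline.
import json
from fastapi.testclient import TestClient
from src.app import app
import src.core.reviewer as reviewer

def test_review_file_with_stubbed_llm(monkeypatch):
    canned = json.dumps([{"title": "Example finding", "severity": "low",
                          "description": "stubbed", "confidence": 70}])
    monkeypatch.setattr(reviewer, "ask_ollama", lambda prompt: canned)

    files = {"file": ("example.py", b"print('hello')\n")}
    r = TestClient(app).post("/review/file", files=files, data={"persona": "general"})
    assert r.status_code == 200
    assert r.json()["findings"][0]["title"] == "Example finding"
```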