| | from fastapi import FastAPI, HTTPException |
| | from models import load_model |
| | from utils import get_prompt, preprocess_description, DiagramRequest, DiagramResponse |
| | from utils_pic import get_pic_prompt |
| | import re |
| |
|
# FastAPI application serving diagram-description → PlantUML generation,
# backed by a locally loaded GGUF chat model (see models.load_model).
app = FastAPI(title="Diagram → PlantUML (CPU + Qwen2.5-7B GGUF)", version="0.1.0")

# Global handle to the chat model. Stays None until the startup hook below
# runs; /health reports this state to callers.
llm = None
| |
|
@app.on_event("startup")
def startup_event():
    """Load the chat model once when the application starts.

    NOTE(review): a second ``@app.on_event("startup")`` handler later in
    this file also calls ``load_model()``; FastAPI registers both, so the
    model may be loaded twice — consider consolidating the handlers.
    """
    global llm
    llm = load_model()
| |
|
@app.get("/health")
async def health_check():
    """Report whether the chat model has finished loading."""
    model_ready = llm is not None
    status = "healthy" if model_ready else "model loading failed"
    return {"status": status, "model_loaded": model_ready}
| |
|
| | |
| |
|
def _extract_plantuml(text: str) -> str:
    """Return the ``@startuml`` … ``@enduml`` span from *text*.

    Falls back to returning *text* unchanged when the markers are missing
    or appear out of order (the original slice produced garbage in the
    out-of-order case).
    """
    start_marker = "@startuml"
    end_marker = "@enduml"
    start_idx = text.find(start_marker)
    end_idx = text.rfind(end_marker)
    if start_idx != -1 and end_idx != -1 and end_idx >= start_idx:
        return text[start_idx:end_idx + len(end_marker)]
    return text


@app.post("/api/v1/generate_plantuml", response_model=DiagramResponse)
async def generate_plantuml(req: DiagramRequest):
    """Generate PlantUML code from a free-text diagram description.

    Raises:
        HTTPException 503: the model has not finished loading.
        HTTPException 400: the (preprocessed) description is empty.
    """
    global llm
    if llm is None:
        raise HTTPException(503, "Модель ещё не загружена")

    desc = preprocess_description(req.description)
    if not desc:
        raise HTTPException(400, "Description cannot be empty")

    messages = get_prompt(desc)

    # Conservative sampling; stop sequences cut generation at the end of
    # the UML block or a code fence so we don't pay for trailing chatter.
    output = llm.create_chat_completion(
        messages,
        max_tokens=1500,
        temperature=0.6,
        top_p=0.9,
        top_k=35,
        repeat_penalty=1.15,
        stop=["</s>", "<|im_end|>", "@enduml\n\n", "```"],
    )

    generated_text = output["choices"][0]["message"]["content"].strip()

    plantuml_code = _extract_plantuml(generated_text)

    # Collapse blank lines the model tends to insert between UML statements.
    plantuml_code = re.sub(r'\n\s*\n', '\n', plantuml_code).strip()

    return {"plantuml_code": plantuml_code}
| |
|
| |
|
| | from fastapi import UploadFile, File |
| | from utils_pic import ( |
| | PicToUmlRequest, PicToUmlResponse, |
| | load_yolo, preprocess_image, |
| | run_ocr, build_graph_from_detections, |
| | generate_description_from_graph |
| | ) |
| |
|
| |
|
# NOTE(review): llm is already initialised to None near the top of this
# file; this reassignment at import time is redundant (startup hooks have
# not run yet) but harmless.
llm = None
# YOLO detector handle; populated by the startup hook below.
yolo_model = None
| |
|
@app.on_event("startup")
def startup_event():
    """Load the YOLO detector and, if not already loaded, the chat model.

    FastAPI registers every ``@app.on_event("startup")`` handler, so the
    earlier handler in this file has usually loaded the LLM already; guard
    on None so the heavy GGUF model is not loaded a second time.
    """
    global llm, yolo_model
    if llm is None:
        llm = load_model()
    yolo_model = load_yolo()
| |
|
@app.post("/api/v1/pic_to_uml", response_model=PicToUmlResponse)
async def pic_to_uml(
    image: UploadFile = File(...),
    is_bpmn: bool = True
):
    """Convert an uploaded diagram image into BPMN text or PlantUML code.

    Pipeline: YOLO element detection → OCR → graph reconstruction → LLM
    generation from the combined evidence.

    Raises:
        HTTPException 400: the upload is not an image.
        HTTPException 503: a required model has not finished loading.
    """
    global yolo_model, llm

    # content_type may be None for malformed uploads; the bare
    # .startswith call used to raise AttributeError (→ 500) in that case.
    if not image.content_type or not image.content_type.startswith("image/"):
        raise HTTPException(400, "Ожидается изображение (png/jpg)")

    # Fail fast before reading/decoding the image if a model is missing.
    # The original checked only yolo_model and would hit a raw
    # AttributeError on llm.create_chat_completion below.
    if yolo_model is None:
        raise HTTPException(503, "YOLO модель не загружена")
    if llm is None:
        raise HTTPException(503, "Модель ещё не загружена")

    image_bytes = await image.read()
    img_cv, pil_img = preprocess_image(image_bytes)

    # Low confidence threshold + high max_det: favour recall; downstream
    # graph building is expected to filter spurious boxes — TODO confirm.
    results = yolo_model(
        img_cv,
        conf=0.15,
        iou=0.4,
        max_det=300,
        imgsz=1024
    )

    ocr_text = run_ocr(pil_img)

    G, nodes = build_graph_from_detections(results, ocr_text)

    messages = get_pic_prompt(ocr_text, nodes, list(G.edges()), is_bpmn)

    output = llm.create_chat_completion(
        messages,
        max_tokens=1500,
        temperature=0.65,
        top_p=0.9,
        stop=["</s>", "<|im_end|>"],
    )

    result_text = output["choices"][0]["message"]["content"].strip()

    # For plain PlantUML output, trim to the @startuml…@enduml span.
    if not is_bpmn:
        end_marker = "@enduml"
        start = result_text.find("@startuml")
        end = result_text.rfind(end_marker)
        if start != -1 and end != -1:
            # was `end + 8`: off by one — "@enduml" is 7 characters long,
            # so the old slice kept one stray trailing character.
            result_text = result_text[start:end + len(end_marker)]

    return PicToUmlResponse(result=result_text)