ABAO77 committed on
Commit
df1cd25
·
verified ·
1 Parent(s): 34d0b7a

Upload 5 files

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -35
  2. .gitignore +6 -0
  3. Dockerfile +17 -0
  4. app.py +110 -0
  5. requirements.txt +8 -0
.gitattributes CHANGED
@@ -1,35 +1 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.onnx filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ **/__pycache__/
2
+ .env
3
+ src/model/detect.onnx
4
+ src/model/segment.pt
5
+ .venv
6
+ # src/model/segment.onnx
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

# Non-root user with UID 1000 — the convention required by Hugging Face Spaces.
RUN useradd -m -u 1000 user
# pytesseract (see requirements.txt) shells out to the tesseract binary;
# clear the apt lists afterwards to keep the image small.
RUN apt-get update && apt-get install -y tesseract-ocr && rm -rf /var/lib/apt/lists/*
USER user
# Put user-level pip console scripts (e.g. uvicorn) on PATH.
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Copy requirements first so the dependency layer is cached across code-only rebuilds.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
# 7860 is the port Hugging Face Spaces expects the app to listen on.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""FastAPI service exposing ONNX segmentation + OCR inference and an LLM endpoint."""
from dotenv import load_dotenv
from src.inference.segment_inference import inference
from src.utils.utils_segment import extract_text, draw_bounding_boxes
from fastapi import FastAPI, UploadFile, status, Form, File
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import cv2
import numpy as np
from src.config.llm import llm
from src.prompt.promt import format_prompt
from langchain_core.output_parsers import JsonOutputParser
import uvicorn
from io import BytesIO
import base64
from pydantic import Field, BaseModel
from concurrent.futures import ThreadPoolExecutor
import asyncio
import os
import functools
import threading

load_dotenv()  # pull configuration/secrets (e.g. LLM API keys) from a local .env file

app = FastAPI(docs_url="/")  # serve the Swagger UI at the root path
# NOTE(review): wildcard origins together with allow_credentials=True is
# rejected by browsers under the CORS spec — confirm whether credentials
# (cookies/auth headers) are actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Path to the exported segmentation model (tracked via git-lfs, see .gitattributes).
model_path = "./src/model/segment.onnx"

# os.cpu_count() may return None on platforms where the count is undeterminable;
# the original `int(os.cpu_count() + 4)` would raise TypeError in that case.
executor = ThreadPoolExecutor(max_workers=(os.cpu_count() or 1) + 4)
33
+
34
+
35
def run_in_thread(func, *args, **kwargs):
    """Schedule *func(*args, **kwargs)* on the module-level thread pool.

    Returns an awaitable ``asyncio.Future`` resolving to the function's result,
    so async endpoints can offload blocking CPU/OCR work without stalling the
    event loop.

    Must be called from within a running event loop (all callers are ``async``
    endpoint handlers).
    """
    # get_running_loop() is the non-deprecated API for coroutine context;
    # asyncio.get_event_loop() is deprecated for this use since Python 3.10.
    loop = asyncio.get_running_loop()

    # Wrap the callable so the worker logs which thread actually executed it.
    @functools.wraps(func)
    def wrapper(*inner_args, **inner_kwargs):
        thread_id = threading.get_ident()
        print(f"[Running function '{func.__name__}' in thread ID: {thread_id}]")
        return func(*inner_args, **inner_kwargs)

    # run_in_executor accepts no kwargs, hence the functools.partial binding.
    return loop.run_in_executor(executor, functools.partial(wrapper, *args, **kwargs))
47
+
48
+
49
def predict_func(threshold_confidence, threshold_iou, image):
    """Decode raw image bytes, run segmentation inference, extract text from
    the detected regions, and return the text plus an annotated preview.

    Args:
        threshold_confidence: minimum detection confidence, in [0, 1].
        threshold_iou: IoU threshold for suppression, in [0, 1].
        image: raw encoded image bytes (e.g. JPEG/PNG) from the upload.

    Returns:
        dict with "outputs" (extracted text) and "image_base64"
        (base64-encoded JPEG of the image with bounding boxes drawn).

    Raises:
        ValueError: if the bytes cannot be decoded as an image.
    """
    raw = np.frombuffer(image, np.uint8)
    decoded = cv2.imdecode(raw, cv2.IMREAD_COLOR)
    # cv2.imdecode signals failure by returning None rather than raising;
    # fail fast with a clear message instead of a confusing downstream error.
    if decoded is None:
        raise ValueError("Could not decode the uploaded bytes as an image")
    outputs = inference(
        decoded,
        model_path=model_path,
        threshold_confidence=threshold_confidence,
        threshold_iou=threshold_iou,
    )
    text = extract_text(outputs=outputs, image_origin=decoded)
    # draw_bounding_boxes appears to return an object with a PIL-style
    # .save(buffer, format=...) API — TODO confirm against its definition.
    annotated = draw_bounding_boxes(decoded, outputs)
    jpeg_buffer = BytesIO()
    annotated.save(jpeg_buffer, format="JPEG")
    jpeg_buffer.seek(0)

    image_base64 = base64.b64encode(jpeg_buffer.getvalue()).decode("utf-8")
    return {"outputs": text, "image_base64": image_base64}
68
+
69
+
70
@app.post("/inference", status_code=status.HTTP_200_OK)
async def predict(
    threshold_confidence: float = Form(default=0.7, ge=0, le=1),
    threshold_iou: float = Form(default=0.7, ge=0, le=1),
    image: UploadFile = File(...),
):
    """Segmentation + OCR over an uploaded image.

    The blocking inference work runs on the thread pool via run_in_thread so
    the event loop stays responsive. Responds 200 with
    {"outputs", "image_base64"} on success, 400 with {"error"} on any failure.
    """
    try:
        raw_bytes = await image.read()
        result = await run_in_thread(
            predict_func, threshold_confidence, threshold_iou, raw_bytes
        )
        return JSONResponse(content=result, status_code=status.HTTP_200_OK)
    except Exception as exc:
        return JSONResponse(
            content={"error": str(exc)},
            status_code=status.HTTP_400_BAD_REQUEST,
        )
86
+
87
+
88
class LLMRequest(BaseModel):
    """Request body schema for the /llm endpoint."""

    # Raw text interpolated into the prompt template (see call_llm); required.
    text: str = Field(..., title="Text to generate completion")
90
+
91
+
92
def call_llm(data):
    """Format *data* into the prompt template, invoke the LLM, and parse the
    completion as JSON.

    Args:
        data: text to embed into the prompt (the "input" template variable).

    Returns:
        The completion parsed by ``JsonOutputParser``.
    """
    # Renamed from `input`, which shadowed the builtin of the same name.
    prompt = format_prompt.format(input=data)
    completion = llm.invoke(prompt)
    return JsonOutputParser().parse(completion)
97
+
98
+
99
@app.post("/llm", status_code=status.HTTP_200_OK)
async def llm_predict(data: LLMRequest):
    """Run the LLM over the submitted text, off the event loop.

    Responds 200 with the parsed JSON completion, or 400 with {"error"}
    if the call or parsing fails.
    """
    try:
        parsed = await run_in_thread(call_llm, data.text)
        return JSONResponse(content=parsed, status_code=status.HTTP_200_OK)
    except Exception as exc:
        return JSONResponse(
            content={"error": str(exc)},
            status_code=status.HTTP_400_BAD_REQUEST,
        )
107
+
108
+
109
if __name__ == "__main__":
    # Local development entry point with auto-reload; the Docker image instead
    # starts uvicorn directly on 0.0.0.0:7860 (see the Dockerfile CMD).
    uvicorn.run("app:app", host="localhost", port=8080, reload=True)
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ python-dotenv
3
+ pytesseract
4
+ opencv-python
5
+ langchain-google-genai
6
+ uvicorn
7
+ python-multipart
8
+ # "asycnio" removed (typo): asyncio is part of the Python 3.9 standard library and must not be pip-installed