feat: add Dockerfile and image-classification feature
Browse files- Dockerfile +9 -1
- app.py +4 -1
- features/image_classifier/__init__.py +0 -0
- features/image_classifier/controller.py +11 -0
- features/image_classifier/inferencer.py +22 -0
- features/image_classifier/model_loader.py +43 -0
- features/image_classifier/preprocess.py +18 -0
- features/image_classifier/routes.py +26 -0
- features/nepali_text_classifier/controller.py +0 -1
- features/nepali_text_classifier/model_loader.py +1 -1
- features/nepali_text_classifier/preprocess.py +6 -8
- features/text_classifier/model_loader.py +1 -1
Dockerfile
CHANGED
|
@@ -1,9 +1,15 @@
|
|
| 1 |
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
|
| 2 |
# you will also find guides on how best to write your Dockerfile
|
| 3 |
|
| 4 |
-
FROM python:3.
|
| 5 |
|
|
|
|
| 6 |
RUN useradd -m -u 1000 user
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
USER user
|
| 8 |
ENV PATH="/home/user/.local/bin:$PATH"
|
| 9 |
|
|
@@ -14,4 +20,6 @@ RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
|
| 14 |
RUN python -m spacy download en_core_web_sm || echo "Failed to download model"
|
| 15 |
|
| 16 |
COPY --chown=user . /app
|
|
|
|
| 17 |
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
|
|
|
|
|
| 1 |
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
|
| 2 |
# you will also find guides on how best to write your Dockerfile
|
| 3 |
|
| 4 |
+
FROM python:3.10
|
| 5 |
|
| 6 |
+
# Create user first
|
| 7 |
RUN useradd -m -u 1000 user
|
| 8 |
+
|
| 9 |
+
# Install system dependencies (requires root)
|
| 10 |
+
RUN apt-get update && apt-get install -y libgl1
|
| 11 |
+
|
| 12 |
+
# Switch to non-root user
|
| 13 |
USER user
|
| 14 |
ENV PATH="/home/user/.local/bin:$PATH"
|
| 15 |
|
|
|
|
| 20 |
RUN python -m spacy download en_core_web_sm || echo "Failed to download model"
|
| 21 |
|
| 22 |
COPY --chown=user . /app
|
| 23 |
+
|
| 24 |
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
| 25 |
+
|
app.py
CHANGED
|
@@ -6,7 +6,9 @@ from slowapi.util import get_remote_address
|
|
| 6 |
from fastapi.responses import JSONResponse
|
| 7 |
from features.text_classifier.routes import router as text_classifier_router
|
| 8 |
from features.nepali_text_classifier.routes import router as nepali_text_classifier_router
|
|
|
|
| 9 |
from config import ACCESS_RATE
|
|
|
|
| 10 |
import requests
|
| 11 |
limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])
|
| 12 |
|
|
@@ -27,11 +29,12 @@ app.add_middleware(SlowAPIMiddleware)
|
|
| 27 |
# Include your routes
|
| 28 |
app.include_router(text_classifier_router, prefix="/text")
|
| 29 |
app.include_router(nepali_text_classifier_router,prefix="/NP")
|
|
|
|
| 30 |
@app.get("/")
|
| 31 |
@limiter.limit(ACCESS_RATE)
|
| 32 |
async def root(request: Request):
|
| 33 |
return {
|
| 34 |
"message": "API is working",
|
| 35 |
-
"endpoints": ["/text/analyse", "/text/upload", "/text/analyse-sentences", "/text/analyse-sentance-file"]
|
| 36 |
}
|
| 37 |
|
|
|
|
| 6 |
from fastapi.responses import JSONResponse
|
| 7 |
from features.text_classifier.routes import router as text_classifier_router
|
| 8 |
from features.nepali_text_classifier.routes import router as nepali_text_classifier_router
|
| 9 |
+
from features.image_classifier.routes import router as image_classifier_router
|
| 10 |
from config import ACCESS_RATE
|
| 11 |
+
|
| 12 |
import requests
|
| 13 |
limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])
|
| 14 |
|
|
|
|
| 29 |
# Include your routes
|
| 30 |
app.include_router(text_classifier_router, prefix="/text")
|
| 31 |
app.include_router(nepali_text_classifier_router,prefix="/NP")
|
| 32 |
+
app.include_router(image_classifier_router,prefix="/AI-image")
|
| 33 |
@app.get("/")
|
| 34 |
@limiter.limit(ACCESS_RATE)
|
| 35 |
async def root(request: Request):
|
| 36 |
return {
|
| 37 |
"message": "API is working",
|
| 38 |
+
"endpoints": ["/text/analyse", "/text/upload", "/text/analyse-sentences", "/text/analyse-sentance-file","/NP/analyse","/NP/upload","/NP/analyse-sentences","/NP/file-sentences-analyse","/AI-image/analyse"]
|
| 39 |
}
|
| 40 |
|
features/image_classifier/__init__.py
ADDED
|
File without changes
|
features/image_classifier/controller.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from fastapi import HTTPException, File, UploadFile

from .preprocess import preprocess_image
from .inferencer import classify_image


async def Classify_Image_router(file: UploadFile = File(...)):
    """Classify an uploaded image as AI- or human-generated.

    Preprocesses the upload into a model-ready array, runs inference,
    and returns the classifier's result dict.

    Raises:
        HTTPException: 400 with the underlying error message when
            preprocessing or inference fails.
    """
    try:
        image_array = preprocess_image(file)
        return classify_image(image_array)
    except HTTPException:
        # Don't rewrap deliberate HTTP errors raised downstream as 400s.
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e)) from e
|
features/image_classifier/inferencer.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import numpy as np

from .model_loader import load_model

# Load the classifier once at import time and reuse it for every request.
model = load_model()


def classify_image(image: np.ndarray):
    """Run the AI-vs-human classifier on a preprocessed image batch.

    Args:
        image: Model-ready array (batch of one image) as produced by
            the feature's preprocessing step.

    Returns:
        dict with a human-readable ``label`` plus AI/human confidence
        percentages rounded to two decimal places.
    """
    predictions = model.predict(image)[0]
    human_conf = float(predictions[0])
    ai_conf = float(predictions[1])

    # Three-way verdict: confident AI, confident human, or uncertain band.
    if ai_conf > 0.55:
        label = "AI Generated"
    elif ai_conf < 0.45:
        label = "Human Generated"
    else:
        label = "Maybe AI"

    return {
        "label": label,
        "ai_confidence": round(ai_conf * 100, 2),
        "human_confidence": round(human_conf * 100, 2),
    }
|
features/image_classifier/model_loader.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import shutil

import tensorflow as tf
from tensorflow.keras.models import load_model as keras_load_model
from huggingface_hub import snapshot_download

# Constants
REPO_ID = "can-org/AI-VS-HUMAN-IMAGE-classifier"
MODEL_DIR = "./IMG_models"
MODEL_PATH = os.path.join(MODEL_DIR, 'latest-my_cnn_model.h5')  # adjust path as needed

_model_img = None  # process-wide cache so the weights are loaded only once


def warmup():
    """Eagerly download and load the model (call at application startup)."""
    global _model_img
    if not os.path.exists(MODEL_DIR):
        download_model_Repo()
    _model_img = load_model()


def download_model_Repo():
    """Fetch the model repository from the Hugging Face Hub into MODEL_DIR.

    No-op when MODEL_DIR already exists on disk.
    """
    if os.path.exists(MODEL_DIR):
        return
    snapshot_path = snapshot_download(repo_id=REPO_ID)
    os.makedirs(MODEL_DIR, exist_ok=True)
    shutil.copytree(snapshot_path, MODEL_DIR, dirs_exist_ok=True)


def load_model():
    """Return the Keras model, downloading and caching it on first use.

    Fix: the module-level ``_model_img`` cache was previously written by
    ``warmup`` but never read, so every call reloaded the weights from
    disk. Now repeated calls return the cached instance.
    """
    global _model_img
    if _model_img is not None:
        return _model_img

    if not os.path.exists(MODEL_DIR):
        download_model_Repo()

    # Check for GPU availability
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        # GPU is available, load model normally
        print("GPU detected, loading model on GPU.")
        _model_img = keras_load_model(MODEL_PATH)
    else:
        # No GPU, force CPU usage
        print("No GPU detected, forcing model loading on CPU.")
        with tf.device('/CPU:0'):
            _model_img = keras_load_model(MODEL_PATH)
    return _model_img
features/image_classifier/preprocess.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import numpy as np
import cv2


def preprocess_image(file):
    """Decode an uploaded image into a normalized model-input batch.

    Args:
        file: FastAPI ``UploadFile`` whose raw bytes are an encoded image.

    Returns:
        Float array of shape (1, 256, 256, 3): RGB, values scaled to [0, 1].

    Raises:
        ValueError: if the bytes cannot be decoded as an image.
    """
    # Pull the raw bytes out of the UploadFile wrapper.
    raw_bytes = file.file.read()
    buffer = np.frombuffer(raw_bytes, np.uint8)
    decoded = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
    if decoded is None:
        raise ValueError("Could not decode image.")

    # Model expects 256x256 RGB; OpenCV decodes as BGR, so convert.
    resized = cv2.resize(decoded, (256, 256))
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    normalized = rgb / 255.0
    return np.expand_dims(normalized, axis=0)
|
features/image_classifier/routes.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from fastapi import APIRouter, File, Request, Depends, HTTPException, UploadFile
from fastapi.security import HTTPBearer
from slowapi import Limiter
from slowapi.util import get_remote_address

from config import ACCESS_RATE
from .controller import Classify_Image_router

router = APIRouter()
limiter = Limiter(key_func=get_remote_address)
security = HTTPBearer()


@router.post("/analyse")
@limiter.limit(ACCESS_RATE)
async def analyse(
    request: Request,
    file: UploadFile = File(...),
    token: str = Depends(security)
):
    """Classify an uploaded image; bearer-token protected and rate-limited."""
    # Controller wraps any failure into HTTPException(400).
    result = await Classify_Image_router(file)  # await the async function
    return result


@router.get("/health")
@limiter.limit(ACCESS_RATE)
def health(request: Request):
    """Liveness probe for the image-classifier feature."""
    return {"status": "ok"}
|
features/nepali_text_classifier/controller.py
CHANGED
|
@@ -3,7 +3,6 @@ from io import BytesIO
|
|
| 3 |
from fastapi import HTTPException, UploadFile, status, Depends
|
| 4 |
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
| 5 |
import os
|
| 6 |
-
|
| 7 |
from features.nepali_text_classifier.inferencer import classify_text
|
| 8 |
from features.nepali_text_classifier.preprocess import *
|
| 9 |
import re
|
|
|
|
| 3 |
from fastapi import HTTPException, UploadFile, status, Depends
|
| 4 |
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
| 5 |
import os
|
|
|
|
| 6 |
from features.nepali_text_classifier.inferencer import classify_text
|
| 7 |
from features.nepali_text_classifier.preprocess import *
|
| 8 |
import re
|
features/nepali_text_classifier/model_loader.py
CHANGED
|
@@ -8,7 +8,7 @@ from huggingface_hub import snapshot_download
|
|
| 8 |
from transformers import AutoTokenizer, AutoModel
|
| 9 |
|
| 10 |
# Configs
|
| 11 |
-
REPO_ID = "
|
| 12 |
BASE_DIR = "./np_text_model"
|
| 13 |
TOKENIZER_DIR = os.path.join(BASE_DIR, "classifier") # <- update this to match your uploaded folder
|
| 14 |
WEIGHTS_PATH = os.path.join(BASE_DIR, "model_95_acc.pth") # <- change to match actual uploaded weight
|
|
|
|
| 8 |
from transformers import AutoTokenizer, AutoModel
|
| 9 |
|
| 10 |
# Configs
|
| 11 |
+
REPO_ID = "can-org/Nepali-AI-VS-HUMAN"
|
| 12 |
BASE_DIR = "./np_text_model"
|
| 13 |
TOKENIZER_DIR = os.path.join(BASE_DIR, "classifier") # <- update this to match your uploaded folder
|
| 14 |
WEIGHTS_PATH = os.path.join(BASE_DIR, "model_95_acc.pth") # <- change to match actual uploaded weight
|
features/nepali_text_classifier/preprocess.py
CHANGED
|
@@ -20,19 +20,17 @@ def parse_pdf(file: BytesIO):
|
|
| 20 |
for page_num in range(doc.page_count):
|
| 21 |
page = doc.load_page(page_num)
|
| 22 |
text += page.get_text()
|
| 23 |
-
return text
|
| 24 |
except Exception as e:
|
| 25 |
logging.error(f"Error while processing PDF: {str(e)}")
|
| 26 |
raise HTTPException(
|
| 27 |
status_code=500, detail="Error processing PDF file")
|
| 28 |
|
| 29 |
-
|
| 30 |
def parse_txt(file: BytesIO):
|
| 31 |
return file.read().decode("utf-8")
|
| 32 |
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
|
|
|
| 20 |
for page_num in range(doc.page_count):
|
| 21 |
page = doc.load_page(page_num)
|
| 22 |
text += page.get_text()
|
| 23 |
+
return text
|
| 24 |
except Exception as e:
|
| 25 |
logging.error(f"Error while processing PDF: {str(e)}")
|
| 26 |
raise HTTPException(
|
| 27 |
status_code=500, detail="Error processing PDF file")
|
| 28 |
|
|
|
|
| 29 |
def parse_txt(file: BytesIO):
|
| 30 |
return file.read().decode("utf-8")
|
| 31 |
|
| 32 |
def end_symbol_for_NP_text(text: str) -> str:
    """Ensure Nepali text ends with the danda ("।") sentence terminator.

    Leading and trailing whitespace is stripped before the check, so the
    marker is appended directly to the last visible character.
    """
    trimmed = text.strip()
    return trimmed if trimmed.endswith("।") else trimmed + "।"
|
|
|
features/text_classifier/model_loader.py
CHANGED
|
@@ -6,7 +6,7 @@ from huggingface_hub import snapshot_download
|
|
| 6 |
import torch
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
load_dotenv()
|
| 9 |
-
REPO_ID = "
|
| 10 |
MODEL_DIR = "./models"
|
| 11 |
TOKENIZER_DIR = os.path.join(MODEL_DIR, "model")
|
| 12 |
WEIGHTS_PATH = os.path.join(MODEL_DIR, "model_weights.pth")
|
|
|
|
| 6 |
import torch
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
load_dotenv()
|
| 9 |
+
REPO_ID = "can-org/AI-Content-Checker"
|
| 10 |
MODEL_DIR = "./models"
|
| 11 |
TOKENIZER_DIR = os.path.join(MODEL_DIR, "model")
|
| 12 |
WEIGHTS_PATH = os.path.join(MODEL_DIR, "model_weights.pth")
|