liturriago commited on
Commit
f93586a
1 Parent(s): 85d7bbb

ImageClassificationSpace

API en HuggingFace Space con TensorFlow Serving-like pipeline.

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.keras filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10

# Create a non-root user to run the app
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Install dependencies first so this layer is cached across code changes
COPY --chown=user ./requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

COPY --chown=user . /app

# Port the HF Space serves on
EXPOSE 7860

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,11 +1,11 @@
1
  ---
2
  title: ImageClassificationSpace
3
- emoji: 📚
4
- colorFrom: green
5
  colorTo: purple
6
  sdk: docker
7
  pinned: false
8
- license: apache-2.0
9
  ---
10
 
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
1
  ---
2
  title: ImageClassificationSpace
3
+ emoji: 🚀
4
+ colorFrom: blue
5
  colorTo: purple
6
  sdk: docker
7
  pinned: false
 
8
  ---
9
 
10
+ # ClassificationVCAPI
11
+ API en HuggingFace Space con TensorFlow Serving-like pipeline.
app.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from pydantic import BaseModel
import base64
import numpy as np
from PIL import Image
import io
import ai_edge_litert.interpreter as interpreter

app = FastAPI(title="AI Edge LiteRT API")

# Load the TFLite model once at startup (shared by all requests)
MODEL_PATH = "./my_classification_model_float16.tflite"  # Change according to your model (float32, float16, int8, etc.)
litert_interpreter = interpreter.Interpreter(model_path=MODEL_PATH)
litert_interpreter.allocate_tensors()

# Cache the input/output tensor metadata (index, shape, dtype)
input_details = litert_interpreter.get_input_details()
output_details = litert_interpreter.get_output_details()

# True when the model expects uint8 input, i.e. it is INT8-quantized
IS_INT8_MODEL = input_details[0]['dtype'] == np.uint8
22
+
23
class ImagePayload(BaseModel):
    """Request body for POST /predict: an image encoded as a base64 string."""
    image_base64: str
25
+
26
@app.get("/")
def home():
    """Root endpoint: reports service status and the loaded model's I/O details."""
    model_info = {
        "input_shape": input_details[0]['shape'].tolist(),
        "input_dtype": str(input_details[0]['dtype']),
        "output_shape": output_details[0]['shape'].tolist(),
        "output_dtype": str(output_details[0]['dtype']),
        "quantized": IS_INT8_MODEL,
    }
    return {
        "status": "ok",
        "message": "API is running! Use POST /predict",
        "model_info": model_info,
    }
39
+
40
def preprocess_image(img_bytes, target_size=(224, 224)):
    """
    Preprocess an image for model inference using PIL and NumPy.

    Args:
        img_bytes: Raw image bytes in any format PIL can decode.
        target_size: (width, height) tuple passed to PIL's resize.

    Returns:
        A numpy array of shape (1, H, W, 3): float32 in [0, 1] for float
        models, or uint8 in [0, 255] when the model is INT8-quantized.
    """
    # Decode the image and force 3-channel RGB (handles RGBA, grayscale, ...)
    img = Image.open(io.BytesIO(img_bytes))
    if img.mode != 'RGB':
        img = img.convert('RGB')

    img = img.resize(target_size, Image.BILINEAR)

    # Normalize to [0, 1] and add the batch dimension
    img_array = np.array(img, dtype=np.float32) / 255.0
    img_array = np.expand_dims(img_array, axis=0)

    if IS_INT8_MODEL:
        # BUG FIX: rescale back to [0, 255] before the uint8 cast. The
        # previous code cast the [0, 1] floats directly to uint8, which
        # truncated nearly every pixel to 0 and destroyed the input for
        # quantized models. The model handles scale/zero-point internally.
        img_array = (img_array * 255.0).astype(np.uint8)

    return img_array
77
+
78
def postprocess_output(output):
    """
    Postprocess the raw model output into a list of probabilities.

    Args:
        output: Raw output tensor from the interpreter, shape (1, num_classes).

    Returns:
        Probabilities for batch item 0, as a plain Python list of floats.
    """
    if IS_INT8_MODEL:
        # BUG FIX: the original comment promised a uint8 [0, 255] -> float
        # [0, 1] conversion, but the code only cast the dtype, so quantized
        # models returned "probabilities" up to 255. Divide by 255 so both
        # model variants return comparable values.
        # NOTE(review): exact dequantization would use the scale/zero-point
        # in output_details[0]['quantization']; for a softmax output those
        # are typically 1/255 and 0, which this matches — confirm for your model.
        output = output.astype(np.float32) / 255.0

    # The model already applies softmax; just flatten batch item 0 to a list.
    return output[0].tolist()
97
+
98
@app.post("/predict")
def predict(payload: ImagePayload):
    """
    Prediction endpoint.

    Args:
        payload: JSON body with a base64-encoded image.

    Returns:
        Dict with the full probability vector, the argmax class, its
        confidence, and the top-5 (index, probability) pairs — or an
        error dict if any step fails.
    """
    try:
        # Base64 -> raw image bytes
        raw_bytes = base64.b64decode(payload.image_base64)

        # Build the model input tensor
        tensor = preprocess_image(raw_bytes, target_size=(224, 224))

        # Run inference with AI Edge LiteRT
        litert_interpreter.set_tensor(input_details[0]['index'], tensor)
        litert_interpreter.invoke()
        raw_output = litert_interpreter.get_tensor(output_details[0]['index'])

        # Raw tensor -> probability list
        predictions = postprocess_output(raw_output)

        best_class = int(np.argmax(predictions))
        ranked = sorted(enumerate(predictions), key=lambda pair: pair[1], reverse=True)

        return {
            "prediction": predictions,
            "predicted_class": best_class,
            "confidence": float(predictions[best_class]),
            "top_5": [(idx, float(score)) for idx, score in ranked[:5]],
        }

    except Exception as e:
        # Best-effort error reporting: return the failure instead of a 500
        return {
            "error": str(e),
            "status": "failed",
        }
144
+
145
@app.get("/health")
def health_check():
    """Liveness probe: always healthy once the module (and model) has loaded."""
    report = {"status": "healthy", "model_loaded": True}
    return report
my_classification_model_float16.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b3b9010c4b53f7a81599fdccf80e3d85d181b8cd8a89cc7c30c20ddcc04de26
3
+ size 22375136
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn[standard]
3
+ pydantic
4
+ numpy
5
+ pillow
6
+ ai-edge-litert
runtime.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ python-3.10