Spaces:
Running
Running
File size: 5,633 Bytes
e68f9ee f6637e0 e68f9ee f6637e0 e68f9ee 01bcde7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 |
import os
import json
import numpy as np
# Using ai-edge-litert (Google's new TFLite runtime)
from ai_edge_litert.interpreter import Interpreter
# Import MobileNetV2 preprocessing
try:
    # Prefer Keras' MobileNetV2 preprocessing (scales pixels to [-1, 1]);
    # fall back to plain 0-1 normalization when TensorFlow is absent.
    from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
    USE_MOBILENET_PREPROCESSING = True
except ImportError:
    USE_MOBILENET_PREPROCESSING = False
    # Repaired mojibake: warning emoji was garbled in the original encoding.
    print("⚠️ TensorFlow not available, using standard normalization")
from flask import Flask, request, jsonify
from flask_cors import CORS
from PIL import Image
import io
# Flask application; CORS is enabled so browser clients served from other
# origins can call the prediction API directly.
app = Flask(__name__)
CORS(app)
# --- CONFIGURATION ---
# Resolve paths relative to this file so the script works from any CWD.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Make sure these paths match your folder structure.
MODEL_PATH = os.path.join(BASE_DIR, 'models', 'batik_model.tflite')
CLASSES_PATH = os.path.join(BASE_DIR, 'models', 'batik_classes_mobilenet_ultimate.json')
# Startup banner. Emoji repaired from mojibake (UTF-8 read as ISO-8859-7);
# "⚡" is an exact byte match, "🚀" is the most plausible F0-9F glyph.
print("==================================================")
print("🚀 MEMULAI BATIK CLASSIFIER (TFLITE ENGINE V2)")
print("⚡ Mode: ai-edge-litert Runtime (38 Batik Classes)")
print("==================================================")
# --- 1. LOAD TFLITE MODEL ---
if not os.path.exists(MODEL_PATH):
    print(f"❌ ERROR: File model TFLite tidak ditemukan: {MODEL_PATH}")
    # Bug fix: bare exit() raises SystemExit(None) -> exit status 0, which
    # made a fatal startup failure look successful to the supervisor.
    raise SystemExit(1)
try:
    # ai-edge-litert exposes the same Interpreter API as tf.lite.Interpreter.
    interpreter = Interpreter(model_path=MODEL_PATH)
    interpreter.allocate_tensors()
    # Cache tensor metadata once; reused on every /predict request.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_shape = input_details[0]['shape']
    output_shape = output_details[0]['shape']
    # Rejoined a print statement that the original had split across two lines.
    print("✅ Model TFLite V2 berhasil dimuat!")
    print(f"📊 Input Shape: {input_shape}, Output Classes: {output_shape[1]}")
except Exception as e:
    print(f"❌ Gagal load TFLite: {e}")
    raise SystemExit(1)
# --- 2. LOAD CLASS LABELS ---
if not os.path.exists(CLASSES_PATH):
    print(f"❌ ERROR: File label tidak ditemukan di {CLASSES_PATH}")
    # Bug fix: bare exit() exits with status 0; signal the failure properly.
    raise SystemExit(1)
try:
    with open(CLASSES_PATH, 'r') as f:
        class_data = json.load(f)
    # Accept several JSON layouts:
    #   {"classes": [...]}, {"0": "name", "1": ...}, or a plain list.
    if isinstance(class_data, dict) and "classes" in class_data:
        class_names = class_data["classes"]
    elif isinstance(class_data, dict):
        try:
            # Index-keyed dict: sort numerically so labels line up with
            # the model's output indices.
            sorted_keys = sorted(class_data.keys(), key=lambda x: int(x))
            class_names = [class_data[k] for k in sorted_keys]
        except ValueError:
            # Non-numeric keys: fall back to insertion-order values.
            class_names = list(class_data.values())
    elif isinstance(class_data, list):
        class_names = class_data
    else:
        raise ValueError("Format JSON tidak dikenali.")
    # Rejoined a print statement the original had split across two lines.
    print(f"✅ Berhasil memuat {len(class_names)} nama motif batik.")
    # print(f"📝 Contoh: {class_names[:3]}...")
except Exception as e:
    print(f"❌ Gagal membaca file JSON Classes: {e}")
    raise SystemExit(1)
# --- PREPROCESSING ---
def prepare_image(image, target_size=(224, 224)):
    """Turn a PIL image into a (1, H, W, 3) float32 batch for the model.

    Applies MobileNetV2 preprocessing (values in [-1, 1]) when TensorFlow
    is available; otherwise scales pixels to the 0-1 range.
    """
    rgb = image if image.mode == "RGB" else image.convert("RGB")
    resized = rgb.resize(target_size)
    pixels = np.array(resized, dtype=np.float32)
    if USE_MOBILENET_PREPROCESSING:
        # MobileNetV2's preprocess_input expects a batched array.
        return preprocess_input(np.expand_dims(pixels, axis=0))
    # Standard 0-1 normalization, then add the batch axis.
    return np.expand_dims(pixels / 255.0, axis=0)
@app.route('/', methods=['GET'])
def home():
    """Health-check endpoint reporting service status and model metadata."""
    payload = {
        "status": "Online",
        "mode": "ai-edge-litert Runtime (BatikLens V2)",
        "model_version": "v2",
        "classes_loaded": len(class_names),
    }
    return jsonify(payload)
@app.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded batik image; return the best and top-5 classes."""
    # Accept either 'file' or 'image' as the multipart field name.
    upload = request.files.get('file') or request.files.get('image')
    if not upload:
        return jsonify({"error": "No file part"}), 400
    if upload.filename == '':
        return jsonify({"error": "No selected file"}), 400
    try:
        # Decode the upload and preprocess it into a model-ready batch.
        pil_image = Image.open(io.BytesIO(upload.read()))
        batch = prepare_image(pil_image)
        # Run inference through the TFLite interpreter.
        interpreter.set_tensor(input_details[0]['index'], batch)
        interpreter.invoke()
        scores = interpreter.get_tensor(output_details[0]['index'])[0]
        # Highest-scoring class and its confidence.
        best_idx = np.argmax(scores)
        best_score = float(scores[best_idx])
        # Indices of the five highest scores, best first.
        ranked = np.argsort(scores)[-5:][::-1]
        top_5 = []
        for idx in ranked:
            score = float(scores[idx])
            top_5.append({
                "class": class_names[idx],
                "confidence": score,
                "percentage": f"{score:.2%}",
            })
        return jsonify({
            "success": True,
            "prediction": class_names[best_idx],
            "confidence": best_score,
            "percentage": f"{best_score:.2%}",
            "top_5_predictions": top_5,
        })
    except Exception as e:
        # Boundary handler: report any decode/inference failure as JSON.
        return jsonify({"success": False, "error": str(e)}), 500
if __name__ == '__main__':
    # Removed a stray trailing "|" (extraction residue) from this line.
    # Bind to all interfaces on port 7860 (Hugging Face Spaces convention).
    app.run(host='0.0.0.0', port=7860)