import gradio as gr
import tensorflow as tf
import numpy as np
from PIL import Image
import os
import glob

# Load the model
print("Loading model...")
model = tf.keras.models.load_model('finetuned_food_270.keras')
print("Model loaded successfully!")

# Load the class names. Blank lines are skipped so a trailing newline in
# labels.txt does not inject an empty class name and shift the label list.
with open('labels.txt', 'r', encoding='utf-8') as f:
    class_names = [line.strip() for line in f if line.strip()]
print(f"{len(class_names)} classes loaded")

# Input size the model was trained with (EfficientNetV2B2 @ 224x224).
IMG_SIZE = (224, 224)


def preprocess_image(image):
    """Prepare an input image for the model.

    Accepts a numpy array (as delivered by ``gr.Image(type="numpy")``) or a
    PIL image, converts it to RGB, resizes it to ``IMG_SIZE`` and returns a
    float32 array of shape ``(1, H, W, 3)`` in the [0, 255] range.

    NOTE: EfficientNetV2 models include rescaling in the graph and expect raw
    [0, 255] input, so the V1 ``efficientnet.preprocess_input`` function must
    NOT be applied here.
    """
    # Convert to PIL Image if Gradio handed us a numpy array.
    if isinstance(image, np.ndarray):
        img = Image.fromarray(image)
    else:
        img = image

    # Force RGB (handles grayscale / RGBA uploads).
    img = img.convert('RGB')

    # Resize to the size used during training.
    img = img.resize(IMG_SIZE)

    # float32, [0, 255], with a leading batch dimension.
    img_array = np.array(img, dtype=np.float32)
    return np.expand_dims(img_array, axis=0)


def predict(image):
    """Food prediction function.

    Returns a ``{label: confidence}`` dict of the top-5 predictions, or an
    ``{"Error": message}`` dict if anything goes wrong (so the UI shows the
    failure instead of crashing the app).
    """
    try:
        processed_image = preprocess_image(image)
        # Softmax scores for the single image in the batch.
        predictions = model.predict(processed_image, verbose=0)[0]
        # Indices of the 5 highest scores, best first.
        top_indices = np.argsort(predictions)[-5:][::-1]
        return {class_names[idx]: float(predictions[idx]) for idx in top_indices}
    except Exception as e:
        print(f"Error: {e}")
        return {"Error": str(e)}


def load_sample_images():
    """Load every image in the ``sample_images`` folder.

    Returns a sorted list of file paths; empty if the folder is missing.
    Matches are collected into a set first because on case-insensitive
    filesystems (Windows/macOS) '*.jpg' and '*.JPG' match the same files,
    which would otherwise duplicate every example in the gallery.
    """
    sample_folder = "sample_images"
    if not os.path.exists(sample_folder):
        return []
    # Supported image formats (both lower- and upper-case extensions).
    image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.JPG', '*.JPEG', '*.PNG']
    found = set()
    for ext in image_extensions:
        found.update(glob.glob(os.path.join(sample_folder, ext)))
    return sorted(found)


# Collect the example images once at startup.
sample_images = load_sample_images()
print(f"{len(sample_images)} örnek resim yüklendi")

# --- Gradio Interface ---
with gr.Blocks(title="Food 270 Classifier") as demo:
    # Title and description
    gr.Markdown("""
    # 🍽️ Food 270 Classifier
    **An AI model that can recognize 270 different types of food**
    📸 Upload a photo of food and let it guess what it is!
    """)

    # Main row
    with gr.Row():
        # Left column - Input
        with gr.Column(scale=1):
            input_image = gr.Image(label="Food Photo", type="numpy")
            # Buttons
            with gr.Row():
                clear_btn = gr.Button("🗑️ Clear", variant="secondary")
                submit_btn = gr.Button("🔍 Predict", variant="primary")

        # Right column - Output
        with gr.Column(scale=1):
            output = gr.Label(label="Prediction Results",
                              num_top_classes=5,
                              show_label=True)
            # Additional info
            gr.Markdown("""
            ### 📊 Result Explanation
            - The **highest score** is the most likely food type
            - Scores represent the confidence level **between 0-1**
            - The **Top 5** most likely predictions are shown
            """)

    # Sample-images section (only shown when the folder contains images).
    if sample_images:
        gr.Markdown("""
        ---
        ### 🖼️ Example Images
        Click on an example image below to try the model!
        """)
        gr.Examples(
            examples=sample_images,
            inputs=input_image,
            outputs=output,
            fn=predict,
            cache_examples=False,  # re-run the prediction on every click
            label="Sample Food Images"
        )

    # Bottom section - Tips and info
    with gr.Row():
        gr.Markdown("""
        ### 💡 Tips
        - Clear and well-lit photos give better results
        - A full view of the food improves the prediction
        - Photos containing a single type of food are ideal
        """)

    # Model info
    gr.Markdown("""
    ---
    ### 🤖 Model Information
    - **Model**: EfficientNetV2B2 (Fine-tuned)
    - **Dataset**: Food 270
    - **Number of Classes**: 270 different foods
    - **Training Size**: 224x224

    *Developer: Berker Üveyik*
    """)

    # Event handlers
    submit_btn.click(
        fn=predict,
        inputs=input_image,
        outputs=output
    )
    clear_btn.click(
        lambda: (None, None),
        inputs=None,
        outputs=[input_image, output]
    )

# Launch the application
if __name__ == "__main__":
    demo.launch(
        share=False,  # Should be False on Spaces
        debug=False   # Should be False in production
    )