File size: 1,624 Bytes
e619b9a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
# src/dataloader.py
import tensorflow as tf
import pandas as pd
import cv2
import numpy as np
import os

IMG_SIZE = (128,128)

def load_image(path):
    """Load a grayscale image as a (128, 128, 1) float32 array scaled to [0, 1].

    Intended for use via ``tf.numpy_function``, which passes the path as
    ``bytes``; plain ``str`` paths are now accepted too so the function can
    also be called directly (the unconditional ``.decode`` previously raised
    AttributeError on str input).

    A missing or unreadable file yields an all-zeros array instead of
    raising, so one bad CSV row cannot crash the input pipeline mid-epoch.

    Args:
        path: image file path as bytes (from tf.numpy_function) or str.

    Returns:
        np.ndarray of shape (128, 128, 1), dtype float32.
    """
    # tf.numpy_function hands us bytes; tolerate str for direct calls.
    if isinstance(path, bytes):
        path = path.decode("utf-8")

    # NOTE(review): the 128s below hard-code IMG_SIZE = (128, 128) from this
    # module; keep them in sync if IMG_SIZE ever changes.
    if not os.path.exists(path):
        return np.zeros((128, 128, 1), dtype=np.float32)

    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    if img is None:  # file exists but is unreadable/corrupt
        return np.zeros((128, 128, 1), dtype=np.float32)

    # cv2.resize takes (width, height); square target, so order is moot.
    img = cv2.resize(img, (128, 128))
    img = img / 255.0  # scale uint8 [0, 255] -> float [0, 1]

    # VERY IMPORTANT: add trailing channel dimension expected by conv layers.
    img = np.expand_dims(img, axis=-1)

    return img.astype(np.float32)



def parse_pair(img1_path, img2_path, label):
    """Map a (path, path, label) CSV row to ((img1, img2), label) tensors.

    Each path tensor is decoded eagerly through ``load_image``; the label
    is cast to float32 for use with a binary loss.
    """

    def _to_tensor(p):
        # tf.numpy_function erases static shape information, so the known
        # (H, W, 1) shape is restored explicitly afterwards.
        t = tf.numpy_function(load_image, [p], tf.float32)
        t.set_shape((*IMG_SIZE, 1))
        return t

    left = _to_tensor(img1_path)
    right = _to_tensor(img2_path)
    return (left, right), tf.cast(label, tf.float32)


def create_dataset(csv_file, batch_size=16, validation_split=0.2, seed=None):
    """Build (train, val) tf.data pipelines from a CSV of image pairs.

    The CSV must contain "img1", "img2" and "label" columns.

    Args:
        csv_file: path to the CSV file.
        batch_size: number of pair examples per batch.
        validation_split: fraction of rows held out for validation.
        seed: optional int passed to the row shuffle. Without it the
            train/val split differs on every call, which silently leaks
            rows between train and validation across runs — pass a seed
            for a reproducible split. Default None preserves the old
            (non-deterministic) behavior.

    Returns:
        Tuple ``(train_ds, val_ds)`` of batched, cached, prefetched
        ``tf.data.Dataset`` objects yielding ``((img1, img2), label)``.
    """
    df = pd.read_csv(csv_file)

    # Shuffle rows once so the split is not biased by file ordering.
    df = df.sample(frac=1, random_state=seed).reset_index(drop=True)

    split_idx = int(len(df) * (1 - validation_split))
    train_df = df[:split_idx]
    val_df = df[split_idx:]

    def make_ds(dataframe):
        # One pipeline per split: decode pairs in parallel, batch, then
        # cache batches in memory and overlap preprocessing with training.
        ds = tf.data.Dataset.from_tensor_slices(
            (dataframe["img1"], dataframe["img2"], dataframe["label"])
        )
        ds = ds.map(parse_pair, num_parallel_calls=tf.data.AUTOTUNE)
        ds = ds.batch(batch_size)
        ds = ds.cache().prefetch(tf.data.AUTOTUNE)
        return ds

    return make_ds(train_df), make_ds(val_df)