Hemant0000 committed on
Commit
05afd51
·
verified ·
1 Parent(s): 1e9366f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +283 -0
app.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ import matplotlib.image as mpimg
5
+ import seaborn as sns
6
+ %matplotlib inline
7
+
8
+ np.random.seed(2)
9
+
10
+ from sklearn.model_selection import train_test_split
11
+ from sklearn.metrics import confusion_matrix
12
+ import itertools
13
+
14
+ from tensorflow.keras.utils import to_categorical
15
+ from keras.models import Sequential
16
+ from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
17
+ from keras.optimizers import RMSprop
18
+
19
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
20
+
21
+ from keras.callbacks import ReduceLROnPlateau, EarlyStopping
22
+
23
+ sns.set(style='white', context='notebook', palette='deep')
24
+
25
+ from PIL import Image
26
+ import os
27
+ from pylab import *
28
+ import re
29
+ from PIL import Image, ImageChops, ImageEnhance
30
+
31
def get_imlist(path):
    """Return the full paths of all .jpg/.png files directly inside *path*."""
    # str.endswith accepts a tuple of suffixes — replaces the chained `or` test.
    return [os.path.join(path, f) for f in os.listdir(path)
            if f.endswith(('.jpg', '.png'))]
33
+
34
def convert_to_ela_image(path, quality):
    """Compute the Error Level Analysis (ELA) image for the file at *path*.

    The image is re-saved as JPEG at the given *quality*; the pixel-wise
    difference between the original and the re-saved copy is brightness-scaled
    so the strongest difference maps to 255. Returns a PIL RGB image.
    Side effect: writes '<path-without-ext>.resaved.jpg' next to the input.
    """
    # BUG FIX: the original used path.split('.')[0], which truncates at the
    # FIRST dot, producing a wrong resave path for any dotted file name or
    # dotted directory. os.path.splitext strips only the final extension.
    base, _ = os.path.splitext(path)
    resaved_filename = base + '.resaved.jpg'
    # (the unused ELA_filename variable from the original has been dropped)

    im = Image.open(path).convert('RGB')
    im.save(resaved_filename, 'JPEG', quality=quality)
    resaved_im = Image.open(resaved_filename)

    ela_im = ImageChops.difference(im, resaved_im)

    # Scale brightness so the largest per-channel difference becomes 255.
    extrema = ela_im.getextrema()
    max_diff = max(ex[1] for ex in extrema)
    if max_diff == 0:
        max_diff = 1  # identical images: avoid division by zero

    scale = 255.0 / max_diff
    return ImageEnhance.Brightness(ela_im).enhance(scale)
54
+
55
# Mount Google Drive so the training images are reachable (Colab-only step).
from google.colab import drive
drive.mount('/content/drive')

# Sample images used to eyeball one real and one fake example.
real_sample = '/content/drive/MyDrive/Images for Deep Fake/real_images/6401_0.jpg'
fake_sample = '/content/drive/MyDrive/Images for Deep Fake/fake_images/1601_0.jpg'

# NOTE(review): these bare expressions only render inside a notebook; run as
# a plain script they compute the images and discard the results.
Image.open(real_sample)
convert_to_ela_image(real_sample, 90)

Image.open(fake_sample)
convert_to_ela_image(fake_sample, 90)
65
+
66
+ import os
67
+ import csv
68
+ from PIL import Image # Use PIL for image processing
69
+
70
def create_image_dataset_csv(fake_folder, real_folder, output_csv):
    """Write a CSV manifest mapping image file paths to class labels.

    Scans *fake_folder* (label 0) and *real_folder* (label 1) for .jpg/.png
    files and writes rows of (file_path, label) to *output_csv* with a
    'file_path,label' header row.
    """
    image_data = []

    # One loop handles both folders — the original duplicated this block.
    for folder, label in ((fake_folder, 0), (real_folder, 1)):
        for filename in os.listdir(folder):
            if filename.endswith(('.jpg', '.png')):  # adjust for other formats
                image_data.append((os.path.join(folder, filename), label))

    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(output_csv, 'w', newline='') as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(['file_path', 'label'])  # header row
        csv_writer.writerows(image_data)

    print(f"CSV file '{output_csv}' has been created successfully with {len(image_data)} entries.")
97
+
98
# Example usage: build the CSV manifest from the Drive image folders.
fake_images_folder = '/content/drive/MyDrive/Images for Deep Fake/fake_images'
real_images_folder = '/content/drive/MyDrive/Images for Deep Fake/real_images'
output_csv_file = 'image_dataset.csv'

create_image_dataset_csv(fake_images_folder, real_images_folder, output_csv_file)
104
+
105
import pandas as pd

# dataset = pd.read_csv('datasets/dataset.csv')
dataset = pd.read_csv('/content/image_dataset.csv')

dataset.head()

X = []  # flattened ELA images, scaled to [0, 1]
Y = []  # integer labels: 0 = fake, 1 = real

for index, row in dataset.iterrows():
    # BUG FIX: access columns by name. Positional row[0]/row[1] on a labeled
    # Series is deprecated in pandas 2.x (removed later) and silently breaks
    # if the CSV column order changes. np.array replaces the bare `array`
    # that was only in scope via `from pylab import *`.
    X.append(np.array(convert_to_ela_image(row['file_path'], 90).resize((128, 128))).flatten() / 255.0)
    Y.append(row['label'])

X = np.array(X)
Y = to_categorical(Y, 2)  # one-hot encode the two classes

X = X.reshape(-1, 128, 128, 3)  # restore image shape for the Conv2D input

# 80/20 train/validation split, fixed seed for reproducibility.
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2, random_state=5)
127
+
128
# CNN: two 5x5 conv layers -> max-pool -> dropout -> dense softmax head.
model = Sequential()

model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='valid',
                 activation='relu', input_shape=(128, 128, 3)))
print("Input: ", model.input_shape)
print("Output: ", model.output_shape)

model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='valid',
                 activation='relu'))
print("Input: ", model.input_shape)
print("Output: ", model.output_shape)

model.add(MaxPool2D(pool_size=(2, 2)))

model.add(Dropout(0.25))
print("Input: ", model.input_shape)
print("Output: ", model.output_shape)

model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
# Two-way softmax: index 0 = fake, index 1 = real (matches the CSV labels).
model.add(Dense(2, activation="softmax"))

model.summary()

# Dropped the legacy decay=0.0 argument: it was the default anyway and the
# parameter was removed from RMSprop in newer Keras releases.
optimizer = RMSprop(learning_rate=0.0005, rho=0.9, epsilon=1e-08)

model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])

# BUG FIX: TF2/Keras logs the metric as 'val_accuracy' (see the history keys
# plotted below), not 'val_acc'. Monitoring 'val_acc' meant the callback never
# found the metric, so early stopping silently never triggered.
early_stopping = EarlyStopping(monitor='val_accuracy',
                               min_delta=0,
                               patience=2,
                               verbose=0, mode='max')

epochs = 10
batch_size = 100

history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
                    validation_data=(X_val, Y_val), verbose=2, callbacks=[early_stopping])
167
+
168
# Plot the loss and accuracy curves for training and validation.
fig, ax = plt.subplots(2, 1)

curve_specs = (
    (ax[0], 'loss', 'val_loss', "Training loss", "validation loss"),
    (ax[1], 'accuracy', 'val_accuracy', "Training accuracy", "Validation accuracy"),
)
for axis, train_key, val_key, train_label, val_label in curve_specs:
    axis.plot(history.history[train_key], color='b', label=train_label)
    axis.plot(history.history[val_key], color='r', label=val_label)
    legend = axis.legend(loc='best', shadow=True)
177
+
178
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # BUG FIX: normalize BEFORE drawing. The original normalized after
    # plt.imshow, so the heat map colors showed raw counts while the cell
    # text showed normalized rates — the two disagreed whenever
    # normalize=True.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # White text on dark cells, black text on light cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
208
+
209
# Evaluate on the validation split: predicted class index vs. true class index.
Y_pred = model.predict(X_val)
Y_pred_classes = np.argmax(Y_pred, axis=1)  # softmax probabilities -> class index
Y_true = np.argmax(Y_val, axis=1)           # one-hot targets -> class index

# Confusion matrix rendered as a percentage heat map.
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
sns.heatmap(confusion_mtx / np.sum(confusion_mtx), annot=True,
            fmt='.2%', cmap='Blues')
223
+
224
from sklearn.metrics import classification_report

# Per-class precision/recall/F1 on the validation split.
report = classification_report(Y_true, Y_pred_classes)
print(report)

# Persist the trained CNN so the Gradio app below can reload it.
model.save("fake-image-detection.h5")
230
+
231
+ import gradio as gr
232
+ import numpy as np
233
+ from PIL import Image, ImageChops, ImageEnhance
234
+ from keras.models import load_model
235
+ import tensorflow as tf
236
+
237
# Reload the CNN trained above for inference in the Gradio app.
MODEL_PATH = "fake-image-detection.h5"
model = load_model(MODEL_PATH)
239
+
240
# Function to convert a PIL image to its ELA (Error Level Analysis) form.
def convert_to_ela_image(image, quality=90):
    """Return the ELA image for an in-memory PIL *image*.

    Re-saves the image as JPEG at *quality*, diffs it against the original,
    and brightness-scales the difference so the strongest deviation maps
    to 255. Side effect: writes 'resaved_image.jpg' in the working directory.
    """
    # BUG FIX: convert the INPUT to RGB before diffing. Gradio uploads can be
    # RGBA or grayscale, and ImageChops.difference raises ValueError when the
    # two images have different modes; the original only converted the
    # re-saved copy, leaving `image` in its upload mode.
    image = image.convert('RGB')
    image.save("resaved_image.jpg", 'JPEG', quality=quality)
    resaved_image = Image.open("resaved_image.jpg")

    ela_image = ImageChops.difference(image, resaved_image)

    # Scale brightness so the largest per-channel difference becomes 255.
    extrema = ela_image.getextrema()
    max_diff = max(ex[1] for ex in extrema)
    if max_diff == 0:
        max_diff = 1  # identical images: avoid division by zero
    scale = 255.0 / max_diff

    ela_image = ImageEnhance.Brightness(ela_image).enhance(scale)
    return ela_image
257
# Prediction function
def predict(image):
    """Classify a PIL image as 'Fake' or 'Real' using ELA + the trained CNN."""
    # ELA preprocessing, matching exactly what the model was trained on:
    # 128x128 ELA image, float pixels scaled to [0, 1], plus a batch axis.
    ela_image = convert_to_ela_image(image).resize((128, 128))
    ela_array = np.array(ela_image).astype('float32') / 255.0
    ela_array = ela_array.reshape(1, 128, 128, 3)

    # Softmax over {0: fake, 1: real}; pick the arg-max class.
    probabilities = model.predict(ela_array)
    class_idx = np.argmax(probabilities, axis=1)[0]

    labels = {0: "Fake", 1: "Real"}
    return labels[class_idx]
273
# Gradio interface: one image upload in, one predicted label out.
interface = gr.Interface(
    fn=predict,                   # prediction function defined above
    inputs=gr.Image(type="pil"),  # deliver the upload as a PIL image
    outputs="label",
    title="Deep Fake Detector",
    description="Upload an image to detect if it's a real or fake image using ELA and a trained CNN model."
)

# Launch the interface
interface.launch()