sarvansh committed on
Commit
a745881
·
1 Parent(s): bb76e17

added feature to upload video starting from a specific time

Browse files
Files changed (2) hide show
  1. .gitignore +3 -0
  2. app.py +215 -125
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ key.txt
2
+ key.txt.pub
3
+
app.py CHANGED
@@ -11,6 +11,7 @@ import time
11
  import pandas as pd
12
  import matplotlib.pyplot as plt
13
  import base64
 
14
  # TensorFlow log level
15
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
16
 
@@ -20,17 +21,21 @@ HEIGHT, WIDTH = 299, 299
20
 
21
  # Model builder
22
  def build_model(lstm_hidden_size=256, num_classes=2, dropout_rate=0.5):
23
- inputs = layers.Input(shape=(TIME_STEPS, HEIGHT, WIDTH, 3))
24
- base_model = tf.keras.applications.Xception(weights='imagenet', include_top=False, pooling='avg')
25
- x = layers.TimeDistributed(base_model)(inputs)
26
- x = layers.LSTM(lstm_hidden_size)(x)
27
- x = layers.Dropout(dropout_rate)(x)
28
- outputs = layers.Dense(num_classes, activation='softmax')(x)
29
- model = tf.keras.Model(inputs, outputs)
30
- return model
 
 
 
 
31
 
32
  # Load model
33
- model_path = r'COMBINED_best_Phase1.keras'
34
  model = build_model()
35
  model.load_weights(model_path)
36
 
@@ -56,14 +61,15 @@ def preprocess_image(image):
56
 
57
  return image
58
 
59
- def extract_faces_from_video(video_path, num_frames=TIME_STEPS, skip_frames=0):
60
  """
61
- Extract faces from video with more robust frame selection
62
 
63
  Args:
64
  video_path (str): Path to the video file
 
 
65
  num_frames (int): Number of frames to extract
66
- skip_frames (int): Number of initial frames to skip
67
 
68
  Returns:
69
  tuple: (video_array, frames) or (None, None) if no faces detected
@@ -71,17 +77,23 @@ def extract_faces_from_video(video_path, num_frames=TIME_STEPS, skip_frames=0):
71
  detector = MTCNN()
72
  cap = cv2.VideoCapture(video_path)
73
 
74
- # Get total frame count
 
75
  frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
 
76
 
77
- # Validate input parameters
78
- skip_frames = max(0, min(skip_frames, frame_count - num_frames))
 
79
 
80
- # Calculate frame indices to sample
81
- frame_indices = np.linspace(skip_frames, frame_count - 1, num_frames, dtype=int)
 
 
 
 
82
 
83
  frames = []
84
- processed_frames = []
85
 
86
  for idx in range(frame_count):
87
  success, frame = cap.read()
@@ -106,10 +118,13 @@ def extract_faces_from_video(video_path, num_frames=TIME_STEPS, skip_frames=0):
106
  face = frame_rgb[y:y2, x:x2]
107
 
108
  # Convert to PIL Image and preprocess
109
- face_image = Image.fromarray(face)
110
- processed_face = preprocess_image(face_image)
111
-
112
- frames.append(processed_face)
 
 
 
113
  else:
114
  # If no face detected, use a zero array
115
  frames.append(np.zeros((HEIGHT, WIDTH, 3), dtype=np.float32))
@@ -132,31 +147,37 @@ def extract_faces_from_video(video_path, num_frames=TIME_STEPS, skip_frames=0):
132
 
133
  return video_array, frames
134
 
135
- def make_prediction(video_file):
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  """
137
- Make prediction on the uploaded video file
138
 
139
  Args:
140
- video_file: Uploaded video file object
 
141
 
142
  Returns:
143
  tuple: (predicted_class, probabilities, frames) or (None, None, None) if error
144
  """
145
  try:
146
- # Ensure the directory exists
147
- os.makedirs('temp', exist_ok=True)
148
-
149
- # Save the uploaded file
150
- temp_video_path = os.path.join('temp', 'temp_video.mp4')
151
- with open(temp_video_path, "wb") as f:
152
- f.write(video_file.read())
153
-
154
- # Extract faces and video array
155
- video_array, frames = extract_faces_from_video(temp_video_path)
156
 
157
  # Validate the video array
158
  if video_array is None or video_array.shape[1] != TIME_STEPS:
159
- st.error("Unable to process video. Please ensure the video contains clear, visible faces.")
160
  return None, None, None
161
 
162
  # Make prediction
@@ -169,12 +190,20 @@ def make_prediction(video_file):
169
  except Exception as e:
170
  st.error(f"An error occurred while processing the video: {str(e)}")
171
  return None, None, None
172
- finally:
173
- # Clean up temporary file
174
- if os.path.exists(temp_video_path):
175
- os.remove(temp_video_path)
176
 
177
- # (Keep all the previous imports and functions)
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  # Streamlit UI
180
  st.set_page_config(page_title="Not Ur Face", layout="wide")
@@ -191,16 +220,22 @@ def get_base64_image(file_path):
191
  image_path = "Image2.png" # Ensure this is the correct path to your saved image
192
 
193
  # Convert image to Base64
194
- image_base64 = get_base64_image(image_path)
 
 
 
195
 
196
  # Header Section with Image
197
  with header_col1:
198
- image = Image.open("Image2.png")
199
- desired_height = 300 # Reduced height
200
- aspect_ratio = image.width / image.height
201
- new_width = int(desired_height * aspect_ratio)
202
- resized_image = image.resize((new_width, desired_height))
203
- # st.image(resized_image, use_container_width=True)
 
 
 
204
 
205
  # Title and Description
206
  with header_col2:
@@ -233,101 +268,156 @@ with header_col2:
233
  )
234
 
235
  # HTML content for the header
236
- st.markdown(
237
- f"""
238
- <div class="header-container">
239
- <img src="data:image/png;base64,{image_base64}" class="header-image" />
240
- <div class="header-text">NOT UR FACE: Video Analysis for Real & Synthetic Detection</div>
241
- </div>
242
- """,
243
- unsafe_allow_html=True,
244
- )
 
 
 
 
245
 
246
  # Sidebar
247
  st.sidebar.title("How It Works")
248
  st.sidebar.markdown(
249
  """
250
  1. 📤 **Upload Video:**
251
- - Choose a video file (mp4, mov, avi)
252
- (Disclaimer: this is a test project so it only works for a video with DeepFake within the first 1.5 seconds of the videos so upload the video such that suspected deepfake is within 2 seconds and also not trained on videos fully generated by AI)
253
- 2. 🔍 **Process Frames:**
 
254
  - Detect and analyze faces
255
- 3. 🤖 **AI Analysis:**
256
  - Predict 'Real' or 'Fake'
257
- 4. 📊 **Detailed Results:**
258
  - View probabilities and insights
 
 
259
  """
260
  )
261
- st.sidebar.info("My github: sarvansh30")
 
262
 
263
  # Upload video
264
  st.subheader("🎥 Upload Your Video")
265
  video_file = st.file_uploader("Choose a video file", type=["mp4", "mov", "avi"], label_visibility="collapsed")
266
- st.markdown(
267
- """
268
- <style>
269
- .fixed-height-col {
270
- height: 500px; /* Set the height you want */
271
- display: flex;
272
- justify-content: center;
273
- align-items: center;
274
- border: 1px solid #ccc; /* Optional: Adds a border for visual distinction */
275
- padding: 10px; /* Optional: Adds padding */
276
- }
277
- </style>
278
- """,
279
- unsafe_allow_html=True,
280
- )
281
  if video_file is not None:
282
- # Create columns to make the layout more compact
283
- video_col, results_col = st.columns([1, 1])
 
 
 
 
 
 
 
 
 
284
 
285
- # Video Display
286
  with video_col:
287
  st.subheader("Uploaded Video")
288
- st.video(video_file)
289
 
290
- # Processing and Results
291
- with results_col:
292
- st.subheader("Analysis")
293
- start_time = time.time()
294
-
295
- # Loading animation
296
- with st.spinner("🚀 Processing video... Please wait!"):
297
- predicted_class, probabilities, frames = make_prediction(video_file)
298
-
299
- if predicted_class is None: # No faces detected
300
- st.error("No faces detected in the uploaded video. Please upload a different video.")
301
- else:
302
- end_time = time.time()
303
- processing_time = end_time - start_time
304
-
305
- # Display results
306
- if predicted_class == 0:
307
- st.success("The video is classified as **Real**!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
308
  else:
309
- st.error("The video is classified as **Fake**!")
310
-
311
- st.write(f"**Prediction Confidence:**")
312
- st.progress(int(probabilities[predicted_class] * 100))
313
-
314
- # Detailed Results Tabs
315
- tab1, tab2, tab3 = st.tabs(["📊 Probabilities", "🖼️ Frame Previews", "⏱️ Processing Time"])
316
-
317
- with tab1:
318
- st.subheader("Class Probabilities")
319
- st.bar_chart({"Real": [probabilities[0]], "Fake": [probabilities[1]]})
320
-
321
- with tab2:
322
- st.subheader("Frame Previews")
323
- st.write("Key frames analyzed during the process:")
324
- cols = st.columns(5)
325
- for i, frame in enumerate(frames[:10]):
326
- frame = np.clip(frame, 0, 1)
327
- frame = (frame * 255).astype(np.uint8)
328
- with cols[i % 5]:
329
- st.image(frame, caption=f"Frame {i+1}", use_container_width=True)
330
-
331
- with tab3:
332
- st.subheader("Processing Details")
333
- st.write(f"**Time Taken:** {processing_time:.2f} seconds")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  import pandas as pd
12
  import matplotlib.pyplot as plt
13
  import base64
14
+ import tempfile
15
  # TensorFlow log level
16
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
17
 
 
21
 
22
  # Model builder
23
def build_model(lstm_hidden_size=256, num_classes=2, dropout_rate=0.5):
    """Build the Xception + LSTM video classifier.

    Args:
        lstm_hidden_size (int): Number of units in the LSTM layer.
        num_classes (int): Size of the softmax output (0 = Real, 1 = Fake).
        dropout_rate (float): Dropout applied before the classifier head.

    Returns:
        tf.keras.Model: Uncompiled model expecting input of shape
        (batch, TIME_STEPS, HEIGHT, WIDTH, 3).
    """
    # Explicit name scope keeps layer names stable between builds.
    with tf.keras.backend.name_scope('model'):
        clip_input = layers.Input(shape=(TIME_STEPS, HEIGHT, WIDTH, 3))

        # Frozen ImageNet Xception backbone produces one feature vector per frame.
        backbone = tf.keras.applications.Xception(weights='imagenet', include_top=False, pooling='avg')
        backbone.trainable = False

        features = layers.TimeDistributed(backbone)(clip_input)
        temporal = layers.LSTM(lstm_hidden_size, return_sequences=False)(features)
        regularized = layers.Dropout(dropout_rate)(temporal)
        class_probs = layers.Dense(num_classes, activation='softmax')(regularized)

        net = tf.keras.Model(clip_input, class_probs)
    return net
36
 
37
# Load model
# NOTE(review): the hard-coded absolute Windows path (D:\Pro-jects\...) only
# resolves on the author's machine and breaks any other deployment; default to
# the repo-relative weights file used previously, overridable via MODEL_PATH.
model_path = os.environ.get('MODEL_PATH', r'COMBINED_best_Phase1.keras')
model = build_model()
model.load_weights(model_path)
41
 
 
61
 
62
  return image
63
 
64
+ def extract_faces_from_video(video_path, start_time=0, duration=2, num_frames=TIME_STEPS):
65
  """
66
+ Extract faces from a specific time window in the video
67
 
68
  Args:
69
  video_path (str): Path to the video file
70
+ start_time (float): Start time in seconds
71
+ duration (float): Duration in seconds
72
  num_frames (int): Number of frames to extract
 
73
 
74
  Returns:
75
  tuple: (video_array, frames) or (None, None) if no faces detected
 
77
  detector = MTCNN()
78
  cap = cv2.VideoCapture(video_path)
79
 
80
+ # Get video properties
81
+ fps = cap.get(cv2.CAP_PROP_FPS)
82
  frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
83
+ total_duration = frame_count / fps
84
 
85
+ # Check if the requested window is valid
86
+ if start_time >= total_duration:
87
+ return None, None
88
 
89
+ # Calculate frame indices to sample within the window
90
+ start_frame = int(start_time * fps)
91
+ end_frame = min(int((start_time + duration) * fps), frame_count)
92
+
93
+ # Calculate frames to sample
94
+ frame_indices = np.linspace(start_frame, end_frame - 1, num_frames, dtype=int)
95
 
96
  frames = []
 
97
 
98
  for idx in range(frame_count):
99
  success, frame = cap.read()
 
118
  face = frame_rgb[y:y2, x:x2]
119
 
120
  # Convert to PIL Image and preprocess
121
+ try:
122
+ face_image = Image.fromarray(face)
123
+ processed_face = preprocess_image(face_image)
124
+ frames.append(processed_face)
125
+ except Exception as e:
126
+ # If face processing fails, use a zero array
127
+ frames.append(np.zeros((HEIGHT, WIDTH, 3), dtype=np.float32))
128
  else:
129
  # If no face detected, use a zero array
130
  frames.append(np.zeros((HEIGHT, WIDTH, 3), dtype=np.float32))
 
147
 
148
  return video_array, frames
149
 
150
def get_video_details(video_path):
    """Return basic properties of a video file.

    Args:
        video_path (str): Path to the video file.

    Returns:
        tuple: (duration_seconds, width, height, fps).

    Raises:
        ValueError: If the file cannot be opened or reports a non-positive
            FPS (which would otherwise surface as an opaque ZeroDivisionError).
    """
    cap = cv2.VideoCapture(video_path)
    try:
        if not cap.isOpened():
            raise ValueError(f"Unable to open video file: {video_path}")
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    finally:
        # Release the capture even if a property read fails.
        cap.release()
    if fps <= 0:
        raise ValueError(f"Video reports invalid FPS ({fps}): {video_path}")
    duration = frame_count / fps
    return duration, width, height, fps
162
+
163
+ def make_prediction(video_path, start_time):
164
  """
165
+ Make prediction on the selected video window
166
 
167
  Args:
168
+ video_path: Path to the video file
169
+ start_time: Start time in seconds for the 2-second window
170
 
171
  Returns:
172
  tuple: (predicted_class, probabilities, frames) or (None, None, None) if error
173
  """
174
  try:
175
+ # Extract faces and video array from the specified time window
176
+ video_array, frames = extract_faces_from_video(video_path, start_time=start_time, duration=2)
 
 
 
 
 
 
 
 
177
 
178
  # Validate the video array
179
  if video_array is None or video_array.shape[1] != TIME_STEPS:
180
+ st.error("Unable to process video segment. Please ensure the selected portion contains clear, visible faces.")
181
  return None, None, None
182
 
183
  # Make prediction
 
190
  except Exception as e:
191
  st.error(f"An error occurred while processing the video: {str(e)}")
192
  return None, None, None
 
 
 
 
193
 
194
def generate_thumbnail(video_path, timestamp):
    """Grab a single frame at ``timestamp`` seconds.

    Args:
        video_path (str): Path to the video file.
        timestamp (float): Position in seconds to seek to.

    Returns:
        numpy.ndarray | None: The frame in RGB order, or None if the
        frame could not be read.
    """
    capture = cv2.VideoCapture(video_path)
    # CAP_PROP_POS_MSEC expects milliseconds.
    capture.set(cv2.CAP_PROP_POS_MSEC, timestamp * 1000)
    ok, bgr_frame = capture.read()
    capture.release()

    if not ok:
        return None
    # OpenCV decodes to BGR; convert so display libraries get RGB.
    return cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
207
 
208
  # Streamlit UI
209
  st.set_page_config(page_title="Not Ur Face", layout="wide")
 
220
  image_path = "Image2.png" # Ensure this is the correct path to your saved image
221
 
222
  # Convert image to Base64
223
+ try:
224
+ image_base64 = get_base64_image(image_path)
225
+ except:
226
+ image_base64 = "" # Default empty if image not found
227
 
228
  # Header Section with Image
229
  with header_col1:
230
+ try:
231
+ image = Image.open("Image2.png")
232
+ desired_height = 300 # Reduced height
233
+ aspect_ratio = image.width / image.height
234
+ new_width = int(desired_height * aspect_ratio)
235
+ resized_image = image.resize((new_width, desired_height))
236
+ # st.image(resized_image, use_container_width=True)
237
+ except:
238
+ pass # Skip if image not found
239
 
240
  # Title and Description
241
  with header_col2:
 
268
  )
269
 
270
  # HTML content for the header
271
+ if image_base64:
272
+ st.markdown(
273
+ f"""
274
+ <div class="header-container">
275
+ <img src="data:image/png;base64,{image_base64}" class="header-image" />
276
+ <div class="header-text">NOT UR FACE: Video Analysis for Real & Synthetic Detection</div>
277
+ </div>
278
+ """,
279
+ unsafe_allow_html=True,
280
+ )
281
+ else:
282
+ # Fallback if image is not available
283
+ st.title("NOT UR FACE: Video Analysis for Real & Synthetic Detection")
284
 
285
  # Sidebar
286
  st.sidebar.title("How It Works")
287
  st.sidebar.markdown(
288
  """
289
  1. 📤 **Upload Video:**
290
+ - Choose a video file (mp4, mov, avi)
291
+ 2. 🎯 **Select Time Window:**
292
+ - Choose a starting point for the 2-second window from your video
293
+ 3. 🔍 **Process Frames:**
294
  - Detect and analyze faces
295
+ 4. 🤖 **AI Analysis:**
296
  - Predict 'Real' or 'Fake'
297
+ 5. 📊 **Detailed Results:**
298
  - View probabilities and insights
299
+
300
+ **Disclaimer:** The model is trained on FaceForensics++ and CelebDFV2 datasets so it works well on deepfake generation techniques used in these datasets. Model may not perform well for AI generated videos.
301
  """
302
  )
303
+ st.sidebar.info(f"""Made by: Sarvansh Pachori✨
304
+ **GitHub:** sarvansh30""" )
305
 
306
  # Upload video
307
  st.subheader("🎥 Upload Your Video")
308
  video_file = st.file_uploader("Choose a video file", type=["mp4", "mov", "avi"], label_visibility="collapsed")
309
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
  if video_file is not None:
311
+ # Save the uploaded video to a temporary file
312
+ temp_dir = tempfile.mkdtemp()
313
+ temp_file = os.path.join(temp_dir, f"temp_video.mp4")
314
+ with open(temp_file, "wb") as f:
315
+ f.write(video_file.read())
316
+
317
+ # Get video details
318
+ video_duration, video_width, video_height, fps = get_video_details(temp_file)
319
+
320
+ # Create columns for video display and window selection
321
+ video_col, selection_col = st.columns([3, 2])
322
 
 
323
  with video_col:
324
  st.subheader("Uploaded Video")
325
+ st.video(temp_file)
326
 
327
+ with selection_col:
328
+ st.subheader("Select 2-Second Window")
329
+ # Ensure max value is at least the video duration
330
+ max_start_time = max(0, video_duration - 2)
331
+
332
+ # Show slider for selecting start time
333
+ start_time = st.slider(
334
+ "Select starting point (seconds):",
335
+ min_value=0.0,
336
+ max_value=max_start_time,
337
+ value=0.0,
338
+ step=0.5
339
+ )
340
+
341
+ # Show thumbnail of selected starting point
342
+ thumbnail = generate_thumbnail(temp_file, start_time)
343
+ if thumbnail is not None:
344
+ # Fixed: Changed use_column_width to use_container_width
345
+ st.image(thumbnail, caption=f"Starting at {start_time:.1f}s", use_container_width=True)
346
+
347
+ # Process button
348
+ process_button = st.button("Process Selected Window", key="process_window")
349
+
350
+ # Process the selected window when the button is clicked
351
+ if process_button:
352
+ st.subheader("Analysis of Selected Window")
353
+
354
+ # Analysis columns
355
+ results_col1, results_col2 = st.columns([1, 1])
356
+
357
+ with results_col1:
358
+ # Loading animation
359
+ with st.spinner("🚀 Processing video window... Please wait!"):
360
+ start_process_time = time.time()
361
+ predicted_class, probabilities, frames = make_prediction(temp_file, start_time)
362
+ end_process_time = time.time()
363
+ processing_time = end_process_time - start_process_time
364
+
365
+ if predicted_class is None:
366
+ st.error("No faces detected in the selected window. Please select a different portion of the video.")
367
  else:
368
+ # Display results
369
+ if predicted_class == 0:
370
+ st.success("The selected video window is classified as **Real**!")
371
+ else:
372
+ st.error("The selected video window is classified as **Fake**!")
373
+
374
+ st.write(f"**Prediction Confidence:**")
375
+ st.progress(int(probabilities[predicted_class] * 100))
376
+
377
+ with results_col2:
378
+ if predicted_class is not None:
379
+ st.subheader("Class Probabilities")
380
+ st.bar_chart({"Real": [probabilities[0]], "Fake": [probabilities[1]]})
381
+
382
+ # Additional tabs for detailed results
383
+ if predicted_class is not None:
384
+ tab1, tab2 = st.tabs(["🖼️ Frame Previews", "⏱️ Processing Details"])
385
+
386
+ with tab1:
387
+ st.subheader("Frame Previews")
388
+ st.write("Key frames analyzed during the process:")
389
+ cols = st.columns(5)
390
+ for i, frame in enumerate(frames[:10]):
391
+ frame = np.clip(frame, 0, 1)
392
+ frame = (frame * 255).astype(np.uint8)
393
+ with cols[i % 5]:
394
+ # Fixed: Changed use_column_width to use_container_width
395
+ st.image(frame, caption=f"Frame {i+1}", use_container_width=True)
396
+
397
+ with tab2:
398
+ st.subheader("Processing Details")
399
+ st.write(f"**Time Window:** {start_time:.1f}s to {min(start_time + 2, video_duration):.1f}s")
400
+ st.write(f"**Processing Time:** {processing_time:.2f} seconds")
401
+ st.write(f"**Frames Analyzed:** {TIME_STEPS}")
402
+ st.write(f"**Video FPS:** {fps:.2f}")
403
+
404
+ # Clean up temp files when done
405
+ try:
406
+ os.remove(temp_file)
407
+ os.rmdir(temp_dir)
408
+ except:
409
+ pass # Ignore clean-up errors
410
+ else:
411
+ # Display placeholder when no video is uploaded
412
+ st.markdown(
413
+ """
414
+ <div style="display: flex; justify-content: center; align-items: center;
415
+ height: 300px; border: 2px dashed #aaa; border-radius: 5px;">
416
+ <div style="text-align: center;">
417
+ <h3>Upload a video to get started</h3>
418
+ <p>Supported formats: MP4, MOV, AVI</p>
419
+ </div>
420
+ </div>
421
+ """,
422
+ unsafe_allow_html=True
423
+ )