vu0018 committed
Commit fa3b1cb · verified · 1 Parent(s): b8bde37

Update app.py

Files changed (1):
1. app.py +33 -78
app.py CHANGED
@@ -1,87 +1,42 @@
 # app.py
 import gradio as gr
-import cv2
-import numpy as np
-from huggingface_hub import from_pretrained_keras
-import tempfile
-
-model = from_pretrained_keras("DaMsTaR/Detecto-DeepFake_Video_Detector")
-
-# Parameters
-NUM_FRAMES = 16  # frames to sample
-TARGET_SIZE = (224, 224)  # input size expected by the model
-
-def extract_frames(video_path, num_frames=NUM_FRAMES):
-    """Extract `num_frames` evenly spaced frames from the video"""
-    cap = cv2.VideoCapture(video_path)
-    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-    if frame_count <= 0:
-        raise RuntimeError("Cannot read video or video is empty")
-
-    indices = np.linspace(0, frame_count - 1, num_frames).astype(int)
-    frames = []
-    idx = 0
-    for i in range(frame_count):
-        ret, frame = cap.read()
-        if not ret:
-            break
-        if i == indices[idx]:
-            frames.append(frame)
-            idx += 1
-            if idx >= len(indices):
-                break
-    cap.release()
-    # duplicate last frame if not enough
-    while len(frames) < num_frames:
-        frames.append(frames[-1])
-    return frames
-
-def preprocess_frames(frames):
-    """Resize and normalize frames for Keras model"""
-    processed = []
-    for frame in frames:
-        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        img = cv2.resize(img, TARGET_SIZE)
-        img = img / 255.0  # normalize to [0,1]
-        processed.append(img)
-    return np.stack(processed, axis=0)
-
-def predict_deepfake(video):
-    """Predict if the video is deepfake"""
-    # Save uploaded video to temp file
-    temp_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
-    with open(temp_video, "wb") as f:
-        f.write(video.read())
-
-    frames = extract_frames(temp_video)
-    x = preprocess_frames(frames)
-
-    # Model expects batch dimension
-    preds = model.predict(x, verbose=0)  # shape: (num_frames, num_classes)
-
-    # Aggregate predictions (mean across frames)
-    mean_pred = preds.mean(axis=0)
-
-    # Assuming output: [real_prob, fake_prob]
-    fake_score = float(mean_pred[1])
-    real_score = float(mean_pred[0])
-    label = "Fake" if fake_score > real_score else "Real"
-
-    return label, float(max(real_score, fake_score)), {"Real": real_score, "Fake": fake_score}
-
-# Gradio interface
+from transformers import pipeline
+
+# Load the deepfake detection model from Hugging Face
+detector = pipeline(
+    "video-classification",
+    model="Hemgg/deepfake-vs-real-video-detection"
+)
+
+def analyze_video(video_path):
+    """
+    Run deepfake detection on uploaded video.
+    Returns:
+        label: predicted class ("FAKE" or "REAL")
+        score: confidence of the prediction
+        all_scores: full output from the model
+    """
+    results = detector(video_path)
+    top = results[0]  # assume the first element is top prediction
+    return top["label"], float(top["score"]), results
+
+# Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("# DeepFake Video Detection")
+    gr.Markdown("# DeepFake Video Detector")
+    gr.Markdown("Upload a video and the model will classify it as REAL or FAKE.")
+
     video_input = gr.Video(label="Upload video")
-    predict_btn = gr.Button("Analyze")
-    output_label = gr.Textbox(label="Prediction")
+    analyze_btn = gr.Button("Analyze Video")
+
+    output_label = gr.Textbox(label="Predicted Label")
     output_score = gr.Number(label="Confidence")
-    output_all = gr.JSON(label="All Scores")
-
-    predict_btn.click(
-        fn=predict_deepfake,
+    output_all = gr.JSON(label="All Predictions")
+
+    analyze_btn.click(
+        fn=analyze_video,
         inputs=[video_input],
        outputs=[output_label, output_score, output_all]
     )
 
-demo.launch(server_name="0.0.0.0", share=True)
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", share=True)
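
A minimal way to sanity-check the new pipeline wiring is to call analyze_video directly, as sketched below. This sketch makes a few assumptions not stated in the commit: that the Hemgg/deepfake-vs-real-video-detection checkpoint is downloadable, that the transformers video-classification pipeline returns a score-sorted list of {"label", "score"} dicts (which is what results[0] in analyze_video relies on), that a video decoding backend such as PyAV is installed, and that "sample.mp4" stands in for any short local clip.

# smoke_test.py: hypothetical local check of analyze_video (not part of app.py)
# Importing app is safe here because demo.launch() now sits behind the
# `if __name__ == "__main__":` guard added in this commit.
from app import analyze_video

label, score, all_scores = analyze_video("sample.mp4")  # placeholder path
print(f"Prediction: {label} (confidence {score:.2%})")
print("Full pipeline output:", all_scores)

Running a check like this once before demo.launch() also warms the model cache, so the first Gradio request does not pay the download cost.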