kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
7,668,213
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<train_model>
!pip install /kaggle/input/dfdcpackages/dlib-19.19.0-cp36-cp36m-linux_x86_64.whl
Deepfake Detection Challenge
7,668,213
<load_from_csv><EOS>
DATA_PREFIX = '/kaggle/input' SKIP_FRAMES = 75 detector = dlib.cnn_face_detection_model_v1(os.path.join(DATA_PREFIX, 'dfdcpackages', 'mmod_human_face_detector.dat')) sp = dlib.shape_predictor(os.path.join(DATA_PREFIX, 'dfdcpackages', 'shape_predictor_5_face_landmarks.dat')) predictor = dlib.deep_fake_detection_model_v1(os.path.join(DATA_PREFIX, 'dfdcpackages', 'deepfake_detector.dnn')) def align_face(frame, detection_sp): x_center = int(( detection_sp.part(0 ).x + detection_sp.part(2 ).x + detection_sp.part(4 ).x)/ 3) y_center = int(( detection_sp.part(4 ).y + detection_sp.part(0 ).y + detection_sp.part(2 ).y)/ 3) w = 2 * abs(detection_sp.part(0 ).x - detection_sp.part(2 ).x) h = w shape = frame.shape face_crop = frame[ max(int(y_center - h), 0):min(int(y_center + h), shape[0]), max(int(x_center - w), 0):min(int(x_center + w), shape[1]) ] return cv2.resize(face_crop,(150,150)) def align_face_dlib(frame, detection_sp): detections = dlib.full_object_detections() detections.append(detection_sp) return dlib.get_face_chips(frame, detections, size=150)[0] def predict_fake(face): return predictor.predict(face)[0] def process_frame(frame): labels = [] dets = detector(frame, 1) batch_faces = dlib.full_object_detections() for k,d in enumerate(dets): face = align_face_dlib(frame, sp(frame, d.rect)) labels.append(predict_fake(face)) return labels def process_video(video_filename): frame_labels = [] frames = [] cap = cv2.VideoCapture(video_filename) frame_count = 0 while cap.isOpened() : ret = cap.grab() frame_count += 1 if not ret: break if frame_count % SKIP_FRAMES: continue _, frame = cap.retrieve() frame_labels.extend(process_frame(frame)) cap.release() fakeness = statistics.mean(frame_labels) fakeness = 0.1 + fakeness * 0.8 return fakeness def single_log_loss(prediction, groundtruth): return groundtruth * math.log(prediction)+(1-groundtruth)* math.log(1-prediction) def estimate_loss(predictions, all_correct=True): result = 0 for p in predictions: if all_correct: result += 
single_log_loss(p, 1 if p > 0.5 else 0) else: result += single_log_loss(p, 0 if p > 0.5 else 1) return -result/len(predictions) predictions = [] filenames = glob.glob(os.path.join(DATA_PREFIX, 'deepfake-detection-challenge/test_videos/*.mp4')) sub = pd.read_csv(os.path.join(DATA_PREFIX, 'deepfake-detection-challenge/sample_submission.csv')) sub = sub.set_index('filename', drop=False) print('Initialize submission') for filename in tqdm.tqdm(filenames): fn = filename.split('/')[-1] sub.loc[fn, 'label'] = 0.5 print('CUDA usage: {}'.format(dlib.DLIB_USE_CUDA)) for filename in tqdm.tqdm(filenames): fn = filename.split('/')[-1] sub.loc[fn, 'label'] = 0.5 try: start = timer() prediction = process_video(filename) sub.loc[fn, 'label'] = prediction sub.to_csv('submission.csv', index=False) predictions.append(prediction) print('Processed video {}, label={}, time={}'.format(filename, prediction, timedelta(seconds=timer() -start))) print('Possible lost: best={}, worse={}'.format(estimate_loss(predictions), estimate_loss(predictions, False))) except Exception as error: print('Failed to process {}'.format(filename)) sub.to_csv('submission.csv', index=False )
Deepfake Detection Challenge
7,736,225
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<predict_on_test>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
7,736,225
predictions = model.predict(test )<save_to_csv>
df_train0 = pd.read_json('.. /input/deepfake-detection-challenge/metadata0.json' )
Deepfake Detection Challenge
7,736,225
export = pd.DataFrame([np.argmax(prediction)for prediction in predictions]) export.index += 1 export = export.reset_index() export.columns = ['ImageId', 'Label'] export.to_csv('submission.csv', index=False )<import_modules>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
7,736,225
from tensorflow.keras.preprocessing.image \ <load_from_csv>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
7,736,225
df_train=pd.read_csv('.. /input/digit-recognizer/train.csv') df_test=pd.read_csv('.. /input/digit-recognizer/test.csv') print([df_train.shape,df_test.shape]) print(df_train.iloc[265,1:].values.reshape(28,28)[:,10] )<prepare_x_and_y>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
7,736,225
images=['%s%s'%('pixel',pixel_no)for pixel_no in range(0,784)] x_train=df_train[images].values/255. x_train=x_train.reshape(-1,28,28,1) y_train=df_train['label'].values x_test_out=df_test[images].values/255. x_test_out=x_test_out.reshape(-1,28,28,1 )<prepare_x_and_y>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
7,736,225
num_classes=10; img_size,img_size2=28,96 N=df_train.shape[0]; n=int (.1*N) shuffle_ids=np.arange(N) np.random.RandomState(123 ).shuffle(shuffle_ids) x_train=x_train[shuffle_ids]; y_train=y_train[shuffle_ids] x_test,x_valid,x_train=\ x_train[:n],x_train[n:2*n],x_train[2*n:] y_test,y_valid,y_train=\ y_train[:n],y_train[n:2*n],y_train[2*n:] df=pd.DataFrame( [[x_train.shape,x_valid.shape,x_test.shape], [x_train.dtype,x_valid.dtype,x_test.dtype], [y_train.shape,y_valid.shape,y_test.shape], [y_train.dtype,y_valid.dtype,y_test.dtype]], columns=['train','valid','test'], index=['image shape','image type','label shape','label type']) df<choose_model_class>
frames_per_video = 20 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet) mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
7,736,225
def model() : model=tf.keras.Sequential() model.add(tkl.Input(shape=(28,28,1))) model.add(tkl.BatchNormalization()) model.add(tkl.Conv2D(28,(5,5),padding='same')) model.add(tkl.LeakyReLU(alpha=.02)) model.add(tkl.MaxPooling2D(pool_size=(2,2))) model.add(tkl.Dropout (.2)) model.add(tkl.Conv2D(96,(5,5),padding='same')) model.add(tkl.LeakyReLU(alpha=.02)) model.add(tkl.MaxPooling2D(strides=(2,2))) model.add(tkl.Dropout (.2)) model.add(tkl.Conv2D(128,(5,5))) model.add(tkl.LeakyReLU(alpha=.02)) model.add(tkl.MaxPooling2D(strides=(2,2))) model.add(tkl.Dropout (.2)) model.add(tkl.GlobalMaxPooling2D()) model.add(tkl.Dense(1024)) model.add(tkl.LeakyReLU(alpha=.02)) model.add(tkl.Dropout (.5)) model.add(tkl.Dense(num_classes,activation='softmax')) model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam', metrics=['sparse_categorical_accuracy']) return model<train_model>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
7,736,225
cnn_model=model() checkpointer=tkc.ModelCheckpoint( filepath='/tmp/checkpoint',verbose=2,save_weights_only=True, monitor='val_sparse_categorical_accuracy',mode='max',save_best_only=True) lr_reduction=tkc.ReduceLROnPlateau( monitor='val_loss',patience=15,verbose=2,factor=.8) early_stopping=tkc.EarlyStopping( monitor='val_loss',patience=75,verbose=2) history=cnn_model.fit( x_train,y_train,epochs=120,batch_size=128, verbose=2,validation_data=(x_valid,y_valid), callbacks=[checkpointer,lr_reduction,early_stopping] )<compute_test_metric>
Deepfake Detection Challenge
7,736,225
cnn_model.load_weights('/tmp/checkpoint') scores=cnn_model.evaluate(x_test,y_test,verbose=0 )<train_model>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
7,736,225
steps,epochs=int(len(x_train)/128),10 datagen=ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, zoom_range=.2,shear_range=.2,rotation_range=30, height_shift_range=.2,width_shift_range=.2) datagen.fit(x_train) history=cnn_model.\ fit(datagen.flow(x_train,y_train,batch_size=128), steps_per_epoch=steps,epochs=epochs,verbose=2, validation_data=datagen.flow(x_valid,y_valid,batch_size=16), callbacks=[checkpointer,lr_reduction,early_stopping] )<compute_test_metric>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
7,736,225
cnn_model.load_weights('/tmp/checkpoint') scores=cnn_model.evaluate(x_test,y_test,verbose=0 )<predict_on_test>
input_size = 224 checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
7,736,225
predict_y_test_out=cnn_model.predict(x_test_out) predict_y_test_out=predict_y_test_out.argmax(axis=-1 )<save_to_csv>
start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
7,736,225
submission=pd.DataFrame( {'ImageId':range(1,len(predict_y_test_out)+1), 'Label':predict_y_test_out}) print(submission[0:15].T) submission.to_csv('kaggle_digits_cnn.csv',index=False )<load_pretrained>
!pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
7,736,225
os.environ['TFHUB_MODEL_LOAD_FORMAT']='COMPRESSED' model=th.load('https://tfhub.dev/captain-pool/esrgan-tf2/1') func=model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY] func.inputs[0].set_shape([1,img_size2//4,img_size2//4,3]) converter=tf.lite.TFLiteConverter.from_concrete_functions([func]) converter.optimizations=[tf.lite.Optimize.DEFAULT] tflite_model=converter.convert() with tf.io.gfile.GFile('ESRGAN.tflite','wb')as f: f.write(tflite_model) esrgan_model_path='./ESRGAN.tflite'<prepare_x_and_y>
class Head(nn.Module): def __init__(self, in_f, out_f): super().__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.b1 = nn.BatchNorm1d(in_f) self.d = nn.Dropout(0.25) self.o = nn.Linear(512, out_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(nn.Module): def __init__(self, base, in_f): super().__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x )
Deepfake Detection Challenge
7,736,225
N3=10000; n3=int (.1*N3) x_train3=x_train[:N3]; y_train3=y_train[:N3] x_valid3=x_valid[:n3]; y_valid3=y_valid[:n3] x_test3=x_test[:n3]; y_test3=y_test[:n3] x_train3=tf.repeat(x_train3,3,axis=3 ).numpy() x_valid3=tf.repeat(x_valid3,3,axis=3 ).numpy() x_test3=tf.repeat(x_test3,3,axis=3 ).numpy() x_test3.shape,x_test3.mean()<normalization>
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1)) )
Deepfake Detection Challenge
7,736,225
def bicubic_resize(imgs,img_size=img_size2): bicubic=tf.image.resize( imgs*255,[img_size,img_size],tf.image.ResizeMethod.BICUBIC) bicubic_contrast=tf.image.adjust_contrast(bicubic,.8) bicubic_contrast=tf.cast(bicubic_contrast,tf.uint8) return bicubic_contrast.numpy() /255<normalization>
input_size = 150 checkpoint = torch.load('.. /input/mymodels/model_niz.pth', map_location=gpu) model = FCN(model, 2048 ).to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
7,736,225
x_train3=bicubic_resize(x_train3) x_valid3=bicubic_resize(x_valid3) x_test3=bicubic_resize(x_test3) x_test3.shape,x_test3.mean()<normalization>
start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
7,736,225
def esrgantf2_superresolution( img,super_size=img_size2,model_path=esrgan_model_path): if img.mean() <1.: img=img*255. lr=tf.image.resize(img,[super_size//4,super_size//4]) lr=tf.expand_dims(lr.numpy() [:,:,:3],axis=0) lr=tf.cast(lr,tf.float32) interpreter=tf.lite.Interpreter(model_path=model_path) interpreter.allocate_tensors() input_details=interpreter.get_input_details() output_details=interpreter.get_output_details() interpreter.set_tensor(input_details[0]['index'],lr) interpreter.invoke() output_data=interpreter.get_tensor(output_details[0]['index']) sr=tf.squeeze(output_data,axis=0) sr=tf.clip_by_value(sr,0,255) sr=tf.round(sr); sr=tf.cast(sr,tf.uint8) lr=tf.cast(tf.squeeze(lr,axis=0),tf.uint8) return lr,sr<normalization>
final_predictions = 0.5 *(np.array(predictions_resnext)+ np.array(predictions_xception)) submission_df = pd.DataFrame({'filename': test_videos, "label": final_predictions}) plt.hist(submission_df['label'] )
Deepfake Detection Challenge
7,736,225
<choose_model_class><EOS>
submission_df.to_csv("submission.csv", index=False)
Deepfake Detection Challenge
8,252,685
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<train_model>
from IPython.display import Image
Deepfake Detection Challenge
8,252,685
fw='/tmp/checkpoint' handle_base='mobilenet_v2_100_%d'%img_size2 mhandle='https://tfhub.dev/google/imagenet/{}/classification/4'\ .format(handle_base) hub_model=premodel(img_size2,1024,mhandle,num_classes, 'softmax','sparse_categorical_crossentropy') history=hub_model.fit(x=x_train3,y=y_train3,batch_size=128, epochs=20,callbacks=cb(fw),verbose=0, validation_data=(x_valid3,y_valid3))<train_model>
Image('.. /input/deepfake-kernel-data/google_cloud_compute_engine_launch_vm.png' )
Deepfake Detection Challenge
8,252,685
hub_model.load_weights('/tmp/checkpoint') hub_model.evaluate(x_test3,y_test3,verbose=0 )<set_options>
Image('.. /input/deepfake-kernel-data/google_cloud_vm.png' )
Deepfake Detection Challenge
8,252,685
%matplotlib inline sns.set(style='white', context='notebook', palette='deep' )<load_from_csv>
Image('.. /input/deepfake-kernel-data/lr_15e-2_epochs_42_patience_5.png' )
Deepfake Detection Challenge
8,252,685
df_train = pd.read_csv(".. /input/digit-recognizer/train.csv") df_test = pd.read_csv(".. /input/digit-recognizer/test.csv" )<prepare_x_and_y>
Image('.. /input/deepfake-kernel-data/lr_2e-3_epochs_10_patience_5.png' )
Deepfake Detection Challenge
8,252,685
X_train = np.array(df_train.drop(['label'], axis=1), dtype="float32")/ 255.0 X_train = X_train.reshape(-1, 28, 28, 1) Y_train = to_categorical(df_train['label'], num_classes = 10) X_test = np.array(df_test, dtype="float32")/ 255.0 X_test = X_test.reshape(-1, 28, 28, 1 )<split>
Image('.. /input/deepfake-kernel-data/lr_2e-3_epochs_20_patience_5.png' )
Deepfake Detection Challenge
8,252,685
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.05, random_state=34 )<choose_model_class>
Image('.. /input/deepfake-kernel-data/lr_4e-3_epochs_12_patience_2.png' )
Deepfake Detection Challenge
8,252,685
model = Sequential() model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(10, activation = "softmax")) <define_variables>
Image('.. /input/deepfake-kernel-data/lr_4e-3_epochs_30_patience_2.png' )
Deepfake Detection Challenge
8,252,685
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )<choose_model_class>
Image('.. /input/deepfake-kernel-data/google_cloud_vm_deepfake_training_screenshot.png' )
Deepfake Detection Challenge
8,252,685
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]) learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )<train_model>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
8,252,685
epochs=50 batch_size=128 history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction] )<predict_on_test>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
8,252,685
Y_pred = model.predict(X_val) Y_pred_classes = np.argmax(Y_pred,axis = 1) Y_true = np.argmax(Y_val,axis = 1 )<compute_test_metric>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,252,685
errors =(Y_pred_classes - Y_true != 0) Y_pred_classes_errors = Y_pred_classes[errors] Y_pred_errors = Y_pred[errors] Y_true_errors = Y_true[errors] X_val_errors = X_val[errors]<compute_train_metric>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,252,685
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1) true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1)) delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors sorted_dela_errors = np.argsort(delta_pred_true_errors) most_important_errors = sorted_dela_errors[-6:] display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors )<predict_on_test>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,252,685
results = model.predict(X_test) results = np.argmax(results, axis = 1) results = pd.Series(results, name="Label" )<save_to_csv>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,252,685
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"), results],axis = 1) submission.to_csv("submission.csv",index=False )<install_modules>
input_size = 224
Deepfake Detection Challenge
8,252,685
!pip3 install --no-dependencies.. /input/efficientnetcassava/Keras_Applications-1.0.8-py3-none-any.whl !pip3 install --no-dependencies.. /input/efficientnetcassava/efficientnet-1.1.1-py3-none-any.whl<import_modules>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,252,685
import re import numpy as np import pandas as pd import os import json<import_modules>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
8,252,685
import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras import models from tensorflow.keras import layers from tensorflow.keras import losses from sklearn.model_selection import train_test_split from efficientnet.keras import EfficientNetB3 as EfficientNet<define_variables>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
8,252,685
AUTO = tf.data.experimental.AUTOTUNE SIZE = 600 ORIGINAL_WIDTH = 800 ORIGINAL_HEIGHT = 600 CHANNELS = 3 BATCH_SIZE = 32<normalization>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
8,252,685
def decode_image(path): image = tf.io.read_file(path) image = tf.image.decode_jpeg(image, channels=3) image =(tf.cast(image, tf.float32)/ 255.0) image = tf.image.resize(image, [ORIGINAL_HEIGHT, ORIGINAL_WIDTH]) image = tf.reshape(image, [ORIGINAL_HEIGHT, ORIGINAL_WIDTH , CHANNELS]) return image def normalize(x): x = tf.image.resize(x, [ORIGINAL_HEIGHT, ORIGINAL_WIDTH]) x = tf.reshape(x, [ORIGINAL_HEIGHT, ORIGINAL_WIDTH, CHANNELS]) return x<load_from_csv>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,252,685
load_dir = "/kaggle/input/cassava-leaf-disease-classification" sub_df = pd.read_csv(load_dir + '/sample_submission.csv') sub_df['paths'] = load_dir + "/test_images/" + sub_df.image_id<categorify>
speed_test = False
Deepfake Detection Challenge
8,252,685
def load_dataset(augment=False): test_dataset =tf.data.Dataset.from_tensor_slices(sub_df.paths.values ).map(decode_image, num_parallel_calls=AUTO) if augment: test_dataset = test_dataset.map(lambda x: data_aug(x), num_parallel_calls=AUTO) else: test_dataset = test_dataset.map(lambda x:normalize(x)) return test_dataset.batch(BATCH_SIZE ).prefetch(AUTO )<choose_model_class>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,252,685
def load_model(i): inputs = layers.Input(shape=(ORIGINAL_HEIGHT, ORIGINAL_WIDTH, 3)) model = Sequential([ EfficientNet(include_top=False,weights=None, input_tensor=inputs), layers.GlobalAveragePooling2D(name="avg_pool"), layers.BatchNormalization() , layers.Dropout(0.3, name="top_dropout"), layers.Dense(5, activation="softmax", name="pred") ]) model.load_weights(F".. /input/efficientnetcassava/EfficientNetB3_tl_best_weights_{i}.h5") model.compile(loss=losses.SparseCategoricalCrossentropy() , optimizer=tf.optimizers.Adam(lr=0.001), metrics=['accuracy']) return model<load_pretrained>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,252,685
n_models = 5 models = [] for i in range(n_models): models.append(load_model(i))<predict_on_test>
submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_resnext.to_csv("submission_resnext.csv", index=False )
Deepfake Detection Challenge
8,252,685
preds = [] test_dataset = load_dataset() for i in range(n_models): preds.append(models[i].predict(test_dataset, verbose=1)) for i in range(10): test_dataset_augmented = load_dataset(augment=True) for i in range(n_models): preds.append(models[i].predict(test_dataset_augmented, verbose=1)) preds = np.mean(preds, axis=0) preds<save_to_csv>
!pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,252,685
sub_df['label'] = preds.argmax(axis=1) sub_df.drop(columns='paths' ).to_csv('submission.csv', index=False) !head submission.csv<set_options>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,252,685
warnings.filterwarnings('ignore') <define_variables>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,252,685
training_folder = '.. /input/cassava-leaf-disease-classification/train_images/'<load_from_csv>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,252,685
samples_df = pd.read_csv(".. /input/cassava-leaf-disease-classification/train.csv") samples_df = shuffle(samples_df, random_state=42) samples_df["filepath"] = training_folder+samples_df["image_id"] samples_df.head() <define_variables>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,252,685
training_percentage = 0.8 training_item_count = int(len(samples_df)*training_percentage) validation_item_count = len(samples_df)-int(len(samples_df)*training_percentage) training_df = samples_df[:training_item_count] validation_df = samples_df[training_item_count:] <define_variables>
input_size = 150
Deepfake Detection Challenge
8,252,685
batch_size = 8 image_size = 512 input_shape =(image_size, image_size, 3) dropout_rate = 0.4 classes_to_predict = sorted(training_df.label.unique() )<prepare_x_and_y>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,252,685
training_data = tf.data.Dataset.from_tensor_slices(( training_df.filepath.values, training_df.label.values)) validation_data = tf.data.Dataset.from_tensor_slices(( validation_df.filepath.values, validation_df.label.values))<categorify>
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) class Pooling(nn.Module): def __init__(self): super(Pooling, self ).__init__() self.p1 = nn.AdaptiveAvgPool2d(( 1,1)) self.p2 = nn.AdaptiveMaxPool2d(( 1,1)) def forward(self, x): x1 = self.p1(x) x2 = self.p2(x) return(x1+x2)* 0.5 model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1))) class Head(torch.nn.Module): def __init__(self, in_f, out_f): super(Head, self ).__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.d = nn.Dropout(0.5) self.o = nn.Linear(512, out_f) self.b1 = nn.BatchNorm1d(in_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(torch.nn.Module): def __init__(self, base, in_f): super(FCN, self ).__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x) net = [] model = FCN(model, 2048) model = model.cuda() model.load_state_dict(torch.load('.. /input/deepfake-kernel-data/model_50epochs_lr0001_patience5_factor01_batchsize32.pth')) net.append(model )
Deepfake Detection Challenge
8,252,685
def load_image_and_label_from_path(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) return img, label AUTOTUNE = tf.data.experimental.AUTOTUNE training_data = training_data.map(load_image_and_label_from_path, num_parallel_calls=AUTOTUNE) validation_data = validation_data.map(load_image_and_label_from_path, num_parallel_calls=AUTOTUNE) training_data_batches = training_data.shuffle(buffer_size=1000 ).batch(batch_size ).prefetch(buffer_size=AUTOTUNE) validation_data_batches = validation_data.shuffle(buffer_size=1000 ).batch(batch_size ).prefetch(buffer_size=AUTOTUNE )<normalization>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
8,252,685
adapt_data = tf.data.Dataset.from_tensor_slices(training_df.filepath.values) def adapt_mode(image_path): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = layers.experimental.preprocessing.Rescaling(1.0 / 255 )(img) return img adapt_data = adapt_data.map(adapt_mode, num_parallel_calls=AUTOTUNE) adapt_data_batches = adapt_data.shuffle(buffer_size=1000 ).batch(batch_size ).prefetch(buffer_size=AUTOTUNE )<set_options>
def predict_on_video_set(videos, num_workers):
    """Score every filename in `videos` with a thread pool; returns scores in input order."""
    def process_file(idx):
        path = os.path.join(test_dir, videos[idx])
        return predict_on_video(path, batch_size=frames_per_video)

    with ThreadPoolExecutor(max_workers=num_workers) as pool:
        return list(pool.map(process_file, range(len(videos))))
Deepfake Detection Challenge
8,252,685
# Random crop/flip/rotate/zoom/contrast augmentations applied during training.
_augmentation_ops = [
    layers.experimental.preprocessing.RandomCrop(height=image_size, width=image_size),
    layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
    layers.experimental.preprocessing.RandomRotation(0.25),
    layers.experimental.preprocessing.RandomZoom((-0.2, 0)),
    layers.experimental.preprocessing.RandomContrast((0.2, 0.2)),
]
data_augmentation_layers = tf.keras.Sequential(_augmentation_ops)
# Toggle for the optional timing run below; False skips it.
speed_test = False
Deepfake Detection Challenge
8,252,685
# BUG FIX: the weights path contained a stray space (".. /input/..."),
# which is not a valid Kaggle path.
efficientnet = EfficientNetB3(
    weights="../input/keras-efficientnetb3-no-top-weights/efficientnetb3_notop.h5",
    include_top=False,
    input_shape=input_shape,
    drop_connect_rate=dropout_rate,
)
# Augment -> backbone -> pooled features -> dropout -> softmax head.
inputs = Input(shape=input_shape)
augmented = data_augmentation_layers(inputs)
efficientnet = efficientnet(augmented)
pooling = layers.GlobalAveragePooling2D()(efficientnet)
dropout = layers.Dropout(dropout_rate)(pooling)
outputs = Dense(len(classes_to_predict), activation="softmax")(dropout)
model = Model(inputs=inputs, outputs=outputs)
model.summary()
# Optional timing probe over the first five clips (enable via speed_test).
if speed_test:
    start_time = time.time()
    speedtest_videos = test_videos[:5]
    predictions = predict_on_video_set(speedtest_videos, num_workers=4)
    elapsed = time.time() - start_time
    print("Elapsed %f sec.Average per video: %f sec." % (elapsed, elapsed / len(speedtest_videos)))
Deepfake Detection Challenge
8,252,685
# Number of training epochs.
epochs = 20
%%time
# Put the network into inference mode, then score every test video.
model.eval()
predictions = predict_on_video_set(test_videos, num_workers=4)
Deepfake Detection Challenge
8,252,685
# One cosine-decay cycle spanning the full training schedule.
decay_steps = int(round(len(training_df) / batch_size)) * epochs
cosine_decay = CosineDecay(
    initial_learning_rate=1e-4,
    decay_steps=decay_steps,
    alpha=0.3,
)
# Persist only the weights that achieve the best validation loss.
callbacks = [
    ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)
]
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(cosine_decay),
    metrics=["accuracy"],
)
# Per-video predictions from the Xception branch of the ensemble.
submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df_xception.to_csv("submission_xception.csv", index=False)
Deepfake Detection Challenge
8,252,685
# Train; the ModelCheckpoint in `callbacks` saves the best-val_loss weights.
history = model.fit(training_data_batches, epochs = epochs, validation_data=validation_data_batches, callbacks=callbacks)
# Blend the two model submissions 50/50.
# NOTE(review): submission_df_resnext is not defined in this chunk — confirm
# it is created by an earlier cell of the notebook.
submission_df = pd.DataFrame({"filename": test_videos})
submission_df["label"] = 0.5*submission_df_resnext["label"] + 0.5*submission_df_xception["label"]
Deepfake Detection Challenge
8,252,685
# Restore the best (lowest val_loss) checkpoint saved during training.
model.load_weights("./best_model.h5")
# Write the blended submission file.
submission_df.to_csv("submission.csv", index=False)
Deepfake Detection Challenge
8,666,237
# Write the submission file.
submission_df.to_csv("submission.csv", index=False)
# Locations of the competition data and the list of test clips.
BASE_PATH = '/kaggle/input/deepfake-detection-challenge/'
TEST_VIDEO_PATH = f'{BASE_PATH}test_videos/'
SAMP_PATH = f'{BASE_PATH}sample_submission.csv'
test_img_list = glob.glob(TEST_VIDEO_PATH + '*.mp4')
Deepfake Detection Challenge
8,666,237
# Silence library warnings and enable inline plotting in the notebook.
warnings.filterwarnings("ignore")
%matplotlib inline
class EffnetTest(nn.Module):
    """EfficientNet-based binary classifier wrapper used at inference time.

    `version` is the EfficientNet variant suffix (e.g. 'b5', 'b6').
    """

    def __init__(self, version):
        super(EffnetTest, self).__init__()
        # Single-logit head configured via override_params.
        self.model = EfficientNet.from_name(f"efficientnet-{version}", override_params={"num_classes":1})
        # NOTE(review): efficientnet_pytorch's head attribute is `_fc`; this
        # `fc` assignment appears to add an unused layer — confirm intent.
        self.model.fc = nn.Linear(512, 1)
        # NOTE(review): `_norm_layer` does not look like a standard
        # EfficientNet attribute — verify this assignment has any effect.
        self.model._norm_layer = nn.GroupNorm(num_groups=32, num_channels=3)

    def forward(self, x):
        x = self.model(x)
        return torch.sigmoid(x)
Deepfake Detection Challenge
8,666,237
# Show wide frames in full when printed.
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
# NOTE(review): this path points at the Mercedes-Benz competition data, not
# the deepfake data — confirm this cell belongs in this notebook.
folder = '/kaggle/input/mercedes-benz-greener-manufacturing/'
class SimpleCNNInference:
    """Loads a trained EffnetTest checkpoint and scores single BGR face crops."""

    def __init__(self, model_path, version='b6', img_size=200):
        self.img_size = img_size
        # Resize + ImageNet normalization, matching training preprocessing.
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(size=(self.img_size, self.img_size)),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        self.device = torch.device("cuda")
        net = EffnetTest(version=version)
        net.load_state_dict(torch.load(model_path, map_location="cuda:0"))
        net.to(self.device)
        net.eval()
        self.model = net

    def predict_single_from_array(self, img_frame):
        """Return the model's score for one BGR image array."""
        rgb = cv2.cvtColor(img_frame, cv2.COLOR_BGR2RGB)
        batch = self.transform(rgb).to(self.device).unsqueeze(0)
        return float(self.model(batch))
Deepfake Detection Challenge
8,666,237
# Load the competition tables and report their shapes.
train_df = pd.read_csv(folder + 'train.csv.zip')
test_df = pd.read_csv(folder + 'test.csv.zip')
sub_df = pd.read_csv(folder + 'sample_submission.csv.zip')
for label, frame in [('train_df: ', train_df), ('test_df: ', test_df), ('sub_df: ', sub_df)]:
    print(label, frame.shape)
# Strip directory components to get bare video filenames.
test_filenames = [string.split('/')[-1] for string in test_img_list]
# Face detector plus five EfficientNet classifiers trained on face crops.
detector = RetinaFaceDetector(weights="/kaggle/input/resnetretinaface/Resnet50_Final.pth")
inference = SimpleCNNInference(model_path="/kaggle/input/effnetb6-200-0741/EffnetB6_pytorch_group_imgface6_200_0.0741.pth", version='b6', img_size=200)
inference2 = SimpleCNNInference(model_path="/kaggle/input/testefficientnet/testEfficientNet.pth", version='b5', img_size=224)
inference3 = SimpleCNNInference(model_path="/kaggle/input/effnetb5-imgface6-0074/EffnetB5_imgface6_0.074.pth", version='b5', img_size=224)
inference4 = SimpleCNNInference(model_path="/kaggle/input/effnetb6-0071-finetuned/EffnetB6_pytorch_group_imgface6_0.071_finetuned.pth", version='b6', img_size=224)
inference5 = SimpleCNNInference(model_path="/kaggle/input/effnetb4-0691/EffnetB4_pytorch_group_imgface6_0.0691.pth", version='b4', img_size=224)
frame_skip = 16   # decode and score only every 16th frame
res = 2           # run detection on a 2x-downsampled frame for speed
final_predictions = dict()
for i, video in enumerate(tqdm(test_img_list)):
    single_predictions = []
    cap = cv2.VideoCapture(video)
    for j in range(300):
        _ = cap.grab()
        # Skip frames that are not on the frame_skip grid.
        if j % frame_skip == 0:
            pass
        else:
            continue
        try:
            _, frame = cap.retrieve()
            result = detector.detect_faces(frame[::res, ::res])
            if result == []:
                continue
            # Scale the first detection's box back to full resolution.
            bounding_box = result[0]
            x1, y1 = bounding_box[0]*res, bounding_box[1]*res
            x2, y2 = bounding_box[2]*res, bounding_box[3]*res
            frame_face = frame[y1:y2, x1:x2]
            # 1 - score is stored, so the raw model output is presumably
            # P(real) — TODO confirm against training code.
            pred = inference.predict_single_from_array(frame_face)
            single_predictions.append(1-pred)
            pred2 = inference2.predict_single_from_array(frame_face)
            single_predictions.append(1-pred2)
            pred3 = inference3.predict_single_from_array(frame_face)
            single_predictions.append(1-pred3)
            pred4 = inference4.predict_single_from_array(frame_face)
            single_predictions.append(1-pred4)
            pred5 = inference5.predict_single_from_array(frame_face)
            single_predictions.append(1-pred5)
        except:
            # NOTE(review): bare except hides real failures; also np.mean of an
            # empty list yields NaN for videos with no detections (handled by
            # the fillna(0.5) downstream).
            print("E")
    final_predictions[test_filenames[i]] = np.mean(single_predictions)
cv2.destroyAllWindows()
Deepfake Detection Challenge
8,666,237
# Names of the categorical feature columns.
# NOTE(review): dfs_train is not defined in this chunk — presumably a
# dataset-summary helper created earlier; confirm.
cat_cols = dfs_train.categoricals.tolist()
sub = pd.read_csv(SAMP_PATH)
# Map per-video predictions onto the sample submission, clamp away from the
# probability extremes, and default missing/NaN videos to 0.5.
mapped = sub['filename'].map(final_predictions)
sub['label'] = mapped.clip(0.01, 0.99).fillna(0.5)
Deepfake Detection Challenge
8,666,237
<filter><EOS>
# Write the submission file.
sub.to_csv('submission.csv', index=False)
Deepfake Detection Challenge
7,694,669
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<prepare_x_and_y>
%%time %%capture
Deepfake Detection Challenge
7,694,669
# Split the target column off of the training frame (removes 'y' in place).
y = train_df.pop('y')
# Directory of test clips and the sorted list of their filenames.
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted(name for name in os.listdir(test_dir) if name.endswith(".mp4"))
len(test_videos)
Deepfake Detection Challenge
7,694,669
class MeanEncoding(BaseEstimator):
    """Smoothed target (mean) encoding for a single categorical feature.

    Encodes each category as a blend of its own target mean and the global
    mean: (global_mean * C + cat_mean * cat_size) / (C + cat_size), where C
    is the smoothing prior strength. Unseen categories map to the global mean.
    """

    def __init__(self, feature, C=0.1):
        self.C = C
        self.feature = feature

    def fit(self, X_train, y_train):
        """Learn the per-category encoding from the training data."""
        df = pd.DataFrame({'feature': X_train[self.feature], 'target': y_train}).dropna()
        self.global_mean = df.target.mean()
        mean = df.groupby('feature').target.mean()
        size = df.groupby('feature').target.size()
        self.encoding = (self.global_mean * self.C + mean * size) / (self.C + size)

    def transform(self, X_test):
        """Replace the feature column of X_test (in place) with encoded values."""
        X_test[self.feature] = X_test[self.feature].map(self.encoding).fillna(self.global_mean).values
        return X_test

    def fit_transform(self, X_train, y_train):
        # DRY FIX: the original duplicated fit()'s body here; delegate so the
        # two code paths cannot drift apart.
        self.fit(X_train, y_train)
        return self.transform(X_train)
# Report the framework/CUDA/cuDNN versions available in this environment.
print("PyTorch version:", torch.__version__)
print("CUDA version:", torch.version.cuda)
print("cuDNN version:", torch.backends.cudnn.version())
Deepfake Detection Challenge
7,694,669
# Mean-encode every categorical column: fit on train, apply to both splits.
for col in cat_cols:
    encoder = MeanEncoding(col, C=0.99)
    encoder.fit(train_df, y)
    train_df = encoder.transform(train_df)
    test_df = encoder.transform(test_df)
# Select the GPU when available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
Deepfake Detection Challenge
7,694,669
# Cluster the continuous target into 2 groups (used for stratified splitting below).
km = KMeans(n_clusters=2, random_state=13)
km.fit(pd.DataFrame(y))
y_clust = km.predict(pd.DataFrame(y))
# Load the BlazeFace detector with pretrained weights and anchor boxes;
# train(False) switches it to inference mode.
facedet = BlazeFace().to(device)
facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth")
facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy")
_ = facedet.train(False)
Deepfake Detection Challenge
7,694,669
# Share of each target cluster.
pd.Series(y_clust).value_counts(normalize=True)
# Side length (px) of the square face crops fed to the networks.
input_size = 256
Deepfake Detection Challenge
7,694,669
# Hold out 25% for validation, stratified on the target clusters.
X_train, X_val, y_train, y_val, y_train_clust, y_val_clust = train_test_split(
    train_df, y, pd.Series(y_clust), test_size=0.25, stratify=y_clust, random_state=777
)
# ImageNet channel statistics used to normalize face crops.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std)
Deepfake Detection Challenge
7,694,669
# Verify the stratified split preserved the cluster proportions.
y_train_clust.value_counts(normalize=True)
def disable_grad(model):
    """Freeze every parameter of `model` (in place) and return it."""
    for p in model.parameters():
        p.requires_grad = False
    return model

def normalize(img):
    """Resize so the short side reaches 256 px, keeping aspect ratio when upscaling; otherwise force 256x256."""
    h, w, _ = img.shape
    if h > w and w < 256:
        return cv2.resize(img, (256, int((h / w) * 256)))
    if w > h and h < 256:
        return cv2.resize(img, (int((w / h) * 256), 256))
    return cv2.resize(img, (256, 256))

def weight_preds(preds, weights):
    """Combine per-model prediction vectors into one weighted-sum vector."""
    combined = []
    for model_idx, model_preds in enumerate(preds):
        for j in range(len(model_preds)):
            if len(combined) == len(model_preds):
                combined[j] += model_preds[j] * weights[model_idx]
            else:
                combined.append(model_preds[j] * weights[model_idx])
    return torch.FloatTensor(combined)

def predict_faces(models, x, weigths, n):
    """Score a batch of face crops with every model; return the weighted mean sigmoid score."""
    batch = torch.tensor(x, device=device).float()
    batch = batch.permute((0, 3, 1, 2))  # NHWC -> NCHW
    for i in range(len(batch)):
        batch[i] = normalize_transform(batch[i] / 255.)
    with torch.no_grad():
        per_model = [m(batch).squeeze()[:n] for m in models]
        del batch
        return torch.sigmoid(weight_preds(per_model, weigths)).mean().item()
Deepfake Detection Challenge
7,694,669
# Standardize features using statistics fitted on the training split only.
scaler = StandardScaler()
scaler.fit(X_train)
X_train_sc = pd.DataFrame(scaler.transform(X_train))
X_val_sc = pd.DataFrame(scaler.transform(X_val))
test_df_sc = pd.DataFrame(scaler.transform(test_df))
# Sample 32 frames per video and build the face-extraction pipeline around it.
frames_per_video = 32
video_reader = VideoReader()
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet)
Deepfake Detection Challenge
7,694,669
# Project the scaled training features onto their first two principal components.
pca = PCA(n_components=2)
pca.fit(X_train_sc)
train_pca_transformed = pca.transform(X_train_sc)
class MetaModel(nn.Module):
    """Stacking head: concatenates base-model output columns and, when
    `extended`, maps them through a small BN/ReLU/FC head to one logit."""

    def __init__(self, models=None, device='cuda:0', extended=False):
        super(MetaModel, self).__init__()
        self.extended = extended
        self.device = device
        self.models = models
        self.len = len(models)
        if self.extended:
            self.bn = nn.BatchNorm1d(self.len)
            self.relu = nn.ReLU()
            # NOTE(review): dropout is defined but never applied in forward —
            # confirm whether that is intentional.
            self.dropout = nn.Dropout(0.2)
            self.fc = nn.Linear(self.len, 1)

    def forward(self, x):
        stacked = torch.cat(tuple(x), dim=1)
        if not self.extended:
            return stacked
        stacked = self.bn(stacked)
        stacked = self.relu(stacked)
        return self.fc(stacked)
Deepfake Detection Challenge
7,694,669
# Cross-validated Lasso baseline; report train/validation R^2.
lasso = LassoCV(max_iter=9999)
lasso.fit(X_train_sc, y_train)
lasso_train_pred = lasso.predict(X_train_sc)
lasso_val_pred = lasso.predict(X_val_sc)
print('train', metrics.r2_score(y_train, lasso_train_pred), 'val', metrics.r2_score(y_val, lasso_val_pred))
MODELS_PATH = "/kaggle/input/deepfake-detection-model-20k/"
WEIGTHS_EXT = '.pth'
models = []    # level-1 meta-models
weigths = []   # per-meta-model blend weights (typo 'weigths' kept: used throughout)
# [checkpoint filename stem, architecture name] per base model; the numbers in
# the stems are presumably validation AUC / log-loss — TODO confirm.
raw_data_stack = \
    [
        ['0.8548137313946486 0.3376769562025044', 'efficientnet-b2'],
        ['EfficientNetb3 0.8573518024606384 0.34558522378585194', 'efficientnet-b3'],
        ['EfficientNetb4 0.8579110384582294 0.3383911053075265', 'efficientnet-b4'],
        ['EfficientNet6 0.8602770369095758 0.33193617861157143', 'efficientnet-b6'],
        ['EfficientNetb0 t2 0.8616966359803837 0.3698434531609828', 'efficientnet-b0'],
        ['EfficientNetb1 t2 0.8410909403768391 0.36058002083572327', 'efficientnet-b1'],
        ['EfficientNetb2 t2 0.8659554331928073 0.35598630783834084', 'efficientnet-b2'],
        ['EfficientNetb3 t2 0.8486191172674868 0.3611779548592305', 'efficientnet-b3'],
        ['EfficientNetb3 0.8635894347414609 0.328333642473084', 'efficientnet-b3'],
        ['EfficientNetb6 0.8593736556826981 0.32286693639934694', 'efficientnet-b6'],
        ['tf_efficientnet_b1_ns 0.8571367116923342 0.3341234226295108', 'tf_efficientnet_b1_ns'],
        ['tf_efficientnet_b3_ns 0.8712466660930913 0.3277394129117183', 'tf_efficientnet_b3_ns'],
        ['tf_efficientnet_b4_ns 0.8708595027101437 0.3152573955405342', 'tf_efficientnet_b4_ns'],
        ['tf_efficientnet_b6_ns 0.8733115374688118 0.3156576980666498', 'tf_efficientnet_b6_ns'],
    ]
stack_models = []
for raw_model in raw_data_stack:
    checkpoint = torch.load(MODELS_PATH + raw_model[0] + WEIGTHS_EXT, map_location=device)
    if '-' in raw_model[1]:
        # efficientnet_pytorch naming convention, e.g. 'efficientnet-b3'.
        model = EfficientNet.from_name(raw_model[1])
        model._fc = nn.Linear(model._fc.in_features, 1)
    else:
        # timm naming convention, e.g. 'tf_efficientnet_b3_ns'.
        model = timm.create_model(raw_model[1], pretrained=False)
        model.classifier = nn.Linear(model.classifier.in_features, 1)
    model.load_state_dict(checkpoint)
    _ = model.eval()
    _ = disable_grad(model)
    model = model.to(device)
    stack_models.append(model)
    del checkpoint, model
# [checkpoint name, slice of stack_models consumed, extended head?, score used
# for weighting] per meta-model. NOTE(review): raw_data_stack[meta_raw[1]] is
# passed as `models=` — MetaModel appears to use it only for len(); confirm.
meta_models = \
    [
        ['MetaModel 0.30638167556896007', slice(4, 8), False, 0.37780],
        ['MetaModel 0.2919331893755284', slice(0, 4), False, 0.33357],
        ['MetaModel 0.30281482560578044', slice(0, 8, None), True, 0.34077],
        ['MetaModel 0.26302117601197256', slice(0, 10, None), False, 0.35134],
        ['MetaModel 0.256337642808031', slice(10, 14, None), False, 0.32698],
        ['MetaModel 0.264787397152165', slice(0, 14, None), False, 0.34974]
    ]
for meta_raw in meta_models:
    checkpoint = torch.load(MODELS_PATH + meta_raw[0] + WEIGTHS_EXT, map_location=device)
    model = MetaModel(models=raw_data_stack[meta_raw[1]], extended=meta_raw[2]).to(device)
    model.load_state_dict(checkpoint)
    _ = model.eval()
    _ = disable_grad(model)
    model.to(device)
    models.append(model)
    weigths.append(meta_raw[3])
    del model, checkpoint
# Turn scores into normalized blend weights: lower score -> larger weight.
total = sum([1-score for score in weigths])
weigths = [(1-score)/ total for score in weigths]
Deepfake Detection Challenge
7,694,669
# Cross-validated Ridge baseline; report train/validation R^2.
ridge = RidgeCV()
ridge.fit(X_train_sc, y_train)
ridge_train_pred = ridge.predict(X_train_sc)
ridge_val_pred = ridge.predict(X_val_sc)
print('train', metrics.r2_score(y_train, ridge_train_pred), 'val', metrics.r2_score(y_val, ridge_val_pred))
def predict_on_video(video_path, batch_size):
    """Predict the fake probability of one video via the stacked meta-model ensemble.

    Returns 0.5 (neutral) when no face is detected or any error occurs, so
    the caller always receives a valid probability.
    """
    try:
        faces = face_extractor.process_video(video_path)
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = normalize(face)
                    # Center-crop the resized face to the network input size.
                    resized_face = torchvision.transforms.CenterCrop((input_size, input_size))(Image.fromarray(resized_face))
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            del faces
            if n > 0:
                x = torch.tensor(x, device=device).float()
                x = x.permute((0, 3, 1, 2))  # NHWC -> NCHW
                for i in range(len(x)):
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    stacked_preds = []
                    preds = []
                    # Level-0: one prediction column per base model.
                    for i in range(len(stack_models)):
                        stacked_preds.append(stack_models[i](x).squeeze()[:n].unsqueeze(dim=1))
                    # Level-1: each meta-model consumes its configured slice
                    # of the base-model columns.
                    for i in range(len(models)):
                        preds.append(models[i](stacked_preds[meta_models[i][1]]))
                    del x, stacked_preds
                    y_pred = torch.sigmoid(weight_preds(preds, weigths)).mean().item()
                    del preds
                    return y_pred
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    # BUG FIX: the original fell through to an implicit None when no face was
    # detected; return the neutral probability instead.
    return 0.5
Deepfake Detection Challenge
7,694,669
# Cross-validated ElasticNet baseline; report train/validation R^2.
enet = ElasticNetCV()
enet.fit(X_train_sc, y_train)
enet_train_pred = enet.predict(X_train_sc)
enet_val_pred = enet.predict(X_val_sc)
print('train', metrics.r2_score(y_train, enet_train_pred), 'val', metrics.r2_score(y_val, enet_val_pred))
def predict_on_video_single(video_path, batch_size):
    """Score one video using only the final (last) meta-model.

    Returns 0.5 (neutral) when no face is detected or an error occurs.
    """
    try:
        faces = face_extractor.process_video(video_path)
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = cv2.resize(face, (input_size, input_size))
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            del faces
            if n > 0:
                x = torch.tensor(x, device=device).float()
                x = x.permute((0, 3, 1, 2))  # NHWC -> NCHW
                for i in range(len(x)):
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    # One prediction column per base model; the meta-model
                    # concatenates them internally. (Removed an unused
                    # `preds = []` from the original.)
                    stacked_preds = []
                    for i in range(len(stack_models)):
                        stacked_preds.append(stack_models[i](x).squeeze()[:n].unsqueeze(dim=1))
                    del x
                    y_pred = torch.sigmoid(models[-1](stacked_preds)).mean().item()
                    return y_pred
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    # BUG FIX: the original fell through to an implicit None when no face was
    # detected; return the neutral probability instead.
    return 0.5
Deepfake Detection Challenge
7,694,669
# Huber (outlier-robust) regression baseline; report train/validation R^2.
huber = HuberRegressor(alpha=0.05)
huber.fit(X_train_sc, y_train)
huber_train_pred = huber.predict(X_train_sc)
huber_val_pred = huber.predict(X_val_sc)
print('train', metrics.r2_score(y_train, huber_train_pred), 'val', metrics.r2_score(y_val, huber_val_pred))
def predict_on_video_set(videos, num_workers):
    """Score every filename in `videos` concurrently; results keep input order."""
    def process_file(i):
        return predict_on_video(os.path.join(test_dir, videos[i]), batch_size=frames_per_video)

    with ThreadPoolExecutor(max_workers=num_workers) as ex:
        results = ex.map(process_file, range(len(videos)))
        return list(results)
Deepfake Detection Challenge
7,694,669
# Small random-forest baseline; report train/validation R^2.
rf = RandomForestRegressor(n_estimators=5)
rf.fit(X_train_sc, y_train)
rf_train_pred = rf.predict(X_train_sc)
rf_val_pred = rf.predict(X_val_sc)
print('train', metrics.r2_score(y_train, rf_train_pred), 'val', metrics.r2_score(y_val, rf_val_pred))
# Toggle for the optional timing run below; False skips it.
speed_test = False
Deepfake Detection Challenge
7,694,669
# NOTE(review): exp() implies the model was trained on log(y), but this chunk
# shows lasso fitted on raw y_train — confirm the target transform upstream.
sub_df['y'] = np.round(np.exp(lasso.predict(test_df_sc)), 4)
# Optional timing probe over the first five clips (enable via speed_test).
if speed_test:
    start_time = time.time()
    speedtest_videos = test_videos[:5]
    predictions = predict_on_video_set(speedtest_videos, num_workers=4)
    elapsed = time.time() - start_time
    print("Elapsed %f min.Average per video: %f sec." % (elapsed / 60, elapsed / len(speedtest_videos)))
Deepfake Detection Challenge
7,694,669
# Write the regression predictions for inspection/submission.
sub_df.to_csv('sub.csv', index=False)
# Score every test video with 4 parallel worker threads.
predictions = predict_on_video_set(test_videos, num_workers=4)
Deepfake Detection Challenge
7,694,669
# Turn on matplotlib interactive mode.
plt.ion()
# Assemble and persist the final per-video submission.
records = {"filename": test_videos, "label": predictions}
submission_df = pd.DataFrame(records)
submission_df.to_csv("submission.csv", index=False)
Deepfake Detection Challenge
7,694,669
<define_variables><EOS>
!rm -r reader && rm install.sh
Deepfake Detection Challenge
8,531,644
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<categorify>
%%capture !pip install /kaggle/input/facenet-pytorch-vggface2/facenet_pytorch-2.2.9-py3-none-any.whl !pip install /kaggle/input/xt-training/pynvml-8.0.4-py3-none-any.whl !pip install /kaggle/input/xt-training/xt_training-1.4.0-py3-none-any.whl !pip install /kaggle/input/imageio-ffmpeg/imageio_ffmpeg-0.3.0-py3-none-manylinux2010_x86_64.whl !pip install /kaggle/input/imutils/imutils-0.5.3/ !cp -R /kaggle/input/xtract-ai-dfdc/dfdc./
Deepfake Detection Challenge
8,531,644
class ImageData(Dataset):
    """Image dataset backed by a dataframe with `id` (filename) and
    `has_cactus` (label) columns; files live under `data_dir`."""

    def __init__(self, df, data_dir, transform):
        super().__init__()
        self.df = df
        self.data_dir = data_dir
        self.transform = transform

    def __len__(self):
        # One sample per dataframe row.
        return len(self.df)

    def __getitem__(self, index):
        file_name = self.df.id[index]
        label = self.df.has_cactus[index]
        image = mpimg.imread(os.path.join(self.data_dir, file_name))
        return self.transform(image), label
import os import glob import numpy as np import pandas as pd import torch from torch.utils.data import DataLoader, Subset from torch import optim from tqdm.notebook import tqdm from matplotlib import pyplot as plt import albumentations as A from tqdm.notebook import tqdm from xt_training import metrics, Runner from xt_training.runner import Logger from dfdc.datasets.video_dataset import VideoDataset from dfdc.models.video_models import FaceSequenceClassifier, FaceClassifier
Deepfake Detection Challenge
8,531,644
# Training hyperparameters and compute device.
epochs = 15
batch_size = 20
device = torch.device('cuda:0')
# Pick the compute device and report it.
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
print('Running on device: {}'.format(device))
Deepfake Detection Challenge
8,531,644
# Minimal preprocessing: PIL round-trip then tensor conversion (no augmentation).
data_transf = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
train_data = ImageData(df = df, data_dir = train_dir, transform = data_transf)
train_loader = DataLoader(dataset = train_data, batch_size = batch_size)
class Ensemble(torch.nn.Module):
    """Runs per-model views of the input through several sub-models and stacks
    their outputs.

    `unpack(x, i)` selects sub-model i's input (identity by default);
    `mapping` optionally redirects which input index each sub-model receives;
    `permute` moves the model axis after the batch axis in the output.
    """

    def __init__(self, unpack=None, permute=False, mapping=None, **kwargs):
        super().__init__()
        self.permute = permute
        self.mapping = mapping
        # Default unpack: every sub-model sees the raw input unchanged.
        self.unpack = (lambda x, i: x) if unpack is None else unpack
        for name, model in kwargs.items():
            self.add_module(name, model)

    def forward(self, x):
        outputs = []
        for idx, module in enumerate(self._modules.values()):
            source = idx if self.mapping is None else self.mapping[idx]
            outputs.append(module(self.unpack(x, source)))
        stacked = torch.stack(outputs)
        if self.permute:
            stacked = stacked.permute((1, 0, 2))
        return stacked
Deepfake Detection Challenge
8,531,644
# Use the GPU if one is present, otherwise the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
# Four face-level backbones; replacing the final fc with an empty Sequential
# makes each one emit its penultimate feature vector instead of a class score.
face_model1 = FaceClassifier(pretrained=False, base_model='resnext')
face_model1.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/face_model_best_alltrain_moreaug_20200317.pt'))
face_model1.classifier.fc = torch.nn.Sequential()
face_model2 = FaceClassifier(pretrained=False, base_model='resnet')
face_model2.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/face_model_best_lessaug_resnet101_20200321.pt'))
face_model2.classifier.fc = torch.nn.Sequential()
face_model3 = FaceClassifier(pretrained=False, base_model='resnext')
face_model3.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/face_model_best_20200314.pt'))
face_model3.classifier.fc = torch.nn.Sequential()
face_model4 = FaceClassifier(pretrained=False, base_model='resnext')
face_model4.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/face_model_best_20200223.pt'))
face_model4.classifier.fc = torch.nn.Sequential()
# Stack the four feature extractors; each sees the same input (unpack=None).
face_model = Ensemble(
    unpack=None,
    m1=face_model1,
    m2=face_model2,
    m3=face_model3,
    m4=face_model4,
)
face_model.to(device)
face_model.eval()

@torch.no_grad()
def face_model_transform(x):
    """Extract per-backbone features for a batch of faces (no gradients)."""
    return face_model(x.to(device))
Deepfake Detection Challenge
8,531,644
# ImageNet-pretrained ResNet-50 fine-tuned with Adam + cross-entropy.
# NOTE(review): the 1000-way ImageNet head is kept unchanged — confirm the
# intended label space.
model = models.resnet50(pretrained=True)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
# Normalize to roughly [-1, 1] and resize to 160x160 before feature extraction.
test_trans = A.ReplayCompose([
    A.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    A.Resize(160, 160, always_apply=True)
])
video_root = '/kaggle/input/deepfake-detection-challenge/'
# Stream every 10th frame of each test clip through the face-feature extractor.
test_dataset = VideoDataset(
    video_root,
    transform=test_trans,
    out_transform=face_model_transform,
    is_test=True,
    sample_frames=-1,
    shuffle=False,
    stride=10,
    n_frames=-1,
    device=device,
    reader='imutils',
    path_include='test_videos/',
)
batch_size = 1
num_workers = 0
test_loader = DataLoader(
    test_dataset, batch_size=batch_size, num_workers=num_workers
)
Deepfake Detection Challenge
8,531,644
%%time
# Standard training loop: forward, loss, backward, step; log every 500 batches.
for epoch in tqdm(range(epochs)):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        loss = loss_func(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 500 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, epochs, loss.item()))
# Five sequence-level heads over the face features; the fifth ('conv') reuses
# the first backbone's features via mapping=[0, 1, 2, 3, 0].
video_model1 = FaceSequenceClassifier(mode='linear')
video_model1.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/video_model_best_alltrain_lessaug_20200319.pt'))
video_model2 = FaceSequenceClassifier(mode='linear')
video_model2.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/video_model_best_lessaug_resnet101_20200321.pt'))
video_model3 = FaceSequenceClassifier(mode='linear')
video_model3.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/video_model_best_ep8_20200314.pt'))
video_model4 = FaceSequenceClassifier(mode='linear')
video_model4.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/video_model_best_20200223.pt'))
video_model5 = FaceSequenceClassifier(mode='conv')
video_model5.load_state_dict(torch.load('/kaggle/input/face-sequence-classifier/video_model_best_alltrain_lessaug_conv_20200319.pt'))
# Per-head weights applied to the ensemble outputs downstream.
scales = [1, 1, 1, 1, 0.5]

def unpack(x, i):
    # x is presumably (per-backbone features, sequence metadata) — selects
    # backbone i's feature slice for one head. TODO confirm against VideoDataset.
    return [x[0][:, i], x[1]]

video_model = Ensemble(
    unpack=unpack,
    permute=True,
    m1=video_model1,
    m2=video_model2,
    m3=video_model3,
    m4=video_model4,
    m5=video_model5,
    mapping=[0, 1, 2, 3, 0]
)
video_model.to(device)
video_model.eval()
Deepfake Detection Challenge
8,531,644
# BUG FIX: the sample-submission path contained a stray space
# ('.. /input/...'), which is not a valid path.
submit = pd.read_csv('../input/sample_submission.csv')
test_data = ImageData(df = submit, data_dir = test_dir, transform = data_transf)
test_loader = DataLoader(dataset = test_data, shuffle=False)
# Smoke test on the public train-sample clips before running the full test set.
sample_dataset = VideoDataset(
    video_root,
    transform=test_trans,
    out_transform=face_model_transform,
    is_test=True,
    sample_frames=-1,
    shuffle=False,
    stride=10,
    n_frames=-1,
    device=device,
    reader='imutils',
    path_include='train_sample_videos/',
)
# Pin the first sample to a known clip/label pair.
sample_dataset.samples[0] = ('/kaggle/input/deepfake-detection-challenge/train_sample_videos/aagfhgtpmv.mp4', 1)
sample_loader = DataLoader(
    sample_dataset, batch_size=batch_size, num_workers=num_workers
)
# Pull a single batch and run it through the ensemble.
for x, y in sample_loader:
    break
print(x[0].abs().mean())
x = [x_i.to(device) for x_i in x]
y.to(device)  # NOTE(review): result unused — Tensor.to() is not in-place
with torch.no_grad():
    y_pred = video_model(x)
# Apply the per-head weights and display the raw outputs.
for i, scale in enumerate(scales):
    y_pred[:, i] = y_pred[:, i] * scale
y_pred
Deepfake Detection Challenge
8,531,644
# Run inference over the test loader and collect the per-batch argmax class.
model.eval()  # BUG FIX: switch batch-norm/dropout to inference mode before predicting
predict = []
with torch.no_grad():  # no gradients needed at inference; saves memory
    for batch_i, (data, target) in enumerate(test_loader):
        data, target = data.to(device), target.to(device)
        output = model(data)
        # pred holds the index of the max logit per sample (class id).
        _, pred = torch.max(output.data, 1)
        predict.append(pred)
# xt_training Runner drives evaluation and returns the raw per-video predictions.
runner = Runner(model=video_model, device=device)
y_pred, _ = runner(test_loader, 'test', return_preds=True)
Deepfake Detection Challenge
8,531,644
<import_modules><EOS>
# Apply per-head weights, average across heads, then softmax -> column 1 is
# presumably P(fake) — confirm the class ordering used in training.
for i, scale in enumerate(scales):
    y_pred[:, i] = y_pred[:, i] * scale
labels = torch.nn.functional.softmax(y_pred.mean(dim=1), dim=1)[:, 1].numpy()
filenames = [os.path.basename(f) for f in test_dataset.video_files]
submission = pd.DataFrame({'filename': filenames, 'label': labels})
submission.to_csv('submission.csv', index=False)
# Sanity-check the score distribution.
plt.hist(submission.label, 30)
submission
Deepfake Detection Challenge
8,546,370
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<load_from_csv>
# Inline plots; silence library warnings.
%matplotlib inline
warnings.filterwarnings("ignore")
Deepfake Detection Challenge
8,546,370
# BUG FIX: the original paths contained a stray space ('.. /input/...'),
# which would raise FileNotFoundError; use the standard Kaggle layout.
train = pd.read_csv('../input/tabular-playground-series-jan-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-jan-2021/test.csv')
sub = pd.read_csv('../input/tabular-playground-series-jan-2021/sample_submission.csv')
# Collect the sorted list of .mp4 test clips plus grid dimensions for previews.
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted(f for f in os.listdir(test_dir) if f.endswith(".mp4"))
frame_h = 5
frame_l = 5
len(test_videos)
Deepfake Detection Challenge