kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
7,930,779
train.drop(['PID'], axis=1, inplace=True )<load_from_csv>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
7,930,779
df_test=pd.read_csv(".. /input/house-prices-advanced-regression-techniques/test.csv" )<count_missing_values>
input_size = 224
Deepfake Detection Challenge
7,930,779
df_test.isna().sum()<load_from_csv>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
7,930,779
submission = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/sample_submission.csv' )<load_from_csv>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
7,930,779
df_train=pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv" )<drop_column>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
7,930,779
missing = df_test.isnull().sum() missing = missing[missing>0] train.drop(missing.index, axis=1, inplace=True) train.drop(['Electrical'], axis=1, inplace=True) df_test.dropna(axis=1, inplace=True) df_test.drop(['Electrical'], axis=1, inplace=True )<count_duplicates>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
7,930,779
test = tqdm(range(0, len(df_test)) , desc='Matching') for i in test: for j in range(0, len(train)) : for k in range(1, len(df_test.columns)) : if df_test.iloc[i,k] == train.iloc[j,k]: continue else: break else: submission.iloc[i, 1] = train.iloc[j, -1] break test.close()<save_to_csv>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
7,930,779
submission.to_csv('enes_results.csv', index=False )<import_modules>
speed_test = False
Deepfake Detection Challenge
7,930,779
!pip install contractions nltk.download("stopwords") nltk.download('punkt') nltk.download('wordnet') for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) train = pd.read_csv('/kaggle/input/jigsaw-toxic-comment-classification-challenge/train.csv.zip') test = pd.read_csv('/kaggle/input/jigsaw-toxic-comment-classification-challenge/test.csv.zip') test_label = pd.read_csv('/kaggle/input/jigsaw-toxic-comment-classification-challenge/test_labels.csv.zip') sub=pd.read_csv('/kaggle/input/jigsaw-toxic-comment-classification-challenge/sample_submission.csv.zip' )<concatenate>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
7,930,779
class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] train_text = train['comment_text'] test_text = test['comment_text'] all_text = pd.concat([train_text, test_text] )<string_transform>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
7,930,779
<feature_engineering><EOS>
submission_df = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
7,808,440
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<categorify>
%matplotlib inline
Deepfake Detection Challenge
7,808,440
if False: word_vectorizer = pickle.load(open("word_vectorizer.pk", "rb")) char_vectorizer = pickle.load(open("char_vectorizer.pk", "rb")) train_word_features = word_vectorizer.transform(train_text) test_word_features = word_vectorizer.transform(test_text) train_char_features = char_vectorizer.transform(train_text) test_char_features = char_vectorizer.transform(test_text) train_features = hstack([train_char_features, train_word_features]) test_features = hstack([test_char_features, test_word_features] )<drop_column>
frames_per_vid = [17, 25, 30, 32, 35, 36, 38, 39, 40, 49, 56, 64, 72, 80, 81, 82, 100] public_LB = [0.46788, 0.46776, 0.46611, 0.46542, 0.46643, 0.46484, 0.46444, 0.46603, 0.46635, 0.46620, 0.46481, 0.46441, 0.46559, 0.46518, 0.46453, 0.46482, 0.46495] df_viz = pd.DataFrame({'frames_per_vid': frames_per_vid, 'public_LB':public_LB} )
Deepfake Detection Challenge
7,808,440
del train_word_features, test_word_features,train_char_features, test_char_features, del word_vectorizer, char_vectorizer<create_dataframe>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
7,808,440
%%time scores = [] submission = pd.DataFrame.from_dict({'id': test['id']}) for class_name in class_names: train_target = train[class_name] classifier = LogisticRegression(solver='sag',n_jobs=-1) cv_score = np.mean(cross_val_score(classifier, train_features, train_target, cv=3, scoring='roc_auc')) scores.append(cv_score) print('CV score for class {} is {}'.format(class_name, cv_score)) classifier.fit(train_features, train_target) submission[class_name] = classifier.predict_proba(test_features)[:, 1] with open(f'logreg_{class_name}.pk', 'wb')as f: pickle.dump(classifier, f) print('Total CV score is {}'.format(np.mean(scores))) submission.to_csv('submission.csv', index=False )<import_modules>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
7,808,440
import tensorflow as tf from tensorflow.keras import layers import os import re import math import numpy as np import matplotlib.pyplot as plt import pandas as pd<define_variables>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
7,808,440
GCS_DS_PATH = ".. /input/ranzcr-clip-catheter-line-classification"<load_from_csv>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
7,808,440
train_df = pd.read_csv(GCS_DS_PATH+"/train.csv") train_df.index = train_df["StudyInstanceUID"] del train_df["StudyInstanceUID"] train_annot_df = pd.read_csv(GCS_DS_PATH+"/train_annotations.csv") train_annot_df.index = train_annot_df["StudyInstanceUID"] del train_annot_df["StudyInstanceUID"]<define_variables>
frames_per_video = 65 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
7,808,440
classes = list(train_df.columns[:-1]) classes_normal= [name for name in classes[:-1] if name.split(" - ")[1] == "Normal"] classes_abnormal= [name for name in classes[:-1] if name.split(" - ")[1] == "Abnormal"] classes_borderline = [name for name in classes[:-1] if name.split(" - ")[1] == "Borderline"] classes_count = train_df[classes].sum(axis = 0) num_classes = len(classes_count) print("Number of Classes: {}".format(num_classes)) classes_count<define_variables>
input_size = 224
Deepfake Detection Challenge
7,808,440
class_weights = {} ls = list(classes_count.values) tot_samples = sum(ls) for i in range(num_classes): class_weights[i] = tot_samples/(num_classes*ls[i]) class_weights<count_values>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
7,808,440
patient_ids = train_df["PatientID"].unique() patientwise_count = train_df['PatientID'].value_counts() num_patients = len(patientwise_count) print("Number of patients: ",num_patients) patientwise_count<define_variables>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
7,808,440
IMAGE_SIZE = [600,600] AUTO = tf.data.experimental.AUTOTUNE TEST_FILENAMES = tf.io.gfile.glob(GCS_DS_PATH + '/test_tfrecords/*.tfrec' )<categorify>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
7,808,440
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.image.resize(image, [*IMAGE_SIZE]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "StudyInstanceUID" : tf.io.FixedLenFeature([], tf.string), "image" : tf.io.FixedLenFeature([], tf.string), "ETT - Abnormal" : tf.io.FixedLenFeature([], tf.int64), "ETT - Borderline" : tf.io.FixedLenFeature([], tf.int64), "ETT - Normal" : tf.io.FixedLenFeature([], tf.int64), "NGT - Abnormal" : tf.io.FixedLenFeature([], tf.int64), "NGT - Borderline" : tf.io.FixedLenFeature([], tf.int64), "NGT - Incompletely Imaged" : tf.io.FixedLenFeature([], tf.int64), "NGT - Normal" : tf.io.FixedLenFeature([], tf.int64), "CVC - Abnormal" : tf.io.FixedLenFeature([], tf.int64), "CVC - Borderline" : tf.io.FixedLenFeature([], tf.int64), "CVC - Normal" : tf.io.FixedLenFeature([], tf.int64), "Swan Ganz Catheter Present" : tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = [example['ETT - Abnormal'], example['ETT - Borderline'], example['ETT - Normal'], example['NGT - Abnormal'], example['NGT - Borderline'], example['NGT - Incompletely Imaged'], example['NGT - Normal'], example['CVC - Abnormal'], example['CVC - Borderline'], example['CVC - Normal'], example['Swan Ganz Catheter Present']] label = [tf.cast(i,tf.float32)for i in label] return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "StudyInstanceUID" : tf.io.FixedLenFeature([], tf.string), "image" : tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['StudyInstanceUID'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = 
tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset<split>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
7,808,440
def data_augment(image, label): image = tf.image.random_flip_left_right(image) return image,label def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) print('Dataset: {} unlabeled test images'.format(NUM_TEST_IMAGES))<train_model>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
7,808,440
BATCH_SIZE = 16 * strategy.num_replicas_in_sync test_ds = get_test_dataset() print("Test:", test_ds )<install_modules>
speed_test = False
Deepfake Detection Challenge
7,808,440
!pip install /kaggle/input/kerasapplications -q !pip install /kaggle/input/efficientnet-keras-source-code/ -q --no-deps <load_pretrained>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
7,808,440
model = tf.keras.models.load_model(".. /input/ranzcr-clip-tpu/model.h5" )<save_to_csv>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
7,808,440
<set_options><EOS>
submission_df = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
7,955,513
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<train_model>
%matplotlib inline
Deepfake Detection Challenge
7,955,513
print('Train images: %d' %len(os.listdir(os.path.join(WORK_DIR, "train"))))<load_from_csv>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
7,955,513
train = pd.read_csv(os.path.join(WORK_DIR, "train.csv")) train_images = WORK_DIR + "/train/" + train['StudyInstanceUID'] + '.jpg' ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv')) test_images = WORK_DIR + "/test/" + ss['StudyInstanceUID'] + '.jpg' label_cols = ss.columns[1:] labels = train[label_cols].values train_annot = pd.read_csv(os.path.join(WORK_DIR, "train_annotations.csv")) print('Labels: ', '*'*20, ' ', label_cols.values) print('*'*50) train.head()<define_variables>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
7,955,513
BATCH_SIZE = 8 * 1 STEPS_PER_EPOCH = len(train)* 0.85 / BATCH_SIZE VALIDATION_STEPS = len(train)* 0.15 / BATCH_SIZE EPOCHS = 30 TARGET_SIZE = 750<categorify>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
7,955,513
def build_decoder(with_labels = True, target_size =(TARGET_SIZE, TARGET_SIZE), ext = 'jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels = 3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels = 3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32)/ 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels = True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) img = tf.image.adjust_brightness(img, 0.1) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset(paths, labels = None, bsize = 32, cache = True, decode_fn = None, augment_fn = None, augment = True, repeat = True, shuffle = 1024, cache_dir = ""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else(paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls = AUTO) dset = dset.cache(cache_dir)if cache else dset dset = dset.map(augment_fn, num_parallel_calls = AUTO)if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle)if shuffle else dset dset = dset.batch(bsize ).prefetch(AUTO) return dset<create_dataframe>
frames_per_video = 150 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
7,955,513
test_df = build_dataset( test_images, bsize = BATCH_SIZE, repeat = False, shuffle = False, augment = False, cache = False) test_df<choose_model_class>
input_size =224
Deepfake Detection Challenge
7,955,513
<choose_model_class>
mean = [0.43216, 0.394666, 0.37645] std = [0.22803, 0.22145, 0.216989] normalize_transform = Normalize(mean,std )
Deepfake Detection Challenge
7,955,513
print('Our Xception CNN has %d layers' %len(model.layers))<create_dataframe>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
7,955,513
img_tensor = build_dataset( pd.Series(test_images[0]), bsize = 1,repeat = False, shuffle = False, augment = False, cache = False )<save_to_csv>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
7,955,513
ss[label_cols] = model.predict(test_df, verbose = 1) ss.to_csv('submission.csv', index = False )<define_variables>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
7,955,513
class CFG: device = 'GPU' cpu_workers = 2 debug = True seed = 13353 batch_size = 50 num_tta = 2 num_folds = 3 fold_idx = False fold_blend = 'pmean' model_blend = 'pmean' power = 1/11 w_public = 0.25 lgb_folds = 5 label_features = False sort_targets = True pred_as_feature = True lgb_stop_rounds = 200 lgb_params = {'objective': 'binary', 'metrics': 'auc', 'n_estimators': 10000, 'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': 5, 'min_child_samples': 20, 'subsample': 0.3, 'colsample_bytree': 0.5, 'reg_alpha': 0.1, 'reg_lambda': 0.1, 'silent': True, 'verbosity': -1, 'n_jobs' : -1, 'random_state': 13353} data_path = '/kaggle/input/ranzcr-clip-catheter-line-classification/' models = ['/kaggle/input/ranzcr-v12/', '/kaggle/input/ranzcr-v15-pub/', '/kaggle/input/ranzcr-v17-pub/', '/kaggle/input/ranzcr-v13-pub/', '/kaggle/input/ranzcr-v14-pub/']<drop_column>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
7,955,513
CFG = dict(vars(CFG)) for key in ['__dict__', '__doc__', '__module__', '__weakref__']: del CFG[key]<load_pretrained>
speed_test = False
Deepfake Detection Challenge
7,955,513
CFGs = [] for model in CFG['models']: model_cfg = pickle.load(open(model + 'configuration.pkl', 'rb')) CFGs.append(model_cfg) print('Numer of models:', len(CFGs))<set_options>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
7,955,513
pd.set_option('display.max_columns', 100) ImageFile.LOAD_TRUNCATED_IMAGES = True %matplotlib inline warnings.filterwarnings('ignore') sys.path.append('.. /input/timm-pytorch-image-models/pytorch-image-models-master') <train_model>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
7,955,513
<compute_test_metric><EOS>
submission_df = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
8,684,188
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<load_from_csv>
!pip install.. /input/kaggle-efficientnet-repo/efficientnet-1.0.0-py3-none-any.whl
Deepfake Detection Challenge
8,684,188
df = pd.read_csv(CFG['data_path'] + 'sample_submission.csv') CFG['targets'] = ['ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present'] CFG['num_classes'] = len(CFG['targets']) if CFG['debug'] and len(df)== 3582: print('Subsetting data in the debug mode...') df = df.head(CFG['batch_size']) print(df.shape )<load_from_csv>
import pandas as pd import tensorflow as tf import cv2 import glob from tqdm.notebook import tqdm import numpy as np import os from keras.layers import * from keras import Model import matplotlib.pyplot as plt import time from keras.applications.xception import Xception import efficientnet.keras as efn
Deepfake Detection Challenge
8,684,188
for m in CFG['models']: tmp_train_preds = pd.read_csv(m + '/oof.csv') tmp_train_preds.columns = ['StudyInstanceUID'] + CFG['targets'] + ['PatientID', 'fold'] + [m + ' ' + c for c in CFG['targets']] if m == CFG['models'][0]: train_preds = tmp_train_preds else: train_preds = train_preds.merge(tmp_train_preds[['StudyInstanceUID'] + [m + ' ' + c for c in CFG['targets']]], how = 'left', on = 'StudyInstanceUID') weights = [] for model_idx, m in enumerate(CFG['models']): score = 0 for fold_idx in range(5): tmp_train_preds = train_preds.loc[train_preds['fold'] == fold_idx] score += get_score(tmp_train_preds[CFG['targets']].values, tmp_train_preds[[m + ' ' + c for c in CFG['targets']]].values)[0] / 5 weights.append(score) sorted_ids = list(np.argsort(np.array(weights))) sorted_weights = [weights[i] for i in sorted_ids] CFG['models'] = [CFG['models'][i] for i in sorted_ids] CFGs = [CFGs[i] for i in sorted_ids] print('-' * 45) print('{:<5}{:<33}{:>5}'.format('ID', 'Model', 'AUC')) print('-' * 45) for model_idx, m in enumerate(CFG['models']): print('{:<5}{:<33}{:.4f}'.format(model_idx + 1, m, sorted_weights[model_idx])) print('-' * 45 )<categorify>
import torch import torch.nn as nn import torch.nn.functional as F
Deepfake Detection Challenge
8,684,188
for c in CFG['targets']: class_preds = train_preds.filter(like = 'kaggle' ).filter(like = c ).columns for blend in ['amean', 'median', 'gmean', 'pmean', 'rmean']: train_preds[blend + ' ' + c] = compute_blend(train_preds, class_preds, blend, CFG) for blend in ['amean', 'median', 'gmean', 'pmean', 'rmean']: train_preds['w' + blend + ' ' + c] = compute_blend(train_preds, class_preds, blend, CFG, weights = np.array(sorted_weights)) print('-' * 18) print('{:<10}{:>5}'.format('Blend', 'AUC')) print('-' * 18) for blend in ['amean', 'median', 'gmean', 'pmean', 'rmean']: score = 0 for fold_idx in range(5): tmp_train_preds = train_preds.loc[train_preds['fold'] == fold_idx] score += get_score(tmp_train_preds[CFG['targets']].values, tmp_train_preds[[blend + ' ' + c for c in CFG['targets']]].values)[0] / 5 print('{:<10}{:>5.4f}'.format(blend, score)) print('-' * 18) for blend in ['amean', 'median', 'gmean', 'pmean', 'rmean']: score = 0 for fold_idx in range(5): tmp_train_preds = train_preds.loc[train_preds['fold'] == fold_idx] score += get_score(tmp_train_preds[CFG['targets']].values, tmp_train_preds[['w' + blend + ' ' + c for c in CFG['targets']]].values)[0] / 5 print('{:<10}{:>5.4f}'.format('w' + blend, score)) print('-' * 18 )<create_dataframe>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,684,188
def get_dataset(CFG): class ImageData(Dataset): def __init__(self, df, path, transform = None, labeled = False, indexed = False): self.df = df self.path = path self.transform = transform self.labeled = labeled self.indexed = indexed def __len__(self): return len(self.df) def __getitem__(self, idx): path = os.path.join(self.path, self.df.iloc[idx]['StudyInstanceUID'] + '.jpg') image = cv2.imread(path, cv2.IMREAD_GRAYSCALE) if image is None: raise FileNotFoundError(path) mask = image > 0 image = image[np.ix_(mask.any(1), mask.any(0)) ] image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform is not None: image = self.transform(image = image)['image'] if CFG['normalize'] == 'public': image = image.astype(np.float32) image = image.transpose(2, 0, 1) image = torch.tensor(image ).float() if self.labeled: label = torch.tensor(self.df.iloc[idx][CFG['targets']] ).float() if self.indexed: idx = torch.tensor(idx) return idx, image, label else: return image, label else: return image return ImageData<choose_model_class>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,684,188
def get_model(CFG, device, num_classes): if CFG['weights'] != 'public': model = timm.create_model(model_name = CFG['backbone'], pretrained = False, in_chans = CFG['channels']) if 'efficient' in CFG['backbone']: model.classifier = nn.Linear(model.classifier.in_features, num_classes) else: model.fc = nn.Linear(model.fc.in_features, num_classes) else: class CustomModel(nn.Module): def __init__(self, model_name = 'resnet200d', out_dim = 11, pretrained = False): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, out_dim) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output model = CustomModel(CFG['backbone'], num_classes, True) return model<create_dataframe>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,684,188
cv_start = time.time() gc.collect() all_counter = 0 fold_counter = 0 if not CFG['fold_idx'] else CFG['fold_idx'] all_cnn_preds = None for model_idx in range(len(CFG['models'])) : ImageData = get_dataset(CFGs[model_idx]) test_dataset = ImageData(df = df, path = CFG['data_path'] + 'test/', transform = get_augs(CFGs[model_idx], image_size = CFGs[model_idx]['image_size']), labeled = False, indexed = False) test_loader = DataLoader(test_dataset, batch_size = CFG['batch_size'], shuffle = False, num_workers = CFG['cpu_workers'], pin_memory = True) for fold_idx in tqdm(range(CFG['num_folds'])) : model = get_model(CFGs[model_idx], device = device, num_classes = CFG['num_classes']) model = model.to(device) model.load_state_dict(torch.load(CFG['models'][model_idx] + 'weights_fold{}.pth'.format(fold_counter),map_location = device)) model.eval() PROBS = [] with torch.no_grad() : for batch_idx, inputs in enumerate(test_loader): inputs = inputs.to(device) probs = torch.zeros(( inputs.shape[0], CFG['num_classes']), device = device) for tta_idx in range(CFG['num_tta']): preds = model(get_tta_flips(inputs, tta_idx)) probs += preds.sigmoid() PROBS.append(probs.detach().cpu() / CFG['num_tta']) cnn_preds = pd.DataFrame(torch.cat(PROBS ).numpy() , columns = [CFG['models'][model_idx] + str(fold_idx)+ '/' + c for c in CFG['targets']]) all_cnn_preds = pd.concat([all_cnn_preds, cnn_preds], axis = 1) all_counter += 1 if not CFG['fold_idx']: fold_counter += 1 if fold_counter == CFG['num_folds']: fold_counter = 0 del model, inputs, preds, probs, PROBS gc.collect() del test_loader, test_dataset gc.collect() print('Finished {} preds x {} TTA in {:.2f} hours'.format(all_counter, CFG['num_tta'],(time.time() - cv_start)/ 3600))<categorify>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,684,188
print('Blending fold predictions with: ' + CFG['fold_blend']) for m in CFG['models']: for c in CFG['targets']: class_preds = all_cnn_preds.filter(like = m ).filter(like = c ).columns all_cnn_preds[m + c] = compute_blend(all_cnn_preds, class_preds, CFG['fold_blend'], CFG) all_cnn_preds.drop(class_preds, axis = 1, inplace = True) all_cnn_preds.head()<load_from_csv>
input_size = 224
Deepfake Detection Challenge
8,684,188
for m in CFG['models']: tmp_train_preds = pd.read_csv(m + '/oof.csv') tmp_train_preds.columns = ['StudyInstanceUID'] + CFG['targets'] + ['PatientID', 'fold'] + [m + '' + c for c in CFG['targets']] if m == CFG['models'][0]: train_preds = tmp_train_preds else: train_preds = train_preds.merge(tmp_train_preds[['StudyInstanceUID'] + [m + '' + c for c in CFG['targets']]], how = 'left', on = 'StudyInstanceUID') train_preds.head()<concatenate>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,684,188
test_preds = all_cnn_preds.copy() test_preds = pd.concat([df['StudyInstanceUID'], test_preds], axis = 1) test_preds.head()<create_dataframe>
frames_per_video = 10 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet)
Deepfake Detection Challenge
8,684,188
X = train_preds.copy() X_test = test_preds.copy() drop_features = ['StudyInstanceUID', 'PatientID', 'fold'] + CFG['targets'] features = [f for f in X.columns if f not in drop_features] print(len(features), 'features') display(features )<load_from_csv>
class HisResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(HisResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
8,684,188
folds = pd.read_csv('/kaggle/input/how-to-properly-split-folds/train_folds.csv') del X['fold'] X = X.merge(folds[['StudyInstanceUID', 'fold']], how = 'left', on = 'StudyInstanceUID' )<sort_values>
detection_graph = tf.Graph() with detection_graph.as_default() : od_graph_def = tf.compat.v1.GraphDef() with tf.io.gfile.GFile('.. /input/mobilenet-face/frozen_inference_graph_face.pb', 'rb')as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='' )
Deepfake Detection Challenge
8,684,188
if CFG['sort_targets']: sorted_targets = ['Swan Ganz Catheter Present', 'ETT - Normal', 'ETT - Abnormal', 'ETT - Borderline', 'NGT - Abnormal', 'NGT - Normal', 'NGT - Incompletely Imaged', 'NGT - Borderline', 'CVC - Abnormal', 'CVC - Normal', 'CVC - Borderline']<prepare_x_and_y>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = HisResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
8,684,188
cnn_oof = np.zeros(( len(X), CFG['num_classes'])) lgb_oof = np.zeros(( len(X), CFG['num_classes'])) lgb_tst = np.zeros(( len(X_test), CFG['lgb_folds'], CFG['num_classes'])) all_lgb_preds = None cv_start = time.time() print('-' * 45) print('{:<28}{:<7}{:>5}'.format('Label', 'Model', 'AUC')) print('-' * 45) for label in sorted_targets: y = X[label] label_features = [f for f in features if label in f] if CFG['label_features'] else features lgb_auc = 0 cnn_auc = 0 for fold in range(CFG['lgb_folds']): trn_idx = X.loc[X['fold'] != fold].index val_idx = X.loc[X['fold'] == fold].index X_train, y_train = X.iloc[trn_idx][label_features], y.iloc[trn_idx] X_valid, y_valid = X.iloc[val_idx][label_features], y.iloc[val_idx] X_test_label = X_test[label_features] clf = lgb.LGBMClassifier(**CFG['lgb_params']) clf = clf.fit(X_train, y_train, eval_set = [(X_valid, y_valid)], early_stopping_rounds = CFG['lgb_stop_rounds'], verbose = False) cnn_oof[val_idx, CFG['targets'].index(label)] = compute_blend(X_valid, list(X_valid.filter(like = label ).columns), CFG['fold_blend'], CFG) lgb_oof[val_idx, CFG['targets'].index(label)] = clf.predict_proba(X_valid)[:, 1] lgb_tst[:, fold, CFG['targets'].index(label)] = clf.predict_proba(X_test_label)[:, 1] cnn_auc += roc_auc_score(y_valid ,cnn_oof[val_idx, CFG['targets'].index(label)])/ CFG['lgb_folds'] lgb_auc += roc_auc_score(y_valid, lgb_oof[val_idx, CFG['targets'].index(label)])/ CFG['lgb_folds'] print('{:<29}{:<7}{:>5.4f}'.format(label, 'CNN', cnn_auc)) print('{:<29}{:<7}{:>5.4f}'.format(label, 'LGB', lgb_auc)) print('-' * 45) if cnn_auc >= lgb_auc: for fold in range(CFG['lgb_folds']): val_idx = X.loc[X['fold'] == fold].index X_valid, y_valid = X.iloc[val_idx][label_features], y.iloc[val_idx] lgb_oof[val_idx, CFG['targets'].index(label)] = compute_blend(X_valid, list(X_valid.filter(like = label ).columns), CFG['fold_blend'], CFG) lgb_tst[:, fold, CFG['targets'].index(label)] = compute_blend(X_test_label, list(X_test_label.filter(like = label 
).columns), CFG['fold_blend'], CFG) for fold in range(CFG['lgb_folds']): lgb_preds = pd.DataFrame(lgb_tst[:, fold, CFG['targets'].index(label)], columns = [str(fold)+ '/' + label]) all_lgb_preds = pd.concat([all_lgb_preds, lgb_preds], axis = 1) if CFG['pred_as_feature']: X['LGB ' + label] = lgb_oof[:, CFG['targets'].index(label)] X_test['LGB ' + label] = np.mean(lgb_tst[:, :, CFG['targets'].index(label)], axis = 1) features.append('LGB ' + label) score_cnn = 0 score_lgb = 0 for fold in range(CFG['lgb_folds']): val_idx = X.loc[X['fold'] == fold].index score_cnn += get_score(X.iloc[val_idx][CFG['targets']].values, cnn_oof[val_idx, :])[0] / CFG['lgb_folds'] score_lgb += get_score(X.iloc[val_idx][CFG['targets']].values, lgb_oof[val_idx, :])[0] / CFG['lgb_folds'] print('{:<29}{:<7}{:>5.4f}'.format('OVERALL', 'CNN', score_cnn)) print('{:<29}{:<7}{:>5.4f}'.format('OVERALL', 'LGB', score_lgb)) print('-' * 45) print('Finished in {:.2f} minutes'.format(( time.time() - cv_start)/ 60)) del cnn_oof, lgb_tst, lgb_oof, clf, X_train, X_valid, X_test, X_test_label, y_train, y_valid del features, label_features, trn_idx, val_idx, folds, train_preds, test_preds gc.collect()<categorify>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
8,684,188
print('Blending fold predictions with: ' + CFG['fold_blend']) for c in CFG['targets']: class_preds = all_lgb_preds.filter(like = c ).columns all_lgb_preds[c] = compute_blend(all_lgb_preds, class_preds, CFG['fold_blend'], CFG) all_lgb_preds.drop(class_preds, axis = 1, inplace = True) all_lgb_preds.head()<define_variables>
cm = detection_graph.as_default() cm.__enter__()
Deepfake Detection Challenge
8,684,188
if CFG['w_public'] > 0: gc.collect() BATCH_SIZE = 96 IMAGE_SIZE = 640 TEST_PATH = '.. /input/ranzcr-clip-catheter-line-classification/test' MODEL_PATH_resnet200d = '.. /input/resnet200d-public/resnet200d_320_CV9632.pth' MODEL_PATH_seresnet152d = '.. /input/seresnet152d-cv9615/seresnet152d_320_CV96.15.pth' class TestDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['StudyInstanceUID'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{TEST_PATH}/{file_name}.jpg' image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE) mask = image > 0 image = image[np.ix_(mask.any(1), mask.any(0)) ] image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] return image def get_transforms(CFG): return A.Compose([A.Resize(IMAGE_SIZE, IMAGE_SIZE), A.Normalize() , ToTensorV2() ]) class ResNet200D(nn.Module): def __init__(self, model_name = 'resnet200d'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output class SeResNet152D(nn.Module): def __init__(self, model_name = 'seresnet152d'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output def 
inference(models, test_loader, device): tk0 = tqdm(enumerate(test_loader), total = len(test_loader)) probs = [] for i,(images)in tk0: images = images.to(device) avg_preds = [] for model in models: with torch.no_grad() : y_preds1 = model(images) y_preds2 = model(images.flip(-1)) y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() + y_preds2.sigmoid().to('cpu' ).numpy())/ 2 avg_preds.append(y_preds) avg_preds = np.mean(avg_preds, axis = 0) probs.append(avg_preds) probs = np.concatenate(probs) return probs models200D = [] model = ResNet200D() model.load_state_dict(torch.load(MODEL_PATH_resnet200d)['model']) model.eval() model.to(device) models200D.append(model) del model models152D = [] model = SeResNet152D() model.load_state_dict(torch.load(MODEL_PATH_seresnet152d)['model']) model.eval() model.to(device) models152D.append(model) del model test_dataset = TestDataset(df, transform = get_transforms(CFG)) test_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle = False, num_workers = CFG['cpu_workers'], pin_memory = True) predictions200d = inference(models200D, test_loader, device) predictions152d = inference(models152D, test_loader, device) target_cols = df.iloc[:, 1:12].columns.tolist() predictions200d = pd.DataFrame(predictions200d, columns = ['200d/' + c for c in target_cols]) predictions152d = pd.DataFrame(predictions152d, columns = ['152d/' + c for c in target_cols]) predictions = pd.concat([predictions200d, predictions152d], axis = 1) df_pub = predictions.copy() display(df_pub.head() )<categorify>
config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True sess=tf.compat.v1.Session(graph=detection_graph, config=config) image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') boxes_tensor = detection_graph.get_tensor_by_name('detection_boxes:0') scores_tensor = detection_graph.get_tensor_by_name('detection_scores:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0' )
Deepfake Detection Challenge
8,684,188
if CFG['w_public'] == 0: df_pub = all_lgb_preds.copy() else: for c in CFG['targets']: class_preds = df_pub.filter(like = c ).columns df_pub[c] = compute_blend(df_pub, class_preds, CFG['model_blend'], CFG, weights = np.array([2/3, 1/3])) df_pub.drop(class_preds, axis = 1, inplace = True) df_pub.head()<categorify>
def get_img(images): global boxes,scores,num_detections im_heights,im_widths=[],[] imgs=[] for image in images: (im_height,im_width)=image.shape[:-1] imgs.append(image) im_heights.append(im_height) im_widths.append(im_widths) imgs=np.array(imgs) (boxes, scores_)= sess.run( [boxes_tensor, scores_tensor], feed_dict={image_tensor: imgs}) finals=[] for x in range(boxes.shape[0]): scores=scores_[x] max_=np.where(scores==scores.max())[0][0] box=boxes[x][max_] ymin, xmin, ymax, xmax = box (left, right, top, bottom)=(xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height) left, right, top, bottom = int(left), int(right), int(top), int(bottom) image=imgs[x] finals.append(cv2.cvtColor(cv2.resize(image[max([0,top-40]):bottom+80,max([0,left-40]):right+80],(240,240)) ,cv2.COLOR_BGR2RGB)) return finals def detect_video(video): frame_count=10 capture = cv2.VideoCapture(video) v_len = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) frame_idxs = np.linspace(0,v_len,frame_count, endpoint=False, dtype=np.int) imgs=[] i=0 for frame_idx in range(int(v_len)) : ret = capture.grab() if not ret: print("Error grabbing frame %d from movie %s" %(frame_idx, video)) if frame_idx >= frame_idxs[i]: if frame_idx-frame_idxs[i]>20: return None ret, frame = capture.retrieve() if not ret or frame is None: print("Error retrieving frame %d from movie %s" %(frame_idx, video)) else: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) imgs.append(frame) i += 1 if i >= len(frame_idxs): break imgs=get_img(imgs) if len(imgs)<10: return None return np.hstack(imgs )
Deepfake Detection Challenge
8,684,188
all_preds = all_lgb_preds.copy() all_preds.columns = ['my/' + c for c in all_preds.columns] df_pub.columns = ['public/' + c for c in df_pub.columns] preds = pd.concat([all_preds, df_pub], axis = 1) for c in CFG['targets']: class_preds = preds.filter(like = c ).columns preds[c] = compute_blend(preds, class_preds, CFG['model_blend'], CFG, weights = np.array([1 - CFG['w_public'], CFG['w_public']])) preds.drop(class_preds, axis = 1, inplace = True) preds.head()<save_to_csv>
res_predictions =[]
Deepfake Detection Challenge
8,684,188
if all_counter == len(CFG['models'] * CFG['num_folds']): for c in CFG['targets']: df[c] = preds[c].rank(pct = True) df.to_csv('submission.csv', index = False) display(df.head() )<install_modules>
for x in tqdm(glob.glob('.. /input/deepfake-detection-challenge/test_videos/*.mp4')) : try: filename=x.replace('.. /input/deepfake-detection-challenge/test_videos/','' ).replace('.mp4','.jpg') a=detect_video(x) y_pred = predict_on_video(x, batch_size=frames_per_video) res_predictions.append(y_pred) if a is None: continue cv2.imwrite('./videos/'+filename,a) except Exception as err: print(err )
Deepfake Detection Challenge
8,684,188
!pip install /kaggle/input/kerasapplications -q !pip install /kaggle/input/efficientnet-keras-source-code/ -q --no-deps<import_modules>
bottleneck_EfficientNetB1 = efn.EfficientNetB1(weights=None,include_top=False,pooling='avg') inp=Input(( 10,240,240,3)) x=TimeDistributed(bottleneck_EfficientNetB1 )(inp) x = LSTM(128 )(x) x = Dense(64, activation='elu' )(x) x = Dense(1,activation='sigmoid' )(x) model_EfficientNetB1=Model(inp,x) bottleneck_Xception = Xception(weights=None, include_top=False,pooling='avg') y=TimeDistributed(bottleneck_Xception )(inp) y = LSTM(128 )(y) y = Dense(64, activation='elu' )(y) y = Dense(1,activation='sigmoid' )(y) model_Xception=Model(inp,y )
Deepfake Detection Challenge
8,684,188
import os, gc import efficientnet.tfkeras as efn import numpy as np import pandas as pd import tensorflow as tf<categorify>
model_EfficientNetB1.load_weights('.. /input/efficientnetb1dfdc/EfficientNetB1-e_2_b_4_f_30-10.h5') model_Xception.load_weights('.. /input/xceptiondfdc/Xception-e_2_b_4_f_30-10.h5' )
Deepfake Detection Challenge
8,684,188
def auto_select_accelerator() : try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print("Running on TPU:", tpu.master()) except ValueError: strategy = tf.distribute.get_strategy() print(f"Running on {strategy.num_replicas_in_sync} replicas") return strategy def build_decoder(with_labels=True, target_size=(300, 300), ext='jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels=3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels=3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32)/ 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels=True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else(paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls=AUTO) dset = dset.cache(cache_dir)if cache else dset dset = dset.map(augment_fn, num_parallel_calls=AUTO)if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle)if shuffle else dset dset = 
dset.batch(bsize ).prefetch(AUTO) return dset<define_variables>
def get_birghtness(img): return img/img.max() def process_img(img,flip=False): imgs=[] for x in range(10): if flip: imgs.append(get_birghtness(cv2.flip(img[:,x*240:(x+1)*240,:],1))) else: imgs.append(get_birghtness(img[:,x*240:(x+1)*240,:])) return np.array(imgs )
Deepfake Detection Challenge
8,684,188
COMPETITION_NAME = "ranzcr-clip-catheter-line-classification" strategy = auto_select_accelerator() BATCH_SIZE = strategy.num_replicas_in_sync * 16<load_from_csv>
sample_submission = pd.read_csv(".. /input/deepfake-detection-challenge/sample_submission.csv") test_files=glob.glob('./videos/*.jpg') submission=pd.DataFrame() submission['filename']=os.listdir(( '.. /input/deepfake-detection-challenge/test_videos/')) submission['label']=0.5 filenames=[] batch=[] batch1=[] preds=[]
Deepfake Detection Challenge
8,684,188
model_paths = [ '.. /input/ranzcr-last-models/0.952_model_640_47.h5', '.. /input/ranzcr-last-models/0.953_model_616_51.h5', '.. /input/ranzcr-last-models/0.953_model_640_43.h5', '.. /input/ranzcr-last-models/0.954_model_640_42.h5', '.. /input/ranzcr-last-models/0.954_model_632_48.h5', ] subs = [] for model_path in model_paths: load_dir = f"/kaggle/input/{COMPETITION_NAME}/" sub_df = pd.read_csv(load_dir + 'sample_submission.csv') test_paths = load_dir + "test/" + sub_df['StudyInstanceUID'] + '.jpg' label_cols = sub_df.columns[1:] image_size = int(model_path.split('_')[2]) test_decoder = build_decoder(with_labels=False, target_size=(image_size, image_size)) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder ) with strategy.scope() : model = tf.keras.models.load_model(model_path) print('predict:', model_path) sub_df[label_cols] = model.predict(dtest, verbose=1) subs.append(sub_df) del model gc.collect()<save_to_csv>
new_preds=[] for x,y in zip(preds,res_predictions): new_preds.append(x[0]+(0.2*y)) print(sum(new_preds)/len(new_preds))
Deepfake Detection Challenge
8,684,188
submission = pd.concat(subs) submission = submission.groupby('StudyInstanceUID' ).mean() submission.to_csv('submission.csv') submission<install_modules>
for x,y in zip(new_preds,filenames): submission.loc[submission['filename']==y,'label']=x
Deepfake Detection Challenge
8,684,188
<install_modules><EOS>
submission.to_csv('submission.csv', index=False) !rm -r videos
Deepfake Detection Challenge
8,119,370
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<install_modules>
%matplotlib inline
Deepfake Detection Challenge
8,119,370
!pip install.. /input/efficientnet-pyotrch/EfficientNet-PyTorch-master/ > /dev/null<install_modules>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
8,119,370
!pip install.. /input/segmentation-models-pytorch/segmentation_models.pytorch-master/ > /dev/null<import_modules>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,119,370
class RANZCRDataset(torch.utils.data.Dataset): def __init__( self, df, root, ext, path_col, use_timm_aug=False, transforms=None, augmentations=None, ): super().__init__() df = df.reset_index(drop=True ).copy() self.transforms = transforms self.augmentations = augmentations self.root = root self.use_timm_aug = use_timm_aug self.image_names = self._prepare_image_names(df[path_col].tolist() , root, ext) def __len__(self): return len(self.image_names) def _prepare_image_names(self, basenames: List[str], root: str, ext: str): return [pjoin(root, el)+ ext for el in basenames] def _prepare_img_target_from_idx(self, idx: int): image_name = self.image_names[idx] img = Image.open(image_name) if not self.use_timm_aug: img = np.array(img) if self.augmentations is not None: if self.use_timm_aug: img = self.augmentations(img) else: img = self.augmentations(image=img)["image"] if self.use_timm_aug: img = np.array(img) if self.transforms is not None: img = self.transforms(img) return img def __getitem__(self, index: int): img = self._prepare_img_target_from_idx(index) return img<define_variables>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,119,370
EFFNETB6_EMB_DIM = 2304 EFFNETB5_EMB_DIM = 2048 EFFNETB4_EMB_DIM = 1792 EFFNETB3_EMB_DIM = 1536 EFFNETB1_EMB_DIM = 1280 RESNET50_EMB_DIM = 2048 REXNET200_EMB_DIM = 2560 VIT_EMB_DIM = 768 NF_RESNET50_EMB_DIM = 2048 EPS = 1e-6 class TaylorSoftmax(nn.Module): def __init__(self, dim=1, n=2): super(TaylorSoftmax, self ).__init__() assert n % 2 == 0 self.dim = dim self.n = n def forward(self, x): fn = torch.ones_like(x) denor = 1. for i in range(1, self.n+1): denor *= i fn = fn + x.pow(i)/ denor out = fn / fn.sum(dim=self.dim, keepdims=True) return out class CNNModel(nn.Module): def __init__( self, classifiier_config: Mapping[str, Any], encoder_type: str, device: str, use_pretrained_encoder: bool = False, path_to_chkp: Optional[str] = None, use_taylorsoftmax: bool = False, one_channel: bool = True, ): super().__init__() if path_to_chkp is not None: use_pretrained_encoder = False if encoder_type == "rexnet_200": self.encoder = timm.create_model( encoder_type, pretrained=use_pretrained_encoder ) if one_channel: self.encoder.stem.conv.in_channels = 1 weight = self.encoder.stem.conv.weight.mean(1, keepdim=True) self.encoder.stem.conv.weight = torch.nn.Parameter(weight) self.encoder.head.fc = nn.Identity() nn_embed_size = REXNET200_EMB_DIM elif encoder_type == "tf_efficientnet_b3_ns": self.encoder = timm.create_model( encoder_type, pretrained=use_pretrained_encoder ) if one_channel: self.encoder.conv_stem.in_channels = 1 weight = self.encoder.conv_stem.weight.mean(1, keepdim=True) self.encoder.conv_stem.weight = torch.nn.Parameter(weight) self.encoder.classifier = nn.Identity() nn_embed_size = EFFNETB3_EMB_DIM elif encoder_type == "tf_efficientnet_b5_ns": self.encoder = timm.create_model( encoder_type, pretrained=use_pretrained_encoder, num_classes=11 ) if path_to_chkp is not None: print("Loading starting point") state_dict = load_effnet_b5_start_point(path_to_chkp) self.encoder.load_state_dict(state_dict) if one_channel: self.encoder.conv_stem.in_channels = 1 weight = 
self.encoder.conv_stem.weight.mean(1, keepdim=True) self.encoder.conv_stem.weight = torch.nn.Parameter(weight) self.encoder.classifier = nn.Identity() nn_embed_size = EFFNETB5_EMB_DIM elif encoder_type == "resnet200d": self.encoder = timm.create_model( encoder_type, pretrained=use_pretrained_encoder, num_classes=11 ) if path_to_chkp is not None: print("Loading starting point") state_dict = load_resnet200d_start_point(path_to_chkp) self.encoder.load_state_dict(state_dict) if one_channel: self.encoder.conv1[0].in_channels = 1 weight = self.encoder.conv1[0].weight.mean(1, keepdim=True) self.encoder.conv1[0].weight = torch.nn.Parameter(weight) self.encoder.fc = nn.Identity() nn_embed_size = RESNET50_EMB_DIM else: raise ValueError(f"{encoder_type} is invalid model_type") classes_num = classifiier_config["classes_num"] hidden_dims = classifiier_config["hidden_dims"] second_dropout_rate = classifiier_config["second_dropout_rate"] if classifiier_config["classifier_type"] == "relu": first_dropout_rate = classifiier_config["first_dropout_rate"] self.classifier = nn.Sequential( nn.Linear(nn_embed_size, hidden_dims), nn.ReLU() , nn.Dropout(p=first_dropout_rate), nn.Linear(hidden_dims, hidden_dims), nn.ReLU() , nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "elu": first_dropout_rate = classifiier_config["first_dropout_rate"] self.classifier = nn.Sequential( nn.Dropout(first_dropout_rate), nn.Linear(nn_embed_size, hidden_dims), nn.ELU() , nn.Dropout(second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "dima": self.classifier = nn.Sequential( nn.BatchNorm1d(nn_embed_size), nn.Linear(nn_embed_size, hidden_dims), nn.BatchNorm1d(hidden_dims), nn.PReLU(hidden_dims), nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "prelu": first_dropout_rate = classifiier_config["first_dropout_rate"] 
self.classifier = nn.Sequential( nn.Dropout(first_dropout_rate), nn.Linear(nn_embed_size, hidden_dims), nn.PReLU(hidden_dims), nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "multiscale_relu": first_dropout_rate = classifiier_config["first_dropout_rate"] self.big_dropout = nn.Dropout(p=0.5) self.classifier = nn.Sequential( nn.Linear(nn_embed_size, hidden_dims), nn.ELU() , nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "drop_linear": self.classifier = nn.Sequential( nn.Dropout(p=second_dropout_rate), nn.Linear(nn_embed_size, classes_num), ) else: raise ValueError("Invalid classifier_type") self.use_taylorsoftmax = use_taylorsoftmax if self.use_taylorsoftmax: self.taylorsoftmax = TaylorSoftmax() self.classifier_type = classifiier_config["classifier_type"] self.encoder_type = encoder_type self.device = device self.to(self.device) def forward(self, image): x = self.encoder(image) if self.classifier_type == "multiscale_relu": logits = torch.mean( torch.stack( [self.classifier(self.big_dropout(x)) for _ in range(5)], dim=0, ), dim=0, ) else: logits = self.classifier(x) if self.use_taylorsoftmax: logits = self.taylorsoftmax(logits ).log() return logits class CNNSegModel(nn.Module): def __init__( self, classifiier_config: Mapping[str, Any], encoder_type: str, encoder_config: Mapping[str, Any], device: str, path_to_chkp: Optional[str] = None, use_taylorsoftmax: bool = False, one_channel: bool = True, enable_inference_mode: bool = False ): super().__init__() if path_to_chkp is not None: use_pretrained_encoder = False if encoder_type == "timm-efficientnet-b5_unet": self.encoder = smp.Unet(**encoder_config) if path_to_chkp is not None: print("Loading starting point") state_dict = load_effnet_b5_start_point(path_to_chkp) self.encoder.encoder.load_state_dict(state_dict) if one_channel: self.encoder.encoder.conv_stem.in_channels = 1 
weight = self.encoder.encoder.conv_stem.weight.mean(1, keepdim=True) self.encoder.encoder.conv_stem.weight = torch.nn.Parameter(weight) self.encoder.classification_head[3] = nn.Identity() nn_embed_size = 512 elif encoder_type == "densenet121_unet": self.encoder = smp.Unet(**encoder_config) if path_to_chkp is not None: print("Loading starting point") state_dict = load_densenet121_start_point(path_to_chkp) self.encoder.encoder.load_state_dict(state_dict) if one_channel: self.encoder.encoder.features.conv0.in_channels = 1 weight = self.encoder.encoder.features.conv0.weight.mean( 1, keepdim=True ) self.encoder.encoder.features.conv0.weight = torch.nn.Parameter( weight ) self.encoder.classification_head[3] = nn.Identity() nn_embed_size = 1024 else: raise ValueError(f"{encoder_type} is invalid model_type") classes_num = classifiier_config["classes_num"] hidden_dims = classifiier_config["hidden_dims"] second_dropout_rate = classifiier_config["second_dropout_rate"] if classifiier_config["classifier_type"] == "relu": first_dropout_rate = classifiier_config["first_dropout_rate"] self.classifier = nn.Sequential( nn.Linear(nn_embed_size, hidden_dims), nn.ReLU() , nn.Dropout(p=first_dropout_rate), nn.Linear(hidden_dims, hidden_dims), nn.ReLU() , nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "elu": first_dropout_rate = classifiier_config["first_dropout_rate"] self.classifier = nn.Sequential( nn.Dropout(first_dropout_rate), nn.Linear(nn_embed_size, hidden_dims), nn.ELU() , nn.Dropout(second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "dima": self.classifier = nn.Sequential( nn.BatchNorm1d(nn_embed_size), nn.Linear(nn_embed_size, hidden_dims), nn.BatchNorm1d(hidden_dims), nn.PReLU(hidden_dims), nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "prelu": first_dropout_rate = 
classifiier_config["first_dropout_rate"] self.classifier = nn.Sequential( nn.Dropout(first_dropout_rate), nn.Linear(nn_embed_size, hidden_dims), nn.PReLU(hidden_dims), nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "multiscale_relu": first_dropout_rate = classifiier_config["first_dropout_rate"] self.big_dropout = nn.Dropout(p=0.5) self.classifier = nn.Sequential( nn.Linear(nn_embed_size, hidden_dims), nn.ELU() , nn.Dropout(p=second_dropout_rate), nn.Linear(hidden_dims, classes_num), ) elif classifiier_config["classifier_type"] == "drop_linear": self.classifier = nn.Sequential( nn.Dropout(p=second_dropout_rate), nn.Linear(nn_embed_size, classes_num), ) elif classifiier_config["classifier_type"] == "double_elu_mlp": first_dropout_rate = classifiier_config["first_dropout_rate"] self.classifier_prenet = nn.Sequential( nn.Dropout(first_dropout_rate), nn.Linear(nn_embed_size, hidden_dims), nn.ELU() , ) self.classifier_hidden_class = nn.Sequential( nn.Dropout(second_dropout_rate), nn.Linear( hidden_dims, classifiier_config["hidden_classes_num"] ), ) self.classifier_class_final = nn.Sequential( nn.Dropout(second_dropout_rate), nn.Linear( hidden_dims + classifiier_config["hidden_classes_num"], classes_num, ), ) else: raise ValueError("Invalid classifier_type") self.use_taylorsoftmax = use_taylorsoftmax if self.use_taylorsoftmax: self.taylorsoftmax = TaylorSoftmax() self.classifier_type = classifiier_config["classifier_type"] self.enable_inference_mode = enable_inference_mode self.encoder_type = encoder_type self.device = device self.to(self.device) def forward(self, image, enable_inference_mode=False): enable_inference_mode = enable_inference_mode or self.enable_inference_mode if enable_inference_mode: embs = self.encoder.encoder(image) x = self.encoder.classification_head(embs[-1]) else: mask, x = self.encoder(image) if self.classifier_type == "multiscale_relu": logits = torch.mean( torch.stack( 
[self.classifier(self.big_dropout(x)) for _ in range(5)], dim=0, ), dim=0, ) elif self.classifier_type == "double_elu_mlp": pre_logits = self.classifier_prenet(x) class_logits = self.classifier_hidden_class(pre_logits) logits = self.classifier_class_final( torch.cat([pre_logits, class_logits], axis=-1) ) else: logits = self.classifier(x) if self.use_taylorsoftmax: logits = self.taylorsoftmax(logits ).log() if enable_inference_mode: return logits else: if self.classifier_type == "double_elu_mlp": return mask, class_logits, logits else: return mask, logits<load_pretrained>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,119,370
def get_validation_models(
    model_initilizer: Callable,
    model_config: Mapping[str, Any],
    model_ckp_dicts: List[OrderedDict],
    device: str,
):
    """Instantiate one model per checkpoint dict, restored and in eval mode."""

    def _restore(state_dict):
        # Fresh model for each checkpoint so weights never leak between them.
        net = model_initilizer(**model_config, device=device)
        net.load_state_dict(state_dict)
        net = net.to(device)
        net.eval()
        return net

    return [_restore(sd) for sd in model_ckp_dicts]


def create_val_loaders(
    loader_initilizer: object,
    loader_config: Mapping[str, Any],
    dfs: List[str],
    batch_size: int,
):
    """Build one deterministic (no shuffle, keep last batch) loader per dataframe."""
    workers = os.cpu_count() // 2
    return [
        torch.utils.data.DataLoader(
            loader_initilizer(df=frame, **loader_config),
            batch_size=batch_size,
            drop_last=False,
            shuffle=False,
            num_workers=workers,
        )
        for frame in dfs
    ]


@torch.no_grad()
def cnn_model_predict(t_batch, t_model, t_device):
    """Forward a single batch through the model and return logits as numpy."""
    return t_model(t_batch.to(t_device)).detach().cpu().numpy()


def predict_over_all_train(
    my_loaders, my_models, model_predict_func, device, do_concat=True
):
    """Run each (loader, model) pair and collect logits; optionally concatenate."""
    logits = [
        model_predict_func(batch, net, device)
        for loader, net in zip(my_loaders, my_models)
        for batch in tqdm(loader)
    ]
    if do_concat:
        return np.concatenate(logits)
    return logits
# Sample this many frames from each video for face detection.
frames_per_video = 64

video_reader = VideoReader()
# Callback handed to FaceExtractor: reads `frames_per_video` frames from a path.
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet)
Deepfake Detection Challenge
8,119,370
# IPython magic: render matplotlib figures inline in the notebook output.
%matplotlib inline
# Side length (pixels) face crops are resized to before classification.
input_size = 224
Deepfake Detection Challenge
8,119,370
# When True, skip the slow out-of-fold validation pass (submission runs).
SKIP_VAL = True
# Standard ImageNet channel statistics used to normalize input crops.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std)
Deepfake Detection Challenge
8,119,370
def public_notebook():
    """Run the public RANZCR ResNet200D / SeResNet152D ensembles on the test set.

    Returns:
        (predictions200d, predictions200d_2): sigmoid probabilities with shape
        (n_models, n_samples, 11) for the 640px ensemble (ResNet200D CV.9632 +
        SeResNet152D CV.9615) and the 512px five-fold ResNet200D ensemble.

    Refactor notes: the original defined three structurally identical model
    classes (ResNet200D / SeResNet152D / RANZCRResNet200D — same attribute
    names, hence identical state_dict layouts) and copy-pasted the checkpoint
    loading six times; both are consolidated here. `map_location=device` also
    makes loading robust on CPU-only hosts.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    BATCH_SIZE = 64
    TEST_PATH = '../input/ranzcr-clip-catheter-line-classification/test'
    test = pd.read_csv('../input/ranzcr-clip-catheter-line-classification/sample_submission.csv')

    class TestDataset(Dataset):
        """Loads test chest X-rays as RGB and applies an albumentations pipeline."""

        def __init__(self, df, transform=None):
            self.df = df
            self.file_names = df['StudyInstanceUID'].values
            self.transform = transform

        def __len__(self):
            return len(self.df)

        def __getitem__(self, idx):
            file_name = self.file_names[idx]
            file_path = f'{TEST_PATH}/{file_name}.jpg'
            image = cv2.imread(file_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if self.transform:
                augmented = self.transform(image=image)
                image = augmented['image']
            return image

    def get_transforms(image_size=640):
        """Resize + ImageNet-normalize + tensor conversion for inference."""
        return Compose([
            Resize(image_size, image_size),
            Normalize(),
            ToTensorV2(),
        ])

    class RANZCRResNet200D(nn.Module):
        """timm backbone with global pooling stripped, plus avg-pool + linear head.

        Replaces the three duplicated classes of the original; state_dicts of
        all public checkpoints load into it unchanged (`pretrained` is kept for
        signature compatibility but ignored, as in the original).
        """

        def __init__(self, model_name='resnet200d', out_dim=11, pretrained=False):
            super().__init__()
            self.model = timm.create_model(model_name, pretrained=False)
            n_features = self.model.fc.in_features
            self.model.global_pool = nn.Identity()
            self.model.fc = nn.Identity()
            self.pooling = nn.AdaptiveAvgPool2d(1)
            self.fc = nn.Linear(n_features, out_dim)

        def forward(self, x):
            bs = x.size(0)
            features = self.model(x)
            pooled_features = self.pooling(features).view(bs, -1)
            output = self.fc(pooled_features)
            return output

    def _load(model_name, path, key=None):
        """Build a backbone, load weights (optionally nested under `key`),
        and return it on `device` in eval mode."""
        model = RANZCRResNet200D(model_name=model_name)
        state = torch.load(path, map_location=device)
        model.load_state_dict(state[key] if key is not None else state)
        model.eval()
        return model.to(device)

    def inference(models, test_loader, device):
        """Average sigmoid probabilities over horizontal-flip TTA per model."""
        tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
        probs = []
        for i, (images) in tk0:
            images = images.to(device)
            avg_preds = []
            for model in models:
                with torch.no_grad():
                    y_preds1 = model(images)
                    y_preds2 = model(images.flip(-1))
                y_preds = (y_preds1.sigmoid().to('cpu').numpy()
                           + y_preds2.sigmoid().to('cpu').numpy()) / 2
                avg_preds.append(y_preds)
            avg_preds = np.stack(avg_preds, axis=0)
            probs.append(avg_preds)
        probs = np.concatenate(probs, axis=1)
        return probs

    # 640px ensemble: one ResNet200D and one SeResNet152D (weights under 'model').
    models200D = [
        _load('resnet200d', '../input/resnet200d-public/resnet200d_320_CV9632.pth', key='model'),
        _load('seresnet152d', '../input/seresnet152d-cv9615/seresnet152d_320_CV96.15.pth', key='model'),
    ]

    # 512px ensemble: five ResNet200D fold checkpoints (CV .953-.957).
    fold_ckpts = [
        'resnet200d_fold0_cv953.pth',
        'resnet200d_fold1_cv955.pth',
        'resnet200d_fold2_cv955.pth',
        'resnet200d_fold3_cv957.pth',
        'resnet200d_fold4_cv954.pth',
    ]
    models200D_2 = [
        _load('resnet200d', f'../input/resnet200d-baseline-benchmark-public/{name}')
        for name in fold_ckpts
    ]

    def _make_loader(image_size):
        """Non-shuffling test loader at the given resolution."""
        dataset = TestDataset(test, transform=get_transforms(image_size=image_size))
        return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False,
                          num_workers=os.cpu_count() // 2, pin_memory=True)

    predictions200d = inference(models200D, _make_loader(640), device)
    predictions200d_2 = inference(models200D_2, _make_loader(512), device)
    return predictions200d, predictions200d_2
class MyResNeXt(models.resnet.ResNet):
    """ResNeXt50_32x4d backbone with a single-logit (fake probability) head.

    NOTE(review): the `training` flag is accepted but never used — presumably
    kept for call-site/checkpoint compatibility; confirm before removing.
    """

    def __init__(self, training=True):
        # layers=[3,4,6,3], groups=32, width_per_group=4 == ResNeXt50_32x4d.
        super(MyResNeXt, self).__init__(block=models.resnet.Bottleneck,
                                        layers=[3, 4, 6, 3],
                                        groups=32,
                                        width_per_group=4)
        # Replace the 1000-way ImageNet classifier with one binary logit.
        self.fc = nn.Linear(2048, 1)
Deepfake Detection Challenge
8,119,370
<define_variables>
# Restore the fine-tuned ResNeXt weights and switch to eval mode.
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu)
model = MyResNeXt().to(gpu)
model.load_state_dict(checkpoint)
_ = model.eval()
# Free the state-dict memory once loaded.
del checkpoint
Deepfake Detection Challenge
8,119,370
# Image resolution used for the RANZCR TTA inference pipeline.
RESIZE_SIZE = 640
def predict_on_video(video_path, batch_size):
    """Return the mean fake probability over up to `batch_size` face crops.

    Returns 0.481 (a slightly-below-neutral prior) when prediction fails.
    NOTE(review): control flow reconstructed from a flattened source — the
    final `return 0.481` is assumed to also cover the no-face / empty-batch
    path; confirm against the original notebook.
    """
    try:
        faces = face_extractor.process_video(video_path)
        # Keep only the highest-confidence face per frame.
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = isotropically_resize_image(face, input_size)
                    resized_face = make_square_image(resized_face)
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            if n > 0:
                x = torch.tensor(x, device=gpu).float()
                # HWC -> CHW for the CNN.
                x = x.permute((0, 3, 1, 2))
                for i in range(len(x)):
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    y_pred = model(x)
                    y_pred = torch.sigmoid(y_pred.squeeze())
                    # Average only over the n crops actually filled in.
                    return y_pred[:n].mean().item()
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    return 0.481
Deepfake Detection Challenge
8,119,370
# Load the RANZCR competition tables and a precomputed CV split.
PATH2DIR = '../input/ranzcr-clip-catheter-line-classification/'
os.listdir(PATH2DIR)
train = pd.read_csv(pjoin(PATH2DIR, 'train.csv'))
sample_sub = pd.read_csv(pjoin(PATH2DIR, 'sample_submission.csv'))
# Precomputed (train_idx, val_idx) pairs per fold.
split = np.load('../input/ranzcr-models/naive_cv_split.npy', allow_pickle=True)
def predict_on_video_set(videos, num_workers):
    """Score every video concurrently; ordering matches `videos`.

    Threads are appropriate here since video decoding is I/O-heavy.
    """
    def process_file(i):
        filename = videos[i]
        y_pred = predict_on_video(os.path.join(test_dir, filename),
                                  batch_size=frames_per_video)
        return y_pred

    with ThreadPoolExecutor(max_workers=num_workers) as ex:
        predictions = ex.map(process_file, range(len(videos)))
    return list(predictions)
Deepfake Detection Challenge
8,119,370
# Device used for all RANZCR model inference.
DEVICE = 'cuda'
# Score all test videos with 4 worker threads.
predictions = predict_on_video_set(test_videos, num_workers=4)
Deepfake Detection Challenge
8,119,370
# Load four SWA'd EfficientNet-B5 U-Net ensembles (one entry per experiment;
# each entry is the list of fold models from that experiment's checkpoints).
# Refactored from four copy-pasted stanzas that differed only in the
# checkpoint directory — the model config is identical for all of them.
models_512 = []

# Each directory name is doubled in the path because of how the Kaggle
# dataset was packaged.
_CKP_EXPERIMENTS = [
    'timm_efficientnet_b5_unet_32bs_640res_lesslaugs_ls005_shedchanged_startpoint_difflrs_segbranch_125coefs_1e4noseg_bigholes_firstpseudo_swa_roc_auc_score',
    'timm_efficientnet_b5_unet_32bs_640res_qubvelaugs_ls005_shedchanged_startpoint_difflrs_segbranch_113coefs_1e4noseg_bigholes_morepseudo_swa_roc_auc_score',
    'timm_efficientnet_b5_unet_24bs_640res_qubvelaugs_ls005_shedchanged_startpoint_difflrs_segbranch_113coefs_1e4noseg_bigholes_pseudo_swa_roc_auc_score',
    'timm_efficientnet_b5_unet_24bs_640res_qubvelaugs_ls005_shedchanged_startpoint_difflrs_segbranch_113coefs_1e4noseg_bigholes_swa_roc_auc_score',
]

# Shared architecture/config for every experiment above.
_MODEL_CONFIG = {
    "classifiier_config": {
        "classifier_type": "elu",
        "classes_num": 11,
        "hidden_dims": 1024,
        "second_dropout_rate": 0.2,
        "first_dropout_rate": 0.3,
    },
    "encoder_config": {
        "in_channels": 3,
        "encoder_name": 'timm-efficientnet-b5',
        "encoder_weights": None,
        "classes": 2,
        "activation": 'sigmoid',
        "aux_params": dict(
            pooling='avg',
            dropout=None,
            classes=4,
        ),
    },
    "encoder_type": "timm-efficientnet-b5_unet",
    "use_taylorsoftmax": False,
    "one_channel": True,
    "enable_inference_mode": True,
}

for _exp in _CKP_EXPERIMENTS:
    ckp_names = glob(f'../input/ranzcr-models/{_exp}/{_exp}/*.pt')
    print(ckp_names)
    chckps = [torch.load(el, map_location='cpu') for el in ckp_names]
    models_512.append(get_validation_models(
        model_initilizer=CNNSegModel,
        model_config=_MODEL_CONFIG,
        model_ckp_dicts=chckps,
        device=DEVICE,
    ))
# Per-model submission for the ResNeXt predictions (blended later).
submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df_resnext.to_csv("submission_resnext.csv", index=False)
Deepfake Detection Challenge
8,119,370
# Optional 5-fold OOF validation of the first 512px ensemble (off when SKIP_VAL).
if not SKIP_VAL:
    val_dfs = [
        train.iloc[split[i][1]]
        for i in range(5)
    ]
    val_loaders = create_val_loaders(
        loader_initilizer=RANZCRDataset,
        loader_config={
            "root": 'train_images_512_512',
            "path_col": "StudyInstanceUID",
            "ext": ".jpeg",
            "transforms": T.ToTensor()
        },
        dfs=val_dfs,
        batch_size=32
    )
    train_logits = predict_over_all_train(
        val_loaders, models_512[0], cnn_model_predict, DEVICE
    )
    val_dfs = pd.concat(val_dfs).reset_index(drop=True)
    # Columns 1:-1 — presumably the 11 label columns (dropping UID and
    # PatientID); verify against train.csv layout.
    oof_score = roc_auc_score(val_dfs.iloc[:, 1:-1], train_logits)
    print(f"OOF score : {oof_score}")
# Install pytorchcv offline from the attached wheel (no internet in scoring).
!pip install ../input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,119,370
def predict_test_with_multiple_models(
    my_models: List[List[torch.nn.Module]],
    my_loaders: List[torch.utils.data.DataLoader],
    predict_func: Callable,
    device: str,
):
    """Predict every TTA loader with every experiment's fold models.

    Returns a stacked array — presumably shaped
    (n_loaders, n_experiments, n_folds, n_samples, n_classes); axis 2 of the
    per-loader concatenation is the sample axis. TODO confirm shapes.
    """
    logits = []
    for my_loader in my_loaders:
        temp_logits = []
        for batch in tqdm(my_loader):
            temp_logits_inner = []
            for exp_models in my_models:
                # Stack the fold predictions of one experiment for this batch.
                logit = np.stack(
                    [predict_func(batch, m, device) for m in exp_models], axis=0
                )
                temp_logits_inner.append(logit)
            # (n_experiments, n_folds, batch, classes) for this batch.
            temp_logits.append(np.stack(temp_logits_inner, axis=0))
        # Concatenate batches along the sample axis.
        logits.append(np.concatenate(temp_logits, axis=2))
    return np.stack(logits, axis=0)
# Inline plotting + silence library warnings for a clean notebook log.
%matplotlib inline
warnings.filterwarnings("ignore")
Deepfake Detection Challenge
8,119,370
# Batch size for test-time inference loaders.
INF_BS = 32
# Collect the test videos in deterministic (sorted) order.
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted(name for name in os.listdir(test_dir) if name[-4:] == ".mp4")
len(test_videos)
Deepfake Detection Challenge
8,119,370
# Four TTA views of the 512px test images: identity, horizontal flip,
# +15deg rotation, -15deg rotation. Refactored from four copy-pasted
# dataset/loader stanzas that differed only in the augmentation.
_TTA_AUGS = [
    None,  # original, un-augmented view
    albu.HorizontalFlip(p=1.0),
    albu.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=(15, 16), p=1.0),
    albu.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=(-16, -15), p=1.0),
]

all_test_loaders_512 = []
for _aug in _TTA_AUGS:
    _dataset_kwargs = {
        "df": sample_sub,
        "root": 'test_images_512_512',
        "path_col": "StudyInstanceUID",
        "ext": ".jpeg",
        "transforms": T.ToTensor(),
    }
    if _aug is not None:
        _dataset_kwargs["augmentations"] = _aug
    all_test_loaders_512.append(torch.utils.data.DataLoader(
        RANZCRDataset(**_dataset_kwargs),
        batch_size=INF_BS,
        drop_last=False,
        shuffle=False,
        num_workers=os.cpu_count() // 2
    ))
# Prefer CUDA when available; everything downstream moves tensors here.
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Deepfake Detection Challenge
8,119,370
# Raw logits for every (TTA view, experiment, fold) combination on the test set.
test_logits_512 = predict_test_with_multiple_models(
    models_512, all_test_loaders_512, cnn_model_predict, DEVICE
)
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,119,370
from scipy.special import expit<compute_test_metric>
# Same frame-sampling / face-extraction setup as the ResNeXt pipeline.
frames_per_video = 64

video_reader = VideoReader()
# Reader callback: sample `frames_per_video` frames from a video path.
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet)
Deepfake Detection Challenge
8,119,370
# Sigmoid the logits, then average — presumably over TTA views (axis 0) and
# folds (then-axis 1), leaving per-experiment probabilities; TODO confirm axes.
test_logits = expit(test_logits_512).mean(0).mean(1)
# Face-crop resolution for the Xception model (smaller than the ResNeXt's 224).
input_size = 150
Deepfake Detection Challenge
8,119,370
# Per-experiment averaged probabilities, one array per training experiment.
my_exp_1 = test_logits[0]
my_exp_2 = test_logits[1]
my_exp_3 = test_logits[2]
my_exp_4 = test_logits[3]
# ImageNet normalization statistics for the Xception face crops.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std)
Deepfake Detection Challenge
8,119,370
# Square-root blend of the four experiments' predictions.
# BUG FIX: the original summed my_exp_3 twice and never used my_exp_4
# (defined just above) — almost certainly a copy-paste typo.
blend = (
    my_exp_1 ** 0.5
    + my_exp_2 ** 0.5
    + my_exp_3 ** 0.5
    + my_exp_4 ** 0.5
)
# Xception backbone (pytorchcv) with the stock classifier removed and a
# custom single-logit head attached.
model = get_model("xception", pretrained=False)
model = nn.Sequential(*list(model.children())[:-1])

class Pooling(nn.Module):
    # NOTE(review): defined but never used below — the pool is replaced with a
    # plain AdaptiveAvgPool2d instead; confirm whether avg+max pooling was
    # intended before deleting.
    def __init__(self):
        super(Pooling, self).__init__()
        self.p1 = nn.AdaptiveAvgPool2d((1, 1))
        self.p2 = nn.AdaptiveMaxPool2d((1, 1))

    def forward(self, x):
        x1 = self.p1(x)
        x2 = self.p2(x)
        return (x1 + x2) * 0.5

# Replace the backbone's final pooling with global average pooling.
model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))

class Head(torch.nn.Module):
    """BN -> Dropout -> Linear(512) -> ReLU -> BN -> Dropout -> Linear head."""

    def __init__(self, in_f, out_f):
        super(Head, self).__init__()
        self.f = nn.Flatten()
        self.l = nn.Linear(in_f, 512)
        self.d = nn.Dropout(0.5)
        self.o = nn.Linear(512, out_f)
        self.b1 = nn.BatchNorm1d(in_f)
        self.b2 = nn.BatchNorm1d(512)
        self.r = nn.ReLU()

    def forward(self, x):
        x = self.f(x)
        x = self.b1(x)
        x = self.d(x)
        x = self.l(x)
        x = self.r(x)
        x = self.b2(x)
        x = self.d(x)
        out = self.o(x)
        return out

class FCN(torch.nn.Module):
    """Backbone + binary Head wrapper."""

    def __init__(self, base, in_f):
        super(FCN, self).__init__()
        self.base = base
        self.h1 = Head(in_f, 1)

    def forward(self, x):
        x = self.base(x)
        return self.h1(x)

# Single-model "ensemble" list holding the trained Xception.
net = []
model = FCN(model, 2048)
model = model.cuda()
model.load_state_dict(torch.load('../input/deepfake-xception-trained-model/model.pth'))
net.append(model)
Deepfake Detection Challenge
8,119,370
# Write the blended probabilities into all 11 label columns of the submission.
sample_sub.iloc[:, 1:] = blend
sample_sub
def predict_on_video(video_path, batch_size):
    """Mean fake probability over up to `batch_size` face crops (Xception).

    Returns 0.481 on failure. NOTE(review): control flow reconstructed from a
    flattened source — the final `return 0.481` is assumed to also cover the
    no-face / empty-batch path; confirm against the original notebook.
    """
    try:
        faces = face_extractor.process_video(video_path)
        # Keep only the highest-confidence face per frame.
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = isotropically_resize_image(face, input_size)
                    resized_face = make_square_image(resized_face)
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            if n > 0:
                x = torch.tensor(x, device=gpu).float()
                # HWC -> CHW for the CNN.
                x = x.permute((0, 3, 1, 2))
                for i in range(len(x)):
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    y_pred = model(x)
                    y_pred = torch.sigmoid(y_pred.squeeze())
                    # Average only over the crops actually filled in.
                    return y_pred[:n].mean().item()
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    return 0.481
Deepfake Detection Challenge
8,119,370
# Sanity check: count distinct values per column of the submission.
sample_sub.nunique(axis=0)
def predict_on_video_set(videos, num_workers):
    """Score every video concurrently; results keep the order of `videos`."""
    def process_file(i):
        filename = videos[i]
        y_pred = predict_on_video(os.path.join(test_dir, filename),
                                  batch_size=frames_per_video)
        return y_pred

    with ThreadPoolExecutor(max_workers=num_workers) as ex:
        predictions = ex.map(process_file, range(len(videos)))
    return list(predictions)
Deepfake Detection Challenge
8,119,370
# Remove the temporary resized images, write the final RANZCR submission,
# and list the working dir to confirm the output exists.
!rm -rf test_images_512_512
sample_sub.to_csv('submission.csv', index=False)
os.listdir('./')
# Time the full scoring pass; eval mode disables dropout/batchnorm updates.
%%time
model.eval()
predictions = predict_on_video_set(test_videos, num_workers=4)
Deepfake Detection Challenge
8,119,370
# Project paths and experiment constants for the RANZCR pipeline.
ROOT = Path.cwd().parent
INPUT = ROOT / "input"
OUTPUT = ROOT / "output"
DATA = INPUT / "ranzcr-clip-catheter-line-classification"
TRAIN = DATA / "train"
TEST = DATA / "test"
# NOTE(review): 'ranzer-models' looks like a typo for 'ranzcr-models' used
# elsewhere in this file — verify the dataset directory name.
TRAINED_MODEL = INPUT / 'ranzer-models'
TMP = ROOT / "tmp"
TMP.mkdir(exist_ok=True)
# (sic) RANDAM_SEED — name kept because it is referenced by the CV-split cell.
RANDAM_SEED = 1086
N_CLASSES = 11
FOLDS = [0, 1, 2, 3, 4]
N_FOLD = len(FOLDS)
IMAGE_SIZE = (640, 640)
CONVERT_TO_RANK = False
# Trim the test set on the fast-commit path (see the sampling cell below).
FAST_COMMIT = True
CLASSES = [
    'ETT - Abnormal',
    'ETT - Borderline',
    'ETT - Normal',
    'NGT - Abnormal',
    'NGT - Borderline',
    'NGT - Incompletely Imaged',
    'NGT - Normal',
    'CVC - Abnormal',
    'CVC - Borderline',
    'CVC - Normal',
    'Swan Ganz Catheter Present'
]
# Per-model submission for the Xception predictions (blended later).
submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df_xception.to_csv("submission_xception.csv", index=False)
Deepfake Detection Challenge
8,119,370
# List the competition files and load the train / sample submission tables.
for p in DATA.iterdir():
    print(p.name)
train = pd.read_csv(DATA / "train.csv")
smpl_sub = pd.read_csv(DATA / "sample_submission.csv")
# Frame that will hold the final blended deepfake predictions.
submission_df = pd.DataFrame({"filename": test_videos})
Deepfake Detection Challenge
8,119,370
# On the hidden public test set (3582 rows) keep only two batches' worth of
# rows so notebook commits finish quickly.
if FAST_COMMIT and len(smpl_sub) == 3582:
    smpl_sub = smpl_sub.iloc[:64 * 2].reset_index(drop=True)
# Weighted blend: 70% ResNeXt, 30% Xception.
submission_df["label"] = 0.70 * submission_df_resnext["label"] + 0.30 * submission_df_xception["label"]
Deepfake Detection Challenge
8,119,370
def multi_label_stratified_group_k_fold(label_arr: np.array, gid_arr: np.array, n_fold: int, seed: int = 42):
    """Greedy multi-label stratified GROUP k-fold.

    Whole groups (patients) are assigned to folds so that per-class label
    counts stay as balanced as possible; yields (train_indexs, val_indexs)
    per fold.
    """
    np.random.seed(seed)
    random.seed(seed)
    start_time = time.time()
    n_train, n_class = label_arr.shape
    gid_unique = sorted(set(gid_arr))
    n_group = len(gid_unique)
    # Map raw group ids to contiguous array ids 0..n_group-1.
    gid2aid = dict(zip(gid_unique, range(n_group)))
    aid_arr = np.vectorize(lambda x: gid2aid[x])(gid_arr)
    cnts_by_class = label_arr.sum(axis=0)
    # Per-group label counts via a sparse (group x sample) one-hot product.
    col, row = np.array(sorted(enumerate(aid_arr), key=lambda x: x[1])).T
    cnts_by_group = coo_matrix(
        (np.ones(len(label_arr)), (row, col))
    ).dot(coo_matrix(label_arr)).toarray().astype(int)
    del col
    del row
    cnts_by_fold = np.zeros((n_fold, n_class), int)
    groups_by_fold = [[] for fid in range(n_fold)]
    group_and_cnts = list(enumerate(cnts_by_group))
    # Shuffle so ties between equally-skewed groups break randomly (seeded).
    np.random.shuffle(group_and_cnts)
    print("finished preparation", time.time() - start_time)
    # Assign groups hardest-first (largest label-count spread) to whichever
    # fold minimizes the std of the per-class fold/total ratios.
    for aid, cnt_by_g in sorted(group_and_cnts, key=lambda x: -np.std(x[1])):
        best_fold = None
        min_eval = None
        for fid in range(n_fold):
            # Tentatively place the group, score the balance, then undo.
            cnts_by_fold[fid] += cnt_by_g
            fold_eval = (cnts_by_fold / cnts_by_class).std(axis=0).mean()
            cnts_by_fold[fid] -= cnt_by_g
            if min_eval is None or fold_eval < min_eval:
                min_eval = fold_eval
                best_fold = fid
        cnts_by_fold[best_fold] += cnt_by_g
        groups_by_fold[best_fold].append(aid)
    print("finished assignment.", time.time() - start_time)
    gc.collect()
    idx_arr = np.arange(n_train)
    for fid in range(n_fold):
        val_groups = groups_by_fold[fid]
        val_indexs_bool = np.isin(aid_arr, val_groups)
        train_indexs = idx_arr[~val_indexs_bool]
        val_indexs = idx_arr[val_indexs_bool]
        print("[fold {}]".format(fid), end=" ")
        print("n_group:(train, val)=({}, {})".format(n_group - len(val_groups), len(val_groups)), end=" ")
        print("n_sample:(train, val)=({}, {})".format(len(train_indexs), len(val_indexs)))
        yield train_indexs, val_indexs
# Write the final blended deepfake submission.
submission_df.to_csv("submission.csv", index=False)
Deepfake Detection Challenge
8,522,656
# Materialize the patient-grouped stratified split for all folds up front.
label_arr = train[CLASSES].values
group_id = train.PatientID.values
train_val_indexs = list(
    multi_label_stratified_group_k_fold(label_arr, group_id, N_FOLD, RANDAM_SEED))
# Paths and hyperparameters for the multi-model deepfake ensemble.
TEST_DIR = "/kaggle/input/deepfake-detection-challenge/test_videos/"
# Eleven trained checkpoints (MobileNetV3 / EfficientNet / Xception / NASNet /
# ResNet / LSTM variants). Filenames kept verbatim — note CHECKPOINT2 contains
# an unusual ')' in its name; presumably intentional, verify it exists.
CHECKPOINT = '/kaggle/input/kha-deepfake-dataset/checkpoint_mobilev3_alldata_1903_withfaceforensics_3epochs_.pth'
CHECKPOINT2 = '/kaggle/input/kha-deepfake-dataset/cpt_mbn_sqrimg_2503)2epochs_.pth'
CHECKPOINT3 = '/kaggle/input/kha-deepfake-dataset/checkpoint_b0_1803_0epochs_0.4498354019969702.pth'
CHECKPOINT4 = '/kaggle/input/kha-deepfake-dataset/cpt_xception_new_sqrimg_29030_epochs_3_moment.pth'
CHECKPOINT5 = '/kaggle/input/kha-deepfake-dataset/cpt_nasnet_bbd_29032_epochs_0_moment.pth'
CHECKPOINT6 = '/kaggle/input/kha-deepfake-dataset/cpt_effb1_sqrimg_25032_epochs_1_moment.pth'
CHECKPOINT7 = '/kaggle/input/kha-deepfake-dataset/cpt_mbn_LSTM16_300313epochs_0.21526583118569945.pth'
CHECKPOINT8 = '/kaggle/input/kha-deepfake-dataset/checkpoint_b0_LSTM16_2503_batchfirst1epochs_0.25805166250713446.pth'
CHECKPOINT9 = '/kaggle/input/kha-deepfake-dataset/cpt_nasnetnew_bbd_31031_epochs_0_moment.pth'
CHECKPOINT10 = '/kaggle/input/kha-deepfake-dataset/cpt_xception_sqrimg_25031_epochs_0_moment.pth'
CHECKPOINT11 = '/kaggle/input/kha-deepfake-dataset/cpt_resnet_bbd_31033_epochs_3_moment.pth'
CONVERT_RGB = True
# Batch size for MTCNN face detection.
MTCNN_BATCH = 15
# Frame sampling stride.
GAP = 5
IMG_SIZE = 224
SCALE = 0.5
NUM_WORKERS = mp.cpu_count()
Deepfake Detection Challenge