kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
5,106,186 | count_low=0
count_mid=0
count_high=0
def filter_2cls_raw(row, low_thr=0.08, high_thr=0.95):
    global count_low
    global count_mid
    global count_high
    prob = row['target']
    if prob < low_thr:
        row['PredictionString'] = '14 1 0 0 1 1'
        count_low += 1
    elif low_thr <= prob < high_thr:
        row['PredictionString'] += f' 14 {prob} 0 0 1 1'
        count_mid += 1
    elif high_thr <= prob:
        row['PredictionString'] = row['PredictionString']
        count_high += 1
    else:
        raise ValueError('Prediction must be from [0-1]')
    return row<prepare_output> | train_df.groupby(pd.cut(train_df["Fare"], np.arange(0, 350, 25)))['Survived'].sum() / train_df.groupby(pd.cut(train_df["Fare"], np.arange(0, 350, 25)))['Survived'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | sub_raw = pred_raw.apply(filter_2cls_raw, axis=1)
print(count_low/3000,count_mid/3000,count_high/3000)
sub_raw[60:63]<groupby> | train_df['CabinId'] = train_df['Cabin'].apply(lambda x: 'None' if pd.isna(x)else x[0])
train_df.groupby(['CabinId'])['Survived'].sum() / train_df.groupby(['CabinId'])['Survived'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | count_low=0
count_mid=0
count_high=0
def filter_2cls(row, low_thr=0.05, high_thr=0.99):
    global count_low
    global count_mid
    global count_high
    prob = row['target']
    if prob < low_thr:
        row['PredictionString'] = '14 1 0 0 1 1'
        count_low += 1
    elif low_thr <= prob < high_thr:
        row['PredictionString'] += f' 14 {prob} 0 0 1 1'
        count_mid += 1
    elif high_thr <= prob:
        row['PredictionString'] += f' 14 {prob} 0 0 1 1'
        count_high += 1
    else:
        raise ValueError('Prediction must be from [0-1]')
    return row<prepare_output> | train_df.drop(['CabinId'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
5,106,186 | sub = pred.apply(filter_2cls, axis=1)
print(count_low/3000,count_mid/3000,count_high/3000)
sub[60:63]<merge> | train_df.groupby(['Embarked'])['Survived'].sum() / train_df.groupby(['Embarked'])['Survived'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | merge_sub = pd.merge(sub_raw, sub, on = 'image_id', how = 'left' )<save_to_csv> | train_df['Title'] = train_df['Name'].apply(lambda x: re.compile('.+?[,][\s](.*?)[\.][\s].+').findall(x)[0])
train_df.groupby(['Title'])['Survived'].sum() / train_df.groupby(['Title'])['Survived'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | merge_sub.to_csv('merge_sub.csv',index = False )<count_values> | train_df.groupby(['Title'])['Survived'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | sub['PredictionString'].value_counts().iloc[[0]]<save_to_csv> | train_df.drop(['Title'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
5,106,186 | sub_raw[['image_id', 'PredictionString']].to_csv('submission_raw.csv',index = False )<save_to_csv> | np.nanmean(train_df[train_df['Name'].str.contains('Master')]['Age'] ) | Titanic - Machine Learning from Disaster |
5,106,186 | sub[['image_id', 'PredictionString']].to_csv('submission.csv',index = False )<import_modules> | np.nanmean(train_df[train_df['Name'].str.contains('Miss')]['Age'] ) | Titanic - Machine Learning from Disaster |
5,106,186 | import numpy as np, pandas as pd
from glob import glob
import shutil, os
import matplotlib.pyplot as plt
from sklearn.model_selection import GroupKFold
from tqdm.notebook import tqdm
import seaborn as sns<define_variables> | merged_df['Title'] = merged_df['Name'].apply(lambda x: re.compile('.+?[,][\s](.*?)[\.][\s].+').findall(x)[0]) | Titanic - Machine Learning from Disaster |
5,106,186 | dim = 512
test_dir = f'/kaggle/input/vinbigdata-{dim}-image-dataset/vinbigdata/test'
weights_dir = '/kaggle/input/chest-x-ray-abnormality-detection-with-yolo-v3/yolov3/runs/train/exp/weights/best.pt'<load_from_csv> | boymean = np.nanmean(train_df[train_df['Name'].str.contains('Master.')]['Age'])
girlmean = np.nanmean(train_df[train_df['Name'].str.contains('Miss.')]['Age'])
meanage = np.nanmean(train_df['Age'] ) | Titanic - Machine Learning from Disaster |
5,106,186 | test_df = pd.read_csv(f'/kaggle/input/vinbigdata-{dim}-image-dataset/vinbigdata/test.csv')
test_df.head()<set_options> | merged_df['Age'] = np.where(np.isnan(merged_df['Age'])&(merged_df['Title'] == 'Master'), boymean, merged_df['Age'])
merged_df['Age'] = np.where(np.isnan(merged_df['Age'])&(merged_df['Title'] == 'Miss'), girlmean, merged_df['Age'])
merged_df['Age'] = merged_df['Age'].fillna(meanage ) | Titanic - Machine Learning from Disaster |
5,106,186 | shutil.copytree('/kaggle/input/yolov5-official-v31-dataset/yolov5', '/kaggle/working/yolov5')
os.chdir('/kaggle/working/yolov5')
clear_output()
print('Setup complete.Using torch %s %s' %(torch.__version__, torch.cuda.get_device_properties(0)if torch.cuda.is_available() else 'CPU'))<install_modules> | merged_df['Fare'] = merged_df['Fare'].fillna(np.nanmedian(merged_df['Fare']))
merged_df['Fare'] = merged_df['Fare'].apply(lambda x: 1 if x > 75.0 else 0 ) | Titanic - Machine Learning from Disaster |
5,106,186 | !python detect.py --weights $weights_dir\
--img 512\
--conf 0.15\
--iou 0.4\
--source $test_dir\
--save-txt --save-conf --exist-ok<categorify> | merged_df['Cabin'] = merged_df['Cabin'].apply(lambda x: 0 if pd.isna(x)else 1 ) | Titanic - Machine Learning from Disaster |
5,106,186 | def yolo2voc(image_height, image_width, bboxes):
    bboxes = bboxes.copy().astype(float)
    bboxes[..., [0, 2]] = bboxes[..., [0, 2]] * image_width
    bboxes[..., [1, 3]] = bboxes[..., [1, 3]] * image_height
    bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]] / 2
    bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]
    return bboxes<define_variables> | merged_df['Embarked'] = merged_df['Embarked'].fillna('N' ) | Titanic - Machine Learning from Disaster |
5,106,186 | image_ids = []
PredictionStrings = []
for file_path in tqdm(glob('runs/detect/exp/labels/*txt')):
    image_id = file_path.split('/')[-1].split('.')[0]
    w, h = test_df.loc[test_df.image_id==image_id,['width', 'height']].values[0]
    f = open(file_path, 'r')
    data = np.array(f.read().replace('\n', ' ').strip().split(' ')).astype(np.float32).reshape(-1, 6)
    data = data[:, [0, 5, 1, 2, 3, 4]]
    bboxes = list(np.round(np.concatenate((data[:, :2], np.round(yolo2voc(h, w, data[:, 2:]))), axis=1).reshape(-1), 1).astype(str))
    for idx in range(len(bboxes)):
        bboxes[idx] = str(int(float(bboxes[idx]))) if idx % 6 != 1 else bboxes[idx]
    image_ids.append(image_id)
    PredictionStrings.append(' '.join(bboxes))<save_to_csv> | merged_df['Family'] = merged_df['Parch'] + merged_df['SibSp'] | Titanic - Machine Learning from Disaster |
5,106,186 | pred_df = pd.DataFrame({'image_id':image_ids,
'PredictionString':PredictionStrings})
sub_df = pd.merge(test_df, pred_df, on = 'image_id', how = 'left' ).fillna("14 1 0 0 1 1")
sub_df = sub_df[['image_id', 'PredictionString']]
sub_df.to_csv('/kaggle/working/submission.csv',index = False)
sub_df.tail()<install_modules> | merged_df.drop(['Name', 'Ticket', 'SibSp', 'Parch'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
5,106,186 | shutil.rmtree('/kaggle/working/yolov5' )<set_options> | train_df.groupby(pd.cut(train_df["Age"], np.arange(0, 100, 20)))['Survived'].sum() / train_df.groupby(pd.cut(train_df["Age"], np.arange(0, 100, 20)))['Survived'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | py.init_notebook_mode(connected=True)
pio.templates.default = "plotly_dark"
pd.set_option('max_columns', 50)
<set_options> | maxAge = train_df['Age'].max()
minAge = train_df['Age'].min()
merged_df['Age'] =(merged_df['Age'] - minAge)/(maxAge - minAge ) | Titanic - Machine Learning from Disaster |
5,106,186 | !nvidia-smi<install_modules> | dummiesPclass = pd.get_dummies(merged_df['Pclass'], prefix = 'Pclass')
merged_df = pd.concat([merged_df, dummiesPclass], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | !nvcc --version<import_modules> | dummiesFare = pd.get_dummies(merged_df['Fare'], prefix = 'Fare')
merged_df = pd.concat([merged_df, dummiesFare], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | torch.__version__<install_modules> | merged_df.groupby(['Title'])['PassengerId'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | !pip install detectron2 -f \
https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html<define_variables> | merged_df['Title'] = merged_df['Title'].apply(lambda x: 'Miss' if(x in ['Mlle', 'Mme', 'Ms'])else x)
merged_df['Title'] = merged_df['Title'].apply(lambda x: 'Mrs' if(x in ['Dona', 'Lady'])else x)
merged_df['Title'] = merged_df['Title'].apply(lambda x: 'Mr' if(x == 'Rev')else x)
merged_df['Title'] = merged_df['Title'].apply(lambda x: x if(x in ['Master', 'Mr', 'Mrs', 'Miss'])else 'Other' ) | Titanic - Machine Learning from Disaster |
5,106,186 | def get_vinbigdata_dicts(
    imgdir: Path,
    train_df: pd.DataFrame,
    train_data_type: str = "original",
    use_cache: bool = True,
    debug: bool = True,
    target_indices: Optional[np.ndarray] = None,
    use_class14: bool = False,
):
    debug_str = f"_debug{int(debug)}"
    train_data_type_str = f"_{train_data_type}"
    class14_str = f"_14class{int(use_class14)}"
    cache_path = Path(".") / f"dataset_dicts_cache{train_data_type_str}{class14_str}{debug_str}.pkl"
    if not use_cache or not cache_path.exists():
        print("Creating data...")
        train_meta = pd.read_csv(imgdir / "train_meta.csv")
        if debug:
            train_meta = train_meta.iloc[:500]
        image_id = train_meta.loc[0, "image_id"]
        image_path = str(imgdir / "train" / f"{image_id}.png")
        image = cv2.imread(image_path)
        resized_height, resized_width, ch = image.shape
        print(f"image shape: {image.shape}")
        dataset_dicts = []
        for index, train_meta_row in tqdm(train_meta.iterrows(), total=len(train_meta)):
            record = {}
            image_id, height, width = train_meta_row.values
            filename = str(imgdir / "train" / f"{image_id}.png")
            record["file_name"] = filename
            record["image_id"] = image_id
            record["height"] = resized_height
            record["width"] = resized_width
            objs = []
            for index2, row in train_df.query("image_id == @image_id").iterrows():
                class_id = row["class_id"]
                if class_id == 14:
                    if use_class14:
                        bbox_resized = [0, 0, resized_width, resized_height]
                        obj = {
                            "bbox": bbox_resized,
                            "bbox_mode": BoxMode.XYXY_ABS,
                            "category_id": class_id,
                        }
                        objs.append(obj)
                    else:
                        pass
                else:
                    h_ratio = resized_height / height
                    w_ratio = resized_width / width
                    bbox_resized = [
                        float(row["x_min"]) * w_ratio,
                        float(row["y_min"]) * h_ratio,
                        float(row["x_max"]) * w_ratio,
                        float(row["y_max"]) * h_ratio,
                    ]
                    obj = {
                        "bbox": bbox_resized,
                        "bbox_mode": BoxMode.XYXY_ABS,
                        "category_id": class_id,
                    }
                    objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)
        with open(cache_path, mode="wb") as f:
            pickle.dump(dataset_dicts, f)
    print(f"Load from cache {cache_path}")
    with open(cache_path, mode="rb") as f:
        dataset_dicts = pickle.load(f)
    if target_indices is not None:
        dataset_dicts = [dataset_dicts[i] for i in target_indices]
    return dataset_dicts
def get_vinbigdata_dicts_test(
    imgdir: Path, test_meta: pd.DataFrame, use_cache: bool = True, debug: bool = True,
):
    debug_str = f"_debug{int(debug)}"
    cache_path = Path(".") / f"dataset_dicts_cache_test{debug_str}.pkl"
    if not use_cache or not cache_path.exists():
        print("Creating data...")
        if debug:
            test_meta = test_meta.iloc[:500]
        image_id = test_meta.loc[0, "image_id"]
        image_path = str(imgdir / "test" / f"{image_id}.png")
        image = cv2.imread(image_path)
        resized_height, resized_width, ch = image.shape
        print(f"image shape: {image.shape}")
        dataset_dicts = []
        for index, test_meta_row in tqdm(test_meta.iterrows(), total=len(test_meta)):
            record = {}
            image_id, height, width = test_meta_row.values
            filename = str(imgdir / "test" / f"{image_id}.png")
            record["file_name"] = filename
            record["image_id"] = image_id
            record["height"] = resized_height
            record["width"] = resized_width
            dataset_dicts.append(record)
        with open(cache_path, mode="wb") as f:
            pickle.dump(dataset_dicts, f)
    print(f"Load from cache {cache_path}")
    with open(cache_path, mode="rb") as f:
        dataset_dicts = pickle.load(f)
    return dataset_dicts<categorify> | merged_df.groupby(['Title'])['PassengerId'].count() | Titanic - Machine Learning from Disaster |
5,106,186 | def format_pred(labels: ndarray, boxes: ndarray, scores: ndarray)-> str:
    pred_strings = []
    for label, score, bbox in zip(labels, scores, boxes):
        xmin, ymin, xmax, ymax = bbox.astype(np.int64)
        pred_strings.append(f"{label} {score} {xmin} {ymin} {xmax} {ymax}")
    return " ".join(pred_strings)
def predict_batch(predictor: DefaultPredictor, im_list: List[ndarray]) -> List:
    with torch.no_grad():
        inputs_list = []
        for original_image in im_list:
            if predictor.input_format == "RGB":
                original_image = original_image[:, :, ::-1]
            height, width = original_image.shape[:2]
            image = original_image
            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
            inputs = {"image": image, "height": height, "width": width}
            inputs_list.append(inputs)
        predictions = predictor.model(inputs_list)
        return predictions<load_pretrained> | le = LabelEncoder()
merged_df['Title'] = le.fit_transform(merged_df['Title'] ) | Titanic - Machine Learning from Disaster |
5,106,186 | def save_yaml(filepath: Union[str, Path], content: Any, width: int = 120):
with open(filepath, "w")as f:
yaml.dump(content, f, width=width)
def load_yaml(filepath: Union[str, Path])-> Any:
with open(filepath, "r")as f:
content = yaml.full_load(f)
return content
<define_variables> | dummiesTitle = pd.get_dummies(merged_df['Title'], prefix = 'Title')
merged_df = pd.concat([merged_df, dummiesTitle], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | thing_classes = [
"Aortic enlargement",
"Atelectasis",
"Calcification",
"Cardiomegaly",
"Consolidation",
"ILD",
"Infiltration",
"Lung Opacity",
"Nodule/Mass",
"Other lesion",
"Pleural effusion",
"Pleural thickening",
"Pneumothorax",
"Pulmonary fibrosis"
]
category_name_to_id = {class_name: index for index, class_name in enumerate(thing_classes)}
<define_variables> | merged_df['Sex'] = le.fit_transform(merged_df['Sex'])
dummiesSex = pd.get_dummies(merged_df['Sex'], prefix = 'Sex')
merged_df = pd.concat([merged_df, dummiesSex], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | @dataclass
class Flags:
    debug: bool = True
    outdir: str = "results/det"
    imgdir_name: str = "vinbigdata-chest-xray-resized-png-256x256"
    split_mode: str = "all_train"
    seed: int = 111
    train_data_type: str = "original"
    use_class14: bool = False
    iter: int = 10000
    ims_per_batch: int = 2
    num_workers: int = 4
    lr_scheduler_name: str = "WarmupMultiStepLR"
    base_lr: float = 0.00025
    roi_batch_size_per_image: int = 512
    eval_period: int = 10000
    aug_kwargs: Dict = field(default_factory=lambda: {})
    def update(self, param_dict: Dict) -> "Flags":
        for key, value in param_dict.items():
            if not hasattr(self, key):
                raise ValueError(f"[ERROR] Unexpected key for flag = {key}")
            setattr(self, key, value)
        return self<load_from_csv> | dummiesCabin = pd.get_dummies(merged_df['Cabin'], prefix = 'Cabin')
merged_df = pd.concat([merged_df, dummiesCabin], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | inputdir = Path("/kaggle/input")
traineddir = inputdir / "vinbigdata-alb-aug-512-cos"
flags: Flags = Flags().update(load_yaml(str(traineddir/"flags.yaml")))
print("flags", flags)
debug = flags.debug
outdir = Path(flags.outdir)
os.makedirs(str(outdir), exist_ok=True)
datadir = inputdir / "vinbigdata-chest-xray-abnormalities-detection"
if flags.imgdir_name == "vinbigdata-chest-xray-resized-png-512x512":
imgdir = inputdir/ "vinbigdata"
else:
imgdir = inputdir / flags.imgdir_name
test_meta = pd.read_csv(inputdir / "vinbigdata-testmeta" / "test_meta.csv")
sample_submission = pd.read_csv(datadir / "sample_submission.csv" )<load_pretrained> | merged_df['Embarked'] = le.fit_transform(merged_df['Embarked'])
dummiesEmbarked = pd.get_dummies(merged_df['Embarked'], prefix = 'Embarked')
merged_df = pd.concat([merged_df, dummiesEmbarked], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | cfg = get_cfg()
original_output_dir = cfg.OUTPUT_DIR
cfg.OUTPUT_DIR = str(outdir)
print(f"cfg.OUTPUT_DIR {original_output_dir} -> {cfg.OUTPUT_DIR}")
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN =("vinbigdata_train",)
cfg.DATASETS.TEST =()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = flags.base_lr
cfg.SOLVER.MAX_ITER = flags.iter
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = flags.roi_batch_size_per_image
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(thing_classes)
cfg.MODEL.WEIGHTS = str(traineddir/"model_final.pth")
print("Original thresh", cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.0
print("Changed thresh", cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST)
predictor = DefaultPredictor(cfg)
DatasetCatalog.register(
"vinbigdata_test", lambda: get_vinbigdata_dicts_test(imgdir, test_meta, debug=debug)
)
MetadataCatalog.get("vinbigdata_test" ).set(thing_classes=thing_classes)
metadata = MetadataCatalog.get("vinbigdata_test")
dataset_dicts = get_vinbigdata_dicts_test(imgdir, test_meta, debug=debug)
if debug:
    dataset_dicts = dataset_dicts[:100]
results_list = []
index = 0
batch_size = 4
for i in tqdm(range(ceil(len(dataset_dicts)/ batch_size))):
    inds = list(range(batch_size * i, min(batch_size * (i + 1), len(dataset_dicts))))
    dataset_dicts_batch = [dataset_dicts[i] for i in inds]
    im_list = [cv2.imread(d["file_name"]) for d in dataset_dicts_batch]
    outputs_list = predict_batch(predictor, im_list)
    for im, outputs, d in zip(im_list, outputs_list, dataset_dicts_batch):
        resized_height, resized_width, ch = im.shape
        if index < 5:
            v = Visualizer(
                im[:, :, ::-1],
                metadata=metadata,
                scale=0.5,
                instance_mode=ColorMode.IMAGE_BW
            )
            out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
            cv2.imwrite(str(outdir / f"pred_{index}.jpg"), out.get_image()[:, :, ::-1])
        image_id, dim0, dim1 = test_meta.iloc[index].values
        instances = outputs["instances"]
        if len(instances) == 0:
            result = {"image_id": image_id, "PredictionString": "14 1.0 0 0 1 1"}
        else:
            fields: Dict[str, Any] = instances.get_fields()
            pred_classes = fields["pred_classes"]
            pred_scores = fields["scores"]
            pred_boxes = fields["pred_boxes"].tensor
            h_ratio = dim0 / resized_height
            w_ratio = dim1 / resized_width
            pred_boxes[:, [0, 2]] *= w_ratio
            pred_boxes[:, [1, 3]] *= h_ratio
            pred_classes_array = pred_classes.cpu().numpy()
            pred_boxes_array = pred_boxes.cpu().numpy()
            pred_scores_array = pred_scores.cpu().numpy()
            result = {
                "image_id": image_id,
                "PredictionString": format_pred(
                    pred_classes_array, pred_boxes_array, pred_scores_array
                ),
            }
        results_list.append(result)
        index += 1<save_to_csv> | merged_df['Family'] = merged_df['Family'].apply(lambda x: 'N' if x == 0 else('S' if x < 4 else 'L')) | Titanic - Machine Learning from Disaster |
5,106,186 | submission_det = pd.DataFrame(results_list, columns=['image_id', 'PredictionString'])
submission_det.to_csv(outdir/"submission.csv", index=False)
submission_det<import_modules> | merged_df['Family'] = le.fit_transform(merged_df['Family'])
dummiesFamily = pd.get_dummies(merged_df['Family'], prefix = 'Family')
merged_df = pd.concat([merged_df, dummiesFamily], axis=1)
merged_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | import pandas as pd
import numpy as np
from glob import glob
import shutil<define_variables> | merged_df.drop(['Pclass', 'Sex', 'Fare', 'Cabin', 'Embarked', 'Title', 'Family'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
5,106,186 | low_thr = 0.08
high_thr = 0.95<load_from_csv> | train_df_x = merged_df[:891]
test_df_x = merged_df[891:] | Titanic - Machine Learning from Disaster |
5,106,186 | pred_14cls = pd.read_csv('../input/vinbigdata-14-class-submission-lb0154/submission.csv')
pred_2cls = pd.read_csv('../input/vinbigdata-2class-prediction/2-cls test pred.csv' )<merge> | train_df_y = train_df['Survived'] | Titanic - Machine Learning from Disaster |
5,106,186 | pred = pd.merge(pred_14cls, pred_2cls, on = 'image_id', how = 'left')
pred.head()<count_values> | train_df = train_df_x.copy()
train_df['Survived'] = train_df_y
train_df.drop(['PassengerId'], axis = 1, inplace = True)
train_df.head() | Titanic - Machine Learning from Disaster |
5,106,186 | pred['PredictionString'].value_counts().iloc[[0]]<feature_engineering> | def train_and_test(model_specific_tasks, df, it = 20):
    accsum = 0
    minacc = 1.0
    maxacc = 0
    for i in range(it):
        print('Iteration: ', (i + 1), end = '\r')
        train, test = train_test_split(df, test_size=0.2)
        train_x = train.drop(['Survived'], axis=1)
        test_x = test.drop(['Survived'], axis=1)
        train_y = train['Survived']
        test_y = test['Survived']
        train_x = np.asarray(train_x).astype('float32')
        train_y = np.asarray(train_y).astype('float32')
        acc = model_specific_tasks(train_x, train_y, test_x, test_y)
        accsum += acc
        minacc = acc if acc < minacc else minacc
        maxacc = acc if acc > maxacc else maxacc
    print('Avg.accuracy: ', (accsum / it))
    print('Min.accuracy: ', minacc)
    print('Max.accuracy: ', maxacc) | Titanic - Machine Learning from Disaster |
5,106,186 | def filter_2cls(row, low_thr=low_thr, high_thr=high_thr):
    prob = row['target']
    if prob < low_thr:
        row['PredictionString'] = '14 1 0 0 1 1'
    elif low_thr <= prob < high_thr:
        row['PredictionString'] += f' 14 {prob} 0 0 1 1'
    elif high_thr <= prob:
        row['PredictionString'] = row['PredictionString']
    else:
        raise ValueError('Prediction must be from [0-1]')
    return row<prepare_output> | def logistic_reg(train_x, train_y, test_x, test_y):
    model = LogisticRegression(solver='sag', max_iter=1000)
    model.fit(train_x, train_y)
    return model.score(test_x, test_y) | Titanic - Machine Learning from Disaster |
5,106,186 | sub = pred.apply(filter_2cls, axis=1)
sub.head()<count_values> | train_and_test(logistic_reg, train_df.copy() , it = 50 ) | Titanic - Machine Learning from Disaster |
5,106,186 | sub['PredictionString'].value_counts().iloc[[0]]<save_to_csv> | def rfc(train_x, train_y, test_x, test_y):
    model = RandomForestClassifier(n_estimators=100)
    model.fit(train_x, train_y)
    return model.score(test_x, test_y) | Titanic - Machine Learning from Disaster |
5,106,186 | sub[['image_id', 'PredictionString']].to_csv('submission.csv',index = False )<set_options> | train_and_test(rfc, train_df.copy() ) | Titanic - Machine Learning from Disaster |
5,106,186 | warnings.filterwarnings("ignore")
DIR_INPUT = '/kaggle/input/vinbigdata-chest-xray-abnormalities-detection'
DIR_TRAIN = f'{DIR_INPUT}/train'
DIR_TEST = f'{DIR_INPUT}/test'
DIR_WEIGHTS = '../input/chest-x-ray-abnormalities-detection'
WEIGHTS_FILE = f'{DIR_WEIGHTS}/model_state.pth'<load_from_csv> | def nn(train_x, train_y, test_x, test_y):
    model = Sequential()
    model.add(layers.Dense(32, activation='relu', input_shape=(22,)))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(8, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(train_x, train_y, epochs=150, batch_size=16, verbose=0)
    return model.evaluate(test_x, test_y, verbose=0)[1] | Titanic - Machine Learning from Disaster |
5,106,186 | <categorify><EOS> | train_and_test(nn, train_df.copy() , it = 10 ) | Titanic - Machine Learning from Disaster |
4,456,876 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_pretrained> | %matplotlib inline
py.init_notebook_mode(connected=True)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
PassengerId = test['PassengerId']
train.head(3)
| Titanic - Machine Learning from Disaster |
4,456,876 | model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
pretrained=False,
pretrained_backbone=False,
)<find_best_params> | original_train = train.copy()
full_data = [train, test]
train['Has_Cabin'] = train["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
test['Has_Cabin'] = test["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
for dataset in full_data:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
for dataset in full_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
for dataset in full_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
for dataset in full_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
for dataset in full_data:
    age_avg = dataset['Age'].mean()
    age_std = dataset['Age'].std()
    age_null_count = dataset['Age'].isnull().sum()
    age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
    dataset.loc[np.isnan(dataset['Age']), 'Age'] = age_null_random_list
    dataset['Age'] = dataset['Age'].astype(int)
| Titanic - Machine Learning from Disaster |
4,456,876 | device = torch.device('cuda')if torch.cuda.is_available() else torch.device('cpu')
num_classes = 15
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
model.load_state_dict(torch.load(WEIGHTS_FILE, map_location=device))
model.eval()
x = model.to(device )<create_dataframe> | def get_title(name):
    title_search = re.search('([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""
for dataset in full_data:
    dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in full_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
    dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)
    title_mapping = {"Mr": 1, "Master": 2, "Mrs": 3, "Miss": 4, "Rare": 5}
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
| Titanic - Machine Learning from Disaster |
4,456,876 | def collate_fn(batch):
    return tuple(zip(*batch))
test_dataset = VinBigTestDataset(test_df, DIR_TEST, get_test_transform())
test_data_loader = DataLoader(
test_dataset,
batch_size=6,
shuffle=False,
num_workers=4,
drop_last=False,
collate_fn=collate_fn
)<define_variables> | drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis = 1)
test = test.drop(drop_elements, axis = 1 ) | Titanic - Machine Learning from Disaster |
4,456,876 | def format_prediction_string(labels, boxes, scores):
    pred_strings = []
    for j in zip(labels, scores, boxes):
        pred_strings.append("{0} {1:.4f} {2} {3} {4} {5}".format(
            j[0], j[1], j[2][0], j[2][1], j[2][2], j[2][3]))
    return " ".join(pred_strings)<categorify> | train[['Title', 'Survived']].groupby(['Title'], as_index=False ).agg(['mean', 'count', 'sum'])
| Titanic - Machine Learning from Disaster |
4,456,876 | detection_threshold = 0.2
results = []
with torch.no_grad():
    for images, image_ids in test_data_loader:
        images = list(image.to(device) for image in images)
        outputs = model(images)
        for i, image in enumerate(images):
            image_id = image_ids[i]
            result = {
                'image_id': image_id,
                'PredictionString': '14 1.0 0 0 1 1'
            }
            boxes = outputs[i]['boxes'].data.cpu().numpy()
            labels = outputs[i]['labels'].data.cpu().numpy()
            scores = outputs[i]['scores'].data.cpu().numpy()
            if len(boxes) > 0:
                labels = labels - 1
                labels[labels == -1] = 14
                selected = scores >= detection_threshold
                boxes = boxes[selected].astype(np.int32)
                scores = scores[selected]
                labels = labels[selected]
                if len(boxes) > 0:
                    result = {
                        'image_id': image_id,
                        'PredictionString': format_prediction_string(labels, boxes, scores)
                    }
            results.append(result)
<prepare_output> | train[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).agg(['mean', 'count', 'sum'])
| Titanic - Machine Learning from Disaster |
4,456,876 | test_df = pd.DataFrame(results, columns=['image_id', 'PredictionString'])
test_df.head()<save_to_csv> | title_and_sex = original_train.copy() [['Name', 'Sex']]
title_and_sex['Title'] = title_and_sex['Name'].apply(get_title)
title_and_sex['Sex'] = title_and_sex['Sex'].map({'female': 0, 'male': 1} ).astype(int)
title_and_sex[['Title', 'Sex']].groupby(['Title'], as_index=False ).agg(['mean', 'count', 'sum'])
| Titanic - Machine Learning from Disaster |
4,456,876 | test_df.to_csv('submission.csv', index=False )<install_modules> | def get_gini_impurity(survived_count, total_count):
    survival_prob = survived_count / total_count
    not_survival_prob = (1 - survival_prob)
    random_observation_survived_prob = survival_prob
    random_observation_not_survived_prob = (1 - random_observation_survived_prob)
    mislabelling_survided_prob = not_survival_prob * random_observation_survived_prob
    mislabelling_not_survided_prob = survival_prob * random_observation_not_survived_prob
    gini_impurity = mislabelling_survided_prob + mislabelling_not_survided_prob
    return gini_impurity | Titanic - Machine Learning from Disaster |
4,456,876 | !pip install timm==0.1.26<set_options> | gini_impurity_starting_node = get_gini_impurity(342,891)
gini_impurity_starting_node | Titanic - Machine Learning from Disaster |
4,456,876 | sys.path.insert(0, ".. /input/efficientdet-torch")
sys.path.insert(0, ".. /input/omegaconf")
sys.path.insert(0, ".. /input/weightedboxesfusion")
warnings.filterwarnings('ignore' )<categorify> | gini_impurity_men = get_gini_impurity(109, 577)
print(gini_impurity_men)
gini_impurity_women = get_gini_impurity(233, 314)
print(gini_impurity_women ) | Titanic - Machine Learning from Disaster |
4,456,876 | def get_valid_transforms():
    return A.Compose([
        A.Resize(height=1024, width=1024, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)<normalization> | gini_impurity_title_Mr = get_gini_impurity(81, 517)
print(gini_impurity_title_Mr)
gini_impurity_title_others = get_gini_impurity(261, 374)
print(gini_impurity_title_others ) | Titanic - Machine Learning from Disaster |
4,456,876 | image_dir = ".. /input/vinbigdata-chest-xray-abnormalities-detection/test"
class DatasetRetriever(Dataset):
def __init__(self, image_ids, transforms=None):
super().__init__()
self.image_ids = image_ids
self.transforms = transforms
def __getitem__(self, index: int):
image_id = self.image_ids[index]
dicom = pydicom.dcmread(f"{image_dir}/{image_id}.dicom")
image = dicom.pixel_array
if "PhotometricInterpretation" in dicom:
if dicom.PhotometricInterpretation == "MONOCHROME1":
image = np.amax(image)- image
image = np.stack([image, image, image])
image = image.astype('float32')
image = image - image.min()
image = image / image.max()
image = image.transpose(1, 2, 0)
if self.transforms:
sample = {'image': image}
sample = self.transforms(**sample)
image = sample['image']
return image, image_id
def __len__(self)-> int:
return self.image_ids.shape[0]<define_variables> | men_weight = 577/891
women_weight = 314/891
weighted_gini_impurity_sex_split =(gini_impurity_men * men_weight)+(gini_impurity_women * women_weight)
print(weighted_gini_impurity_sex_split)
title_1_weight = 517/891
title_others_weight = 374/891
weighted_gini_impurity_title_split =(gini_impurity_title_Mr * title_1_weight)+(gini_impurity_title_others * title_others_weight)
print(weighted_gini_impurity_title_split ) | Titanic - Machine Learning from Disaster |
4,456,876 | image_ids = glob(os.path.join('../input/vinbigdata-chest-xray-abnormalities-detection/test', "*.dicom"))
image_ids = [image_id.split('/')[-1].split('.')[0] for image_id in image_ids]
dataset = DatasetRetriever(image_ids = np.array(image_ids), transforms = get_valid_transforms())
def collate_fn(batch):
    return tuple(zip(*batch))
data_loader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=2, drop_last=False, collate_fn=collate_fn)<choose_model_class> | sex_gini_decrease = weighted_gini_impurity_sex_split - gini_impurity_starting_node
print(sex_gini_decrease)
title_gini_decrease = weighted_gini_impurity_title_split - gini_impurity_starting_node
print(title_gini_decrease)
| Titanic - Machine Learning from Disaster |
4,456,876 | device = torch.device('cuda:0')if torch.cuda.is_available() else torch.device('cpu')
def load_net(checkpoint_path):
    config = get_efficientdet_config('tf_efficientdet_d4')
    net = EfficientDet(config, pretrained_backbone=False)
    config.num_classes = 14
    config.image_size = 1024
    net.class_net = HeadNet(config, num_outputs=config.num_classes, norm_kwargs=dict(eps=.001, momentum=.01))
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    net = DetBenchEval(net, config)
    net.eval()
    return net.to(device)
net = load_net('../input/effdet-weights/last-checkpoint.bin')<predict_on_test> | cv = KFold(n_splits = 10)
accuracies = list()
max_attributes = len(list(test))
depth_range = range(1, max_attributes + 1)
for depth in depth_range:
    fold_accuracy = []
    tree_model = tree.DecisionTreeClassifier(max_depth = depth)
    for train_fold, valid_fold in cv.split(train):
        f_train = train.loc[train_fold]
        f_valid = train.loc[valid_fold]
        model = tree_model.fit(X = f_train.drop(['Survived'], axis=1), y = f_train["Survived"])
        valid_acc = model.score(X = f_valid.drop(['Survived'], axis=1), y = f_valid["Survived"])
        fold_accuracy.append(valid_acc)
    avg = sum(fold_accuracy) / len(fold_accuracy)
    accuracies.append(avg)
df = pd.DataFrame({"Max Depth": depth_range, "Average Accuracy": accuracies})
df = df[["Max Depth", "Average Accuracy"]]
print(df.to_string(index=False)) | Titanic - Machine Learning from Disaster |
4,456,876 | def make_predictions(images, score_threshold=0.22):
    images = torch.stack(images).cuda().float()
    predictions = []
    with torch.no_grad():
        det = net(images, image_scales=torch.tensor([1] * images.shape[0]).float().cuda())
        for i in range(images.shape[0]):
            boxes = det[i].detach().cpu().numpy()[:, :4]
            scores = det[i].detach().cpu().numpy()[:, 4]
            labels = det[i].detach().cpu().numpy()[:, 5]
            indexes = np.where(scores > score_threshold)[0]
            boxes = boxes[indexes]
            boxes[:, 2] = boxes[:, 2] + boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] + boxes[:, 1]
            predictions.append({
                'boxes': boxes[indexes],
                'scores': scores[indexes],
                'labels': labels[indexes]
            })
    return [predictions]
def run_wbf(predictions, image_index, image_size=1024, iou_thr=0.5, skip_box_thr=0.4):
    boxes = [(prediction[image_index]['boxes'] / (image_size - 1)).tolist() for prediction in predictions]
    scores = [prediction[image_index]['scores'].tolist() for prediction in predictions]
    labels = [prediction[image_index]['labels'].tolist() for prediction in predictions]
    boxes, scores, labels = weighted_boxes_fusion(boxes, scores, labels, weights=None, iou_thr=iou_thr,
                                                  skip_box_thr=skip_box_thr)
    boxes = boxes * (image_size - 1)
    return boxes, scores, labels<define_search_space> | y_train = train['Survived']
x_train = train.drop(['Survived'], axis=1 ).values
x_test = test.values
decision_tree = tree.DecisionTreeClassifier(max_depth = 3)
decision_tree.fit(x_train, y_train)
y_pred = decision_tree.predict(x_test)
submission = pd.DataFrame({"PassengerId": PassengerId,"Survived": y_pred})
submission.to_csv('submission.csv', index=False)
print(submission.head(4)) | Titanic - Machine Learning from Disaster |
4,456,876 | mapping = {0: 'Aortic enlargement', 1: 'Atelectasis', 2: 'Calcification', 3: 'Cardiomegaly', 4: 'Consolidation', 5: 'ILD',6: 'Infiltration', 7: 'Lung Opacity',
8: 'Nodule/Mass', 9: 'Other lesion', 10: 'Pleural effusion', 11: 'Pleural thickening',12: 'Pneumothorax', 13: 'Pulmonary fibrosis'}
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 3
color =(1, 0, 0)
for j,(images, image_ids)in enumerate(data_loader):
    if j >= 20:
        break
    predictions = make_predictions(images=images, score_threshold=0.5)
    i = 1
    sample = images[i].permute(1, 2, 0).cpu().numpy()
    boxes, scores, labels = run_wbf(predictions, image_index=i)
    if len(labels) > 0:
        boxes = boxes.astype(np.int32).clip(min=0, max=1023)
    fig, ax = plt.subplots(1, 1, figsize=(8, 8))
    for score, box, label in zip(scores, boxes, labels):
        cv2.rectangle(sample, (box[0], box[1]), (box[2], box[3]), (0, 1, 1), thickness)
        cv2.putText(sample, mapping[label] + "---" + str(round(score, 2)), (box[0], box[1]),
                    font, fontScale, color, thickness, cv2.LINE_AA)
    ax.set_axis_off()
    ax.imshow(sample)
    plt.show()<categorify> | with open("tree1.dot", 'w')as f:
    f = tree.export_graphviz(decision_tree,
                             out_file=f,
                             max_depth = 3,
                             impurity = True,
                             feature_names = list(train.drop(['Survived'], axis=1)),
                             class_names = ['Died', 'Survived'],
                             rounded = True,
                             filled = True)
check_call(['dot','-Tpng','tree1.dot','-o','tree1.png'])
img = Image.open("tree1.png")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationSerif-Bold.ttf', 26)
draw.text(( 10, 0),
'"Title <= 1.5" corresponds to "Mr." title',
(0,0,255),
font=font)
img.save('sample-out.png')
PImage("sample-out.png" ) | Titanic - Machine Learning from Disaster |
4,456,876 | <predict_on_test><EOS> | acc_decision_tree = round(decision_tree.score(x_train, y_train)* 100, 2)
acc_decision_tree | Titanic - Machine Learning from Disaster |
3,974,929 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
3,974,929 | test_df = pd.DataFrame(results, columns=['image_id', 'PredictionString'])
test_df.to_csv('submission.csv', index=False )<load_from_csv> | train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df,test_df]
print(combine ) | Titanic - Machine Learning from Disaster |
3,974,929 | items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
cats = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
<feature_engineering> | train_df[['Pclass','Survived']].groupby(['Pclass'],as_index=False ).mean().sort_values(by='Survived',ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | train = train[train.item_price<100000]
train = train[train.item_cnt_day<1001]
train["item_cnt_day"]=train["item_cnt_day"].fillna(0)
train = train[train.item_cnt_day>0]
median = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()
train.loc[train.item_price<0, 'item_price'] = median<count_missing_values> | train_df[['Sex','Survived']].groupby(['Sex'],as_index=False ).mean().sort_values(by='Survived',ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | def check_nulls(df_train):
    for d in df_train.columns:
        if df_train[d].isnull().values.any():
            print("column " + d)
            if df_train[d].dtype.kind in 'bifc':
                df_train[d].fillna(0, inplace = True)
            else:
                print("column " + d)
                df_train[d].fillna("NULL_VALUE", inplace = True)<categorify> | train_df[['SibSp','Survived']].groupby(['SibSp'],as_index=False ).mean().sort_values(by='Survived',ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | shops['city'] = shops['shop_name'].str.split(' ' ).map(lambda x: x[0])
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x)> 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]<groupby> | train_df[['Parch','Survived']].groupby(['Parch'],as_index=False ).mean().sort_values(by='Survived',ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | check_count = shops.groupby(["shop_id"])['shop_id'].size()
print(check_count[check_count > 1])
check_count = cats.groupby(["item_category_id"])['item_category_id'].size()
print(check_count[check_count > 1])
<groupby> | for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'] ) | Titanic - Machine Learning from Disaster |
3,974,929 | check_count = train.groupby(["date","date_block_num","shop_id","item_id"])['shop_id'].size()
print(check_count[check_count > 1])
<filter> | for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
        'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean() | Titanic - Machine Learning from Disaster |
3,974,929 | train.query("date_block_num==16 and shop_id==50 and item_id==3423" )<merge> | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | test["date_block_num"]=34
test = pd.merge(test, shops, on=['shop_id'], how='left',suffixes=('', '_x'))
test = pd.merge(test, items, on=['item_id'], how='left',suffixes=('', '_y'))
test = pd.merge(test, cats, on=['item_category_id'], how='left')
test.columns<groupby> | train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape | Titanic - Machine Learning from Disaster |
3,974,929 | check_count = test.groupby(["shop_id","item_id"])['shop_id'].size()
print(check_count[check_count > 1])
<create_dataframe> | for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | train_2 = pd.DataFrame(np.vstack(train_2))
<rename_columns> | guess_ages = np.zeros(( 2,3))
guess_ages | Titanic - Machine Learning from Disaster |
3,974,929 | train_2.rename(columns={0: "date_block_num", 1: "shop_id", 2: "item_id"} , inplace = True)
train_2<merge> | for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & \
                               (dataset['Pclass'] == j+1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i,j] = int(age_guess/0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1),\
                        'Age'] = guess_ages[i,j]
    dataset['Age'] = dataset['Age'].astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | cols=['date_block_num','shop_id','item_id']
group = train.groupby(['date_block_num','shop_id','item_id'] ).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=cols, how='left')
train_2['item_cnt_month'] =(train_2['item_cnt_month']
.fillna(0)
.clip(0,20)
.astype(np.float32))<merge> | train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean().sort_values(by='AgeBand', ascending=True ) | Titanic - Machine Learning from Disaster |
3,974,929 | train_2 = pd.merge(train_2, shops, on=['shop_id'], how='left',suffixes=('', '_x'))
train_2 = pd.merge(train_2, items, on=['item_id'], how='left',suffixes=('', '_y'))
train_2 = pd.merge(train_2, cats, on=['item_category_id'], how='left')
<groupby> | for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | check_count = train_2.groupby(["date_block_num","shop_id","item_id"])['shop_id'].size()
print(check_count[check_count > 1])
<concatenate> | train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | cols = ['shop_id','item_id']
train_2 = pd.concat([train_2, test], ignore_index=True, sort=False, keys=cols )<groupby> | for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | def sum_df(df,columns,target):
    x = []
    name = target + "_sum"
    data = columns.split(",")
    for col in data:
        x.append(col)
    print(x)
    return df.groupby(x)[target].sum().to_frame(name).reset_index()
def mean_df(df,columns,target):
    x = []
    name = target + "_mean"
    data = columns.split(",")
    for col in data:
        x.append(col)
    print(x)
    return df.groupby(x)[target].mean().to_frame(name).reset_index()
<merge> | for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean() | Titanic - Machine Learning from Disaster |
3,974,929 | def lag_feature(df, lags, col):
    tmp = df[['date_block_num','shop_id','item_id',col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]
        shifted['date_block_num'] += i
        df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
    return df<merge> | train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
return df<merge> | train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num'], how='left')
train_2['date_avg_item_cnt'] = train_2['date_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_avg_item_cnt')
train_2.drop(['date_avg_item_cnt'], axis=1, inplace=True)
group = train_2.groupby(['date_block_num', 'item_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num','item_id'], how='left')
train_2['date_item_avg_item_cnt'] = train_2['date_item_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1,2,3,6,12], 'date_item_avg_item_cnt')
train_2.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
group = train_2.groupby(['date_block_num', 'shop_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_shop_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num','shop_id'], how='left')
train_2['date_shop_avg_item_cnt'] = train_2['date_shop_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1,2,3,6,12], 'date_shop_avg_item_cnt')
train_2.drop(['date_shop_avg_item_cnt'], axis=1, inplace=True)
group = train_2.groupby(['date_block_num', 'item_category_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_cat_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num','item_category_id'], how='left')
train_2['date_cat_avg_item_cnt'] = train_2['date_cat_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_cat_avg_item_cnt')
train_2.drop(['date_cat_avg_item_cnt'], axis=1, inplace=True)
<merge> | for dataset in combine:
    dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10 ) | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'city_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_city_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'city_code'], how='left')
train_2['date_city_avg_item_cnt'] = train_2['date_city_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_city_avg_item_cnt')
train_2.drop(['date_city_avg_item_cnt'], axis=1, inplace=True)
group = train_2.groupby(['date_block_num', 'item_id', 'city_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_city_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'item_id', 'city_code'], how='left')
train_2['date_item_city_avg_item_cnt'] = train_2['date_item_city_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_item_city_avg_item_cnt')
train_2.drop(['date_item_city_avg_item_cnt'], axis=1, inplace=True)
group = train_2.groupby(['date_block_num', 'type_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_type_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'type_code'], how='left')
train_2['date_type_avg_item_cnt'] = train_2['date_type_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_type_avg_item_cnt')
train_2.drop(['date_type_avg_item_cnt'], axis=1, inplace=True)
group = train_2.groupby(['date_block_num', 'subtype_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_subtype_avg_item_cnt' ]
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'subtype_code'], how='left')
train_2['date_subtype_avg_item_cnt'] = train_2['date_subtype_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_subtype_avg_item_cnt')
train_2.drop(['date_subtype_avg_item_cnt'], axis=1, inplace=True )<merge> | freq_port = train_df.Embarked.dropna().mode() [0]
freq_port | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'shop_id', 'item_category_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_cat_avg_item_cnt']
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'shop_id', 'item_category_id'], how='left')
train_2['date_shop_cat_avg_item_cnt'] = train_2['date_shop_cat_avg_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_shop_cat_avg_item_cnt')
train_2.drop(['date_shop_cat_avg_item_cnt'], axis=1, inplace=True )<merge> | for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'shop_id', 'item_id'] ).agg({'item_cnt_month': ['median']})
group.columns = ['date_shop_item_med_item_cnt']
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'shop_id', 'item_id'], how='left')
train_2['date_shop_item_med_item_cnt'] = train_2['date_shop_item_med_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1,2,3,6,12], 'date_shop_item_med_item_cnt')
train_2.drop(['date_shop_item_med_item_cnt'], axis=1, inplace=True)
<merge> | for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'shop_id'] ).agg({'item_cnt_month': ['median']})
group.columns = ['date_shop_med_item_cnt']
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'shop_id'], how='left')
train_2['date_shop_med_item_cnt'] = train_2['date_shop_med_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_shop_med_item_cnt')
train_2.drop(['date_shop_med_item_cnt'], axis=1, inplace=True )<merge> | test_df['Fare'].fillna(test_df['Fare'].dropna().median() , inplace=True)
test_df.head() | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'shop_id','item_category_id'] ).agg({'item_cnt_month': ['median']})
group.columns = ['date_shop_cat_med_item_cnt']
group.reset_index(inplace=True)
train_2 = pd.merge(train_2, group, on=['date_block_num', 'shop_id','item_category_id'], how='left')
train_2['date_shop_cat_med_item_cnt'] = train_2['date_shop_cat_med_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_shop_cat_med_item_cnt')
train_2.drop(['date_shop_cat_med_item_cnt'], axis=1, inplace=True )<drop_column> | train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False ).mean().sort_values(by='FareBand', ascending=True ) | Titanic - Machine Learning from Disaster |
3,974,929 | train_2.columns
<data_type_conversions> | for dataset in combine:
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10 ) | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'shop_id','item_id'])['item_cnt_month'].quantile (.25)
group.columns = ['date_shop_item_q25_item_cnt']
dfx=pd.DataFrame()
dfx["date_shop_item_q25_item_cnt"]=group
print(train_2.columns)
train_2 = pd.merge(train_2, dfx, on=['date_block_num', 'shop_id','item_id'], how='left')
train_2['date_shop_item_q25_item_cnt'] = train_2['date_shop_item_q25_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_shop_item_q25_item_cnt')
train_2.drop(['date_shop_item_q25_item_cnt'], axis=1, inplace=True )<data_type_conversions> | X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1 ).copy()
X_train.shape, Y_train.shape, X_test.shape | Titanic - Machine Learning from Disaster |
3,974,929 | group = train_2.groupby(['date_block_num', 'shop_id','item_id'])['item_cnt_month'].quantile (.75)
group.columns = ['date_shop_item_q75_item_cnt']
dfx=pd.DataFrame()
dfx["date_shop_item_q75_item_cnt"]=group
print(train_2.columns)
train_2 = pd.merge(train_2, dfx, on=['date_block_num', 'shop_id','item_id'], how='left')
train_2['date_shop_item_q75_item_cnt'] = train_2['date_shop_item_q75_item_cnt'].astype(np.float16)
train_2 = lag_feature(train_2, [1], 'date_shop_item_q75_item_cnt')
train_2.drop(['date_shop_item_q75_item_cnt'], axis=1, inplace=True )<feature_engineering> | logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred1 = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train)* 100, 2)
acc_log | Titanic - Machine Learning from Disaster |
3,974,929 | train_2['month'] = train_2['date_block_num'] % 12<correct_missing_values> | svc = SVC()
svc.fit(X_train, Y_train)
Y_pred2 = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train)* 100, 2)
acc_svc | Titanic - Machine Learning from Disaster |
3,974,929 | def fill_na(df):
    for col in df.columns:
        if ('_lag_' in col) & (df[col].isnull().any()):
            if ('item_cnt' in col):
                df[col].fillna(0, inplace=True)
    return df
train_2 = fill_na(train_2 )<drop_column> | knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred3 = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train)* 100, 2)
acc_knn | Titanic - Machine Learning from Disaster |
3,974,929 | final=train_2[[
'date_block_num',
'shop_id',
'item_id',
'item_cnt_month',
'city_code',
'item_category_id',
'type_code',
'subtype_code',
'item_cnt_month_lag_1', 'item_cnt_month_lag_2', 'item_cnt_month_lag_3', 'item_cnt_month_lag_6', 'date_item_avg_item_cnt_lag_1',
'date_item_avg_item_cnt_lag_2', 'date_item_avg_item_cnt_lag_3', 'date_item_avg_item_cnt_lag_6', 'date_cat_avg_item_cnt_lag_1',
'date_item_city_avg_item_cnt_lag_1', 'date_subtype_avg_item_cnt_lag_1',
'date_shop_cat_avg_item_cnt_lag_1', 'date_shop_item_med_item_cnt_lag_1',
'date_shop_item_med_item_cnt_lag_2', 'date_shop_item_med_item_cnt_lag_3',
'date_shop_item_med_item_cnt_lag_6', 'date_shop_cat_med_item_cnt_lag_1',
'date_shop_item_q25_item_cnt_lag_1', 'date_shop_item_q75_item_cnt_lag_1'
]]
<train_on_grid> | gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred4 = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train)* 100, 2)
acc_gaussian | Titanic - Machine Learning from Disaster |
3,974,929 | def One_way_ANOVA(df):
    lister12 = []
    for col in df.columns:
        if df[col].dtype.kind not in 'bifc':
            model = ols('item_cnt_month ~' + col, data=df).fit()
            table = sm.stats.anova_lm(model, typ=2)
            if table["PR(>F)"][0] < 0.05:
                lister12.append(col)
    return lister12<data_type_conversions> | perceptron = Perceptron()
return lister12<data_type_conversions> | perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred5 = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train)* 100, 2)
acc_perceptron | Titanic - Machine Learning from Disaster |
3,974,929 | train_check=train_2[(train_2.date_block_num<=5)]
train_check['item_category_id_char']=train_check['item_category_id'].apply(str)
train_check['type_code_char']=train_check['type_code'].apply(str)
train_check['subtype_code_char']=train_check['subtype_code'].apply(str )<compute_test_metric> | linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred6 = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train)* 100, 2)
acc_linear_svc | Titanic - Machine Learning from Disaster |
3,974,929 | One_way_ANOVA(train_check[["city","shop_name","item_category_id_char","subtype_code_char","type_code_char","item_cnt_month"]] )<prepare_x_and_y> | sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
Y_pred7 = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_train, Y_train)* 100, 2)
acc_sgd | Titanic - Machine Learning from Disaster |
3,974,929 | final.rename(columns = {'item_cnt_month':'item_monthly_sum_x'}, inplace = True)
final["item_monthly_sum_x"] = final["item_monthly_sum_x"]
X_train = final[final.date_block_num < 33].drop(['item_monthly_sum_x'], axis=1)
Y_train = final[final.date_block_num < 33]['item_monthly_sum_x']
X_valid = final[final.date_block_num == 33].drop(['item_monthly_sum_x'], axis=1)
Y_valid = final[final.date_block_num == 33]['item_monthly_sum_x']
X_test = final[final.date_block_num == 34].drop(['item_monthly_sum_x'], axis=1 )<save_to_csv> | decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred8 = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train)* 100, 2)
acc_decision_tree | Titanic - Machine Learning from Disaster |
3,974,929 | Y_test = model.predict(X_test)
submission = pd.DataFrame({
"ID": test.ID,
"item_cnt_month": Y_test.clip(0, 20)
})
submission.to_csv('xgb_submission.csv', index=False)
<load_from_csv> | random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred9 = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train)* 100, 2)
acc_random_forest | Titanic - Machine Learning from Disaster |
3,974,929 | PATH = 'competitive-data-science-predict-future-sales'
items = pd.read_csv('../input/' + PATH + '/items.csv')
shops = pd.read_csv('../input/' + PATH + '/shops.csv')
cats = pd.read_csv('../input/' + PATH + '/item_categories.csv')
train = pd.read_csv('../input/' + PATH + '/sales_train.csv')
test = pd.read_csv('../input/' + PATH + '/test.csv').set_index('ID')
print('------------- train info ------------'); print(train.info(), '\n')
print('------------- test info ------------'); print(test.info(), '\n')
print('------------- items info ------------'); print(items.info(), '\n')
print('------------- shops info ------------'); print(shops.info(), '\n')
print('------------- categories info ------------'); print(cats.info(), '\n')<count_missing_values> | models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'Decision Tree'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree]})
models.sort_values(by='Score', ascending=False ) | Titanic - Machine Learning from Disaster |
3,974,929 | print(train.isna().sum(), '\n')
print(test.isna().sum())<feature_engineering> | submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred9
})
submission.to_csv('submission_titanic.csv', index=False ) | Titanic - Machine Learning from Disaster |
11,353,301 | train = train[train.item_price > 0].reset_index(drop=True)
train[train.item_cnt_day <= 0].item_cnt_day.unique()
train.loc[train.item_cnt_day < 1, 'item_cnt_day'] = 0<feature_engineering> | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
| Titanic - Machine Learning from Disaster |
11,353,301 | train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 11, 'shop_id'] = 10
test.loc[test.shop_id == 11, 'shop_id'] = 10
train.loc[train.shop_id == 40, 'shop_id'] = 39
test.loc[test.shop_id == 40, 'shop_id'] = 39<feature_engineering> | train = pd.read_csv('.. /input/titanic/train.csv')
test = pd.read_csv('.. /input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
11,353,301 | shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['city'] = shops['shop_name'].str.split(' ' ).map(lambda x: x[0])
shops['category'] = shops['shop_name'].str.split(' ' ).map(lambda x:x[1] ).astype(str)
shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск'
category = ['Орджоникидзе,', 'ТЦ', 'ТРК', 'ТРЦ','ул.', 'Магазин', 'ТК', 'склад']
shops.category = shops.category.apply(lambda x: x if(x in category)else 'etc')
shops.category.unique()<groupby> | pd.pivot_table(train, index = 'Pclass', values = ['Survived'] ) | Titanic - Machine Learning from Disaster |
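The rows above are raw (prompt, completion) pairs of Kaggle notebook code cells. A minimal sketch of how a table with this schema might be loaded and inspected with the Hugging Face datasets library follows; the repository id "user/kaggle-code-cells" is a placeholder, not the actual dataset name, and the column names are taken from the header above.

# Hypothetical usage sketch -- the repository id below is an assumed placeholder.
from datasets import load_dataset

ds = load_dataset("user/kaggle-code-cells", split="train")  # assumed repo id
print(ds.column_names)  # expected: ['kernel_id', 'prompt', 'completion', 'comp_name']
row = ds[0]
print(row["comp_name"])         # e.g. 'Titanic - Machine Learning from Disaster'
print(row["completion"][:200])  # completion cells are raw notebook source code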