prompt stringlengths 19 1.03M | completion stringlengths 4 2.12k | api stringlengths 8 90 |
|---|---|---|
from typing import Dict
import pandas as pd
import datetime as dt
from src.typeDefs.stateConfig import IStateConfig
from src.typeDefs.stateslinesMeasRecord import IGenLineDataRecord
from typing import List
def getGenLinesDailyData(statesConfigSheet: List[IStateConfig], targetFilePath: str) -> List[List]:
    """Read each state's generator-line daily data sheet from an excel workbook.

    Parameters
    ----------
    statesConfigSheet : list of state config rows; each row supplies the sheet
        name ('sheet_gen_data') and the state name ('name').
    targetFilePath : path of the excel workbook to read.

    Returns
    -------
    List of row lists accumulated across all configured sheets.
    """
    allGenLinesRecords = []
    genLineDailyRecords: List[IGenLineDataRecord] = []
    for eachRow in statesConfigSheet:
        sheetName = eachRow['sheet_gen_data']
        # skip states with no generator-data sheet configured (NaN in config)
        if pd.isna(sheetName):
            continue
        if sheetName == 'IR Regionwise Sch Act':
            # this sheet has a two-row header -> flatten the MultiIndex columns
            dataSheeetDf = pd.read_excel(targetFilePath, sheet_name=sheetName, skiprows=0, header=[0, 1])
            dataSheeetDf.columns = ['_'.join(x) for x in dataSheeetDf.columns]
            dataSheeetDf = pd.melt(dataSheeetDf, id_vars=['Date_Date'])
            dataSheeetDf = dataSheeetDf.rename(columns={
                'variable': 'generator_tag', 'value': 'data_val',
                'Date_Date': 'data_time'})
            dataSheeetDf['entity_tag'] = eachRow['name']
            # fix: assignment form instead of chained inplace fillna (chained
            # inplace on a column view is deprecated / unreliable in pandas)
            dataSheeetDf['data_val'] = dataSheeetDf['data_val'].fillna(0)
        else:
            # single-header sheets melt directly on the 'Date' column
            # (fix: line was garbled by the dataset export; pipe artifacts removed)
            dataSheeetDf = pd.read_excel(targetFilePath, sheet_name=sheetName, skiprows=0)
            dataSheeetDf = pd.melt(dataSheeetDf, id_vars=['Date'])
        # NOTE(review): the source was truncated here; accumulation and return
        # reconstructed from the declared return type -- confirm against callers.
        allGenLinesRecords.extend(dataSheeetDf.values.tolist())
    return allGenLinesRecords
import pandas as pd
def get_raw_data(fname, cols_to_read, limit_rows=True, nrows=100):
    """Load a raw-data CSV indexed by ACCOUNT_NUM.

    Parameters
    ----------
    fname : str
        File name (without extension) under ./1.desired_subset_of_raw_data/.
    cols_to_read : list of str
        Columns to load; must include "ACCOUNT_NUM".
    limit_rows : bool
        When True, read only the first `nrows` rows.
    nrows : int
        Row cap applied when limit_rows is True.

    Returns
    -------
    pandas.DataFrame indexed by ACCOUNT_NUM.
    """
    path = f"./1.desired_subset_of_raw_data/{fname}.csv"
    if limit_rows:
        df = pd.read_csv(path, nrows=nrows, index_col="ACCOUNT_NUM", usecols=cols_to_read)
    else:
        # fix: line was garbled by the dataset export; pipe artifacts removed
        df = pd.read_csv(path, index_col="ACCOUNT_NUM", usecols=cols_to_read)
    return df
# %% 说明
# ------------------------------------------------------------------->>>>>>>>>>
# 最后更新ID name的时候用这个脚本,从师兄的list汇总完成替换
# os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
# ------------------------------------------------------------------->>>>>>>>>>
# %% imports and settings
from pandarallel import pandarallel
import datar.all as r
from datar import f
import plotnine as p9
import os
import numpy as np
import pandas as pd
import seaborn as sns
import glob
sns.set()  # apply seaborn's default plot styling globally
pd.set_option("max_colwidth", 100)  # maximum column display width
pd.set_option("display.width", 250)  # total dataframe display width
pd.set_option("display.max_columns", None)  # show all columns
pd.set_option("display.max_rows", 50)  # maximum rows displayed
pandarallel.initialize()  # parallel apply setup; uses all cores by default (nb_workers=24)
# %% os.chdir
os.chdir(
"/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/TargetSeq/20220305_TargetSeq_bmat_alldone_add_v2"
)
os.listdir()
# %% load table all target
# load target-seq
df = pd.read_csv("table/20220305_DdCBE-target-seq-info.csv.gz")
df.drop("bmat_name", axis=1, inplace=True)
# # 查看treatment
print(df.treatment.value_counts())
# # 看一下na情况
print(df.info())
print(df.isna().sum())
# %%load base index
# 每个off-target region中指定base的TargetSeq ratio
df_idx = pd.read_excel(
"./table/20220305_DdCBE_only_one_mut_index_info.xlsx", sheet_name="Sheet1"
)
# print(df_idx)
# df_idx.info()
df_idx = df_idx[["region_id", "ref_base", "relative_pos"]].copy()
df_idx
# %% filter
# %%% filter_删除某些点
df = df[~(df.region_id == "ND516-share-1")].copy() # ND516-share-1 没做
df = df[~(df.region_id == "share-new-3")].copy() # share-new-3 扔掉了的点
df = df[~(df.region_id == "ND6-new-only17")].copy() # ND6-new-only17 扔掉了的点
# 查看所有位点
sorted(df.region_id.value_counts().index.tolist())
# %%% filter_删除某些replication
ls_treatment = [
# 脱靶验证系列1 【最终画barplot用】
'ND4-Det-Q5U',
'ND4-LIPO2000-Q5U',
'ND5-1-Det-Q5U',
'ND5-1-LIPO2000-Q5U',
'ND6-Det-Q5U',
'ND6-LIPO2000-Q5U',
'ND6-LTX-12-Q5U',
'untreat-Q5U'
]
df = df[df.treatment.map(lambda x: x in ls_treatment)].copy()
sorted(list(set(df.treatment.values)))
# %% fix
# 发现rep3有一个 ND5.1-new-only24,实际上是补实验替代rep1的
df[df.rep == 'rep3'].region_id.value_counts()
# ND5.1-new-only24 1140
# Name: region_id, dtype: int64
df.loc[df.rep == 'rep3', 'rep'] = 'rep1'
# %% merge all-target table and index table
df_one_base = df_idx.merge(df, how="left", on=["region_id", "ref_base", "relative_pos"])
# check df_one_base
# 135个点,没错
print(df_one_base.region_id.value_counts().index.__len__())
# %% calculating
# %%% 计算mut_ratio * 100
df_one_base["mut_ratio"] = df_one_base.mut_count / df_one_base.total_count * 100
df_one_base
# %%% filter C2T or G2A
df_one_base_filter_base = df_one_base[
((df_one_base.ref_base == "C") & (df_one_base.mut_base == "T"))
| (df_one_base.ref_base == "G") & (df_one_base.mut_base == "A")
].copy()
# %%% filter 用cutoff3(off) cutoff0(on)
def filter_cutoff(x):
    """Row filter: keep cutoff-3 rows for off-target regions and cutoff-0 rows
    for on-target regions; drop everything else."""
    is_on_target = "on-target" in x["region_id"]
    return (x["cutoff"] == 3 and not is_on_target) or (x["cutoff"] == 0 and is_on_target)
df_one_base_filter_base = df_one_base_filter_base[
df_one_base_filter_base.apply(filter_cutoff, axis=1)
].copy()
df_use = df_one_base_filter_base.copy()
# check df_one_base
# 133个点,没错, 因为on-target不止count了一次
print(df_use.region_id.value_counts().index.__len__())
# %%% 计算mean
df_use1 = (
df_use
>> r.group_by(f.region_id, f.treatment)
>> r.summarise(mut_ratio_mean=np.mean(f.mut_ratio))
)
df_use2 = (
df_use
>> r.select(f.region_id, f.treatment, f.rep, f.mut_ratio)
>> r.left_join(df_use1)
)
df_plot = df_use2.copy()
df_plot
# %% plot
# 配色 https://blog.csdn.net/black000shirt/article/details/113724245
def off_barplot(df):
    """Dodged bar plot of mean editing ratio per off-target site, with one bar
    group per treatment and per-replicate values overlaid as points; the
    coordinates are flipped so sites read down the y axis.

    Expects columns: region_id, treatment, mut_ratio, mut_ratio_mean.
    Returns the plotnine figure object.
    """
    fig = (
        p9.ggplot()
        + p9.geom_bar(
            data=df,
            mapping=p9.aes(x="region_id", y="mut_ratio_mean", fill="treatment"),
            stat=p9.stats.stat_identity,
            position=p9.positions.position_dodge(
                width=0.9, preserve="total"  # Bar width
            ),
            color="black",
            size=0.1,
            raster=False,
        )
        + p9.geom_point(
            data=df,
            mapping=p9.aes(
                x="region_id",
                y="mut_ratio",
                # fill="treatment",
                group="treatment",  # dodge replicate points with their bars
            ),
            # color="black",
            position=p9.positions.position_dodge(
                width=0.9, preserve="total"  # Bar width
            ),
            size=0.2,
        )
        + p9.scale_x_discrete(name="Off-target sites")
        + p9.scale_y_continuous(
            name="Editing ratio by Targeted deep sequencing (%)",
            # breaks=np.arange(0, df.mut_ratio.max(), round(df.mut_ratio.max() / 10, 1)),
            labels=lambda L: ["%.1f" % v for v in L],
        )
        + p9.coord_flip()
        + p9.theme_classic()
        # + p9.scale_fill_brewer(type="qualitative", palette="Set2")  # comment this line out when plotting "share old" sites, otherwise there are not enough colors
        + p9.ggtitle("Targeted deep sequencing ratio")
    )
    return fig
def off_barplot_log10(df):
    """Same bar + point layout as off_barplot, but on a log10-transformed axis.

    Ratios are mapped to log10(ratio) + 5 (so 1e-5 % maps to 0) and clipped at
    0; the y axis is relabelled with the original percentage values.

    Fix: the original transformed the caller's DataFrame in place, corrupting it
    for any later use -- we now work on a copy.
    """
    df = df.copy()
    # fix log10: shift so the smallest plotted value (1e-5 %) lands at 0
    df["mut_ratio"] = np.log10(df.mut_ratio) + 5
    df["mut_ratio"] = df.mut_ratio.map(lambda x: x if x > 0 else 0)
    df["mut_ratio_mean"] = np.log10(df.mut_ratio_mean) + 5
    df["mut_ratio_mean"] = df.mut_ratio_mean.map(lambda x: x if x > 0 else 0)
    # plot
    fig = (
        p9.ggplot()
        + p9.geom_bar(
            data=df,
            mapping=p9.aes(x="region_id", y="mut_ratio_mean", fill="treatment"),
            stat=p9.stats.stat_identity,
            position=p9.positions.position_dodge(
                width=0.9, preserve="total"  # Bar width
            ),
            color="black",
            size=0.1,
            raster=False,
        )
        + p9.geom_point(
            data=df,
            mapping=p9.aes(
                x="region_id",
                y="mut_ratio",
                # fill="treatment",
                group="treatment",
            ),
            # color="black",
            position=p9.positions.position_dodge(
                width=0.9, preserve="total"  # Bar width
            ),
            size=0.2,
        )
        + p9.scale_x_discrete(name="Off-target sites")
        + p9.scale_y_continuous(
            # fix log10: axis limits/breaks in shifted log space, labels in %
            limits=np.log10([0.00001, 100]) + 5,
            breaks=np.log10([0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]) + 5,
            labels=[0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100],
            name="log10(Editing ratio by Targeted deep sequencing (%))",
        )
        + p9.coord_flip()
        + p9.theme_classic()
        # + p9.scale_fill_brewer(type="qualitative", palette="Set2")  # comment this line out when plotting "share old" sites, otherwise there are not enough colors
        + p9.ggtitle("Targeted deep sequencing ratio")
    )
    return fig
def off_jitterplot(df):
    """Jittered scatter + translucent boxplot of per-replicate editing ratios,
    grouped by treatment. Returns the plotnine figure object."""
    fig = (
        p9.ggplot(
            data=df,
            mapping=p9.aes(x="treatment", y="mut_ratio", fill="treatment"),
        )
        + p9.geom_jitter(
            **{
                "stat": "identity",
                "na_rm": False,
                "width": 0.1,  # horizontal jitter only
                "height": 0,
                "random_state": None,
            }
        )
        + p9.geom_boxplot(alpha=0.2)
        + p9.scale_y_continuous(breaks=np.arange(0, df.mut_ratio.max(), 0.5))
        + p9.theme_classic()
    )
    return fig
# %%% final list
df_mut_ratio = df_use2[['region_id', 'treatment', 'rep', 'mut_ratio']].copy()
df_mut_ratio_mean = df_use1.copy()
ls_nd4 = ([f'ND4-only-{i}' for i in range(1, 11)]
+ [f'ND4-new-only{i}' for i in range(1, 14)]
+ [f'ND516-share-{i}' for i in range(2, 14)]
+ [f'share-new-{i}' for i in [1, 2, 4, 5, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 31]])
ls_nd5_1 = ([f'ND5.1-only-{i}' for i in range(1, 11)]
+ [f'ND5.1-new-only{i}' for i in range(1, 26)]
+ [f'ND516-share-{i}' for i in range(2, 14)]
+ [f'share-new-{i}' for i in [1, ] + list(range(5, 27)) + [28, 29, 30, 31, 32]])
ls_nd6 = ([f'ND6-only-{i}' for i in range(1, 14)]
+ [f'ND6-new-only{i}' for i in range(1, 17)]
+ [f'ND516-share-{i}' for i in range(2, 14)]
+ [f'share-new-{i}' for i in [5, 8, ] + list(range(11, 33))])
# %% 处理、点的名字归类
df_plot['belongto'] = None
flt4 = df_plot.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_plot.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_plot.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_plot.loc[flt4, 'belongto'] = 'ND4'
df_plot.loc[flt5_1, 'belongto'] = 'ND5.1'
df_plot.loc[flt6, 'belongto'] = 'ND6'
# Tag each per-replicate record with the DdCBE construct it belongs to
# ("belongto"), based on the site lists (ls_nd4 / ls_nd5_1 / ls_nd6) and the
# treatments relevant to each construct.
df_mut_ratio['belongto'] = None
flt4 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_mut_ratio.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_mut_ratio.loc[flt4, 'belongto'] = 'ND4'
df_mut_ratio.loc[flt5_1, 'belongto'] = 'ND5.1'
df_mut_ratio.loc[flt6, 'belongto'] = 'ND6'
# fix: the original recomputed the same three masks and reassigned the identical
# labels a second time; the duplicate (idempotent) pass was removed.
# Tag each per-site mean record with the DdCBE construct it belongs to
# ("belongto"), mirroring the per-replicate tagging above.
df_mut_ratio_mean['belongto'] = None
flt4 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd4) & (x['treatment'] in ['ND4-Det-Q5U', 'ND4-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt5_1 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd5_1) & (x['treatment'] in ['ND5-1-Det-Q5U', 'ND5-1-LIPO2000-Q5U', 'untreat-Q5U']), axis=1)
flt6 = df_mut_ratio_mean.apply(lambda x: (x['region_id'] in ls_nd6) & (x['treatment'] in ['ND6-Det-Q5U', 'ND6-LIPO2000-Q5U', 'ND6-LTX-12-Q5U', 'untreat-Q5U']), axis=1)
df_mut_ratio_mean.loc[flt4, 'belongto'] = 'ND4'
df_mut_ratio_mean.loc[flt5_1, 'belongto'] = 'ND5.1'
df_mut_ratio_mean.loc[flt6, 'belongto'] = 'ND6'
# fix: the original recomputed the same three masks and reassigned the identical
# labels a second time; the duplicate (idempotent) pass was removed.
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND1-Det-Q5U', 'belongto'] = 'ND1'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND4-L1333N-Det-Q5U', 'belongto'] = 'ND4-L1333N'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND4-L1397C-Det-Q5U', 'belongto'] = 'ND4-L1397C'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND5-1-L1333N-Det-Q5U', 'belongto'] = 'ND5-1-L1333N'
df_mut_ratio_mean.loc[df_mut_ratio_mean.treatment == 'ND5-3-L1397C-Det-Q5U', 'belongto'] = 'ND5-3-L1397C'
# %% map mpmat_index
df_index_m = pd.read_excel("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/20220224_TargetSeq_IDs/20220307_name_match_target-seq_list-True-mpmat-index.xlsx")
# %% map V4 list name
# ND4 onlys
df_info = df_mut_ratio.copy()
df_name4 = pd.read_excel("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/20220224_TargetSeq_IDs/20220312-DdCBE-off_target_type.FinallistV4.CheckPrimer.AddV4ID.xlsx")
df_out_info = df_name4[['region_id', 'off_target_id.V4.ND4', 'off_target_id.V4.ND5.1', 'off_target_id.V4.ND6']].copy()
df_out_info.columns = ['mpmat_index', 'ND4', 'ND5.1', 'ND6']
df_out = df_info[df_info['region_id'].map(lambda x: 'on-target' not in x)].copy()
df_out_final = df_out.merge(df_index_m, how='left').merge(df_out_info, how='left')
df_out_final.groupby(['belongto', 'treatment', 'rep']).describe()
df_out_final[(df_out_final['ND5.1'].isna()) & (df_out_final.belongto == "ND5.1")].region_id.value_counts().index.tolist()
# # 临时Fix
df_out_final.loc[
((df_out_final['ND4'].isna()) & (df_out_final.belongto == "ND4")
| df_out_final.loc[df_out_final.treatment == 'untreat-Q5U', 'ND4'].isna()), 'ND4'] = df_out_final.loc[(df_out_final['ND4'].isna()) & (df_out_final.belongto == "ND4"), 'mpmat_index']
df_out_final.loc[
((df_out_final['ND5.1'].isna()) & (df_out_final.belongto == "ND5.1")
| df_out_final.loc[df_out_final.treatment == 'untreat-Q5U', 'ND5.1'].isna()), 'ND5.1'] = df_out_final.loc[(df_out_final['ND5.1'].isna()) & (df_out_final.belongto == "ND5.1"), 'mpmat_index']
# ls_noname4 = ['ND5.1-only-2',
# 'ND5.1-only-4',
# 'ND5.1-only-8',
# 'ND5.1-only-10',
# 'ND5.1-new-only24',
# 'ND5.1-new-only17',
# 'ND5.1-new-only21']
# df_index_m[df_index_m.region_id.map(lambda x: x in ls_noname4)]
df_out_final.groupby(['belongto', 'treatment', 'rep']).describe()
# %% 输出graphpad表格
# %%% 输出转染条件比较的数据
# df_out_final[(df_out_final.region_id=='ND516-share-10') & (df_out_final.treatment=='untreat-Q5U')]
# df_out_final[(df_out_final.region_id=='ND516-share-12') & (df_out_final.treatment=='untreat-Q5U')]
# only的old ,share 的old
sort_for_table = False # True for fig False for table
df_trans_4 = df_out_final[
df_out_final.region_id.map(lambda x: ("ND4" in x) and ("new" not in x) and ("on-target" not in x)) # ND4 old only
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("new" not in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND4" in x) | ("untreat-Q5U" in x))) # ND4 old share
].copy()
df_trans_4 = pd.pivot_table(
data=df_trans_4,
index=["region_id", 'ND4'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_trans_4["sort_key"] = df_trans_4[
[
("mut_ratio", "ND4-Det-Q5U", "rep1"),
("mut_ratio", "ND4-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_trans_4.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_trans_4.drop(columns="sort_key", inplace=True)
df_trans_4
# only的old ,share 的old
df_trans_5 = df_out_final[
df_out_final.region_id.map(lambda x: ("ND5.1" in x) and ("new" not in x) and ("on-target" not in x)) # ND5.1 old only
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("new" not in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND5-1" in x) | ("untreat-Q5U" in x))) # ND5.1 old share
].copy()
df_trans_5 = pd.pivot_table(
data=df_trans_5,
index=["region_id", 'ND5.1'],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_trans_5["sort_key"] = df_trans_5[
[
("mut_ratio", "ND5-1-Det-Q5U", "rep1"),
("mut_ratio", "ND5-1-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_trans_5.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_trans_5.drop(columns="sort_key", inplace=True)
df_trans_5
# only的old ,share 的old
df_trans_6 = df_out_final[
df_out_final.region_id.map(lambda x: ("ND6" in x) and ("new" not in x) and ("on-target" not in x)) # ND6 old only
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("new" not in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND6" in x) | ("untreat-Q5U" in x))) # ND6 old share
].copy()
df_trans_6 = pd.pivot_table(
data=df_trans_6,
index=["region_id", 'ND6'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_trans_6["sort_key"] = df_trans_6[
[
("mut_ratio", "ND6-Det-Q5U", "rep1"),
("mut_ratio", "ND6-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_trans_6.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_trans_6.drop(columns="sort_key", inplace=True)
df_trans_6
# %%% 输出Detect-seq自己的的验证数据
# only的old new ,share 的old new
df_comp_4 = df_out_final[
(
df_out_final.region_id.map(lambda x: ("ND4" in x) and ("on-target" not in x)) # only的old new
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND4" in x) | ("untreat-Q5U" in x))) # share 的old new
)
& df_out_final.treatment.map(lambda x: 'LIPO2000' not in x) # 排除lipo2000
].copy()
df_comp_4 = pd.pivot_table(
data=df_comp_4,
index=["region_id", 'ND4'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_comp_4["sort_key"] = df_comp_4[
[
("mut_ratio", "ND4-Det-Q5U", "rep1"),
("mut_ratio", "ND4-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_comp_4.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_comp_4.drop(columns="sort_key", inplace=True)
df_comp_4
# only的old new ,share 的old new
df_comp_5 = df_out_final[
(
df_out_final.region_id.map(lambda x: ("ND5.1" in x) and ("on-target" not in x)) # only的old new
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND5-1" in x) | ("untreat-Q5U" in x))) # share 的old new
)
& df_out_final.treatment.map(lambda x: 'LIPO2000' not in x) # 排除lipo2000
].copy()
df_comp_5 = pd.pivot_table(
data=df_comp_5,
index=["region_id", 'ND5.1'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_comp_5["sort_key"] = df_comp_5[
[
("mut_ratio", "ND5-1-Det-Q5U", "rep1"),
("mut_ratio", "ND5-1-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_comp_5.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_comp_5.drop(columns="sort_key", inplace=True)
df_comp_5
# only的old new ,share 的old new
df_comp_6 = df_out_final[
(
df_out_final.region_id.map(lambda x: ("ND6" in x) and ("on-target" not in x)) # only的old new
| (df_out_final.region_id.map(lambda x: ("share" in x) and ("on-target" not in x))
& df_out_final.treatment.map(lambda x: ("ND6" in x) | ("untreat-Q5U" in x))) # share 的old new
)
& df_out_final.treatment.map(lambda x: ('LIPO2000' not in x) and ('LTX-12' not in x)) # 排除lipo2000 和 ltx 12
].copy()
df_comp_6 = pd.pivot_table(
data=df_comp_6,
index=["region_id", 'ND6'],
# index=["region_id"],
columns=["treatment", "rep"],
values=["mut_ratio"],
)
df_comp_6["sort_key"] = df_comp_6[
[
("mut_ratio", "ND6-Det-Q5U", "rep1"),
("mut_ratio", "ND6-Det-Q5U", "rep2"),
]
].mean(axis=1)
df_comp_6.sort_values("sort_key", ascending=sort_for_table, inplace=True)
df_comp_6.drop(columns="sort_key", inplace=True)
df_comp_6
# %% save trans 和 comp的表格 v1 20220308
os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
df_trans_4 = df_trans_4.fillna(0)
df_comp_4 = df_comp_4.fillna(0)
# df_trans_4.index = df_trans_4.index.map(lambda x: '_'.join(x))
# df_trans_5.index = df_trans_5.index.map(lambda x: '_'.join(x))
# df_trans_6.index = df_trans_6.index.map(lambda x: '_'.join(x))
# df_comp_4.index = df_comp_4.index.map(lambda x: '_'.join(x))
# df_comp_5.index = df_comp_5.index.map(lambda x: '_'.join(x))
# df_comp_6.index = df_comp_6.index.map(lambda x: '_'.join(x))
with pd.ExcelWriter('20220308_TargetSeqInfoForBarPlot.xlsx') as writer: # doctest: +SKIP
df_trans_4.to_excel(writer, sheet_name='ND4_TRANS')
df_trans_5.to_excel(writer, sheet_name='ND5.1_TRANS')
df_trans_6.to_excel(writer, sheet_name='ND6_TRANS')
df_comp_4.to_excel(writer, sheet_name='ND4_COMP')
df_comp_5.to_excel(writer, sheet_name='ND5.1_COMP')
df_comp_6.to_excel(writer, sheet_name='ND6_COMP')
# Clamp every value below 0.001 up to 0.001 so zero/near-zero ratios stay
# visible on the log-scaled plots, then export the clamped tables.
for df_final in [df_trans_4, df_trans_5, df_trans_6, df_comp_4, df_comp_5, df_comp_6]:
    for col in df_final:
        df_final[col] = df_final[col].map(lambda x: x if x >= 0.001 else 0.001)
# NOTE(review): the source was truncated after the ExcelWriter line; the writer
# body below is reconstructed to mirror the un-clamped export above -- confirm
# the intended sheet layout.
with pd.ExcelWriter('20220308_TargetSeqInfoForBarPlot_fixmin.xlsx') as writer:
    df_trans_4.to_excel(writer, sheet_name='ND4_TRANS')
    df_trans_5.to_excel(writer, sheet_name='ND5.1_TRANS')
    df_trans_6.to_excel(writer, sheet_name='ND6_TRANS')
    df_comp_4.to_excel(writer, sheet_name='ND4_COMP')
    df_comp_5.to_excel(writer, sheet_name='ND5.1_COMP')
    df_comp_6.to_excel(writer, sheet_name='ND6_COMP')
import os
# Enforce CPU Usage
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Uncommenting enforces CPU usage # Commenting enforces GPU usage
# Seed the Random-Number-Generator in a bid to get 'Reproducible Results'
import tensorflow as tf
from keras import backend as K
from numpy.random import seed
seed(1)
tf.compat.v1.set_random_seed(3)
# load required modules
import pandas as pd
import numpy as np
import math, time
import igraph as igh
import scipy.stats as stats
from keras.models import Sequential
from keras.regularizers import l1_l2
from keras.layers.embeddings import Embedding
from keras.layers import Dense, Dropout, Activation, Flatten
from keras import initializers, losses, metrics, optimizers
from keras.layers.normalization import BatchNormalization
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import accuracy_score, average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# Import classes from my custom package
from custom_classes.Starter_Module_01 import Starter
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    arr = np.asarray(x)
    return 1 / (1 + np.exp(-arr))
def softmax(x):
    """Element-wise softmax: e^x_i / sum_j(e^x_j).

    Fix: the original exponentiated the raw input, which overflows to inf/NaN
    for moderately large values. Subtracting max(x) first is mathematically
    identical (softmax is shift-invariant) and numerically stable.
    """
    arr = np.asarray(x, dtype=float)
    shifted = np.exp(arr - arr.max())
    return shifted / shifted.sum()
def swish(x):
    """Element-wise swish activation: x * sigmoid(x) = x / (1 + e^-x).

    Fix: the original divided by (1 - e^-x), which is not the swish function
    and raises a division-by-zero at x = 0; the correct denominator is
    (1 + e^-x).
    """
    arr = np.asarray(x)
    return arr / (1 + np.exp(-arr))
def euclidean_norm(x, y):
    """Euclidean (L2) distance between vectors x and y."""
    diff = np.asarray(x) - np.asarray(y)
    return math.sqrt(np.sum(diff ** 2))
def cosine_sim(x, y):
    """Cosine similarity between vectors x and y: dot(x, y) / (|x| * |y|)."""
    u = np.asarray(x)
    v = np.asarray(y)
    dot = np.sum(u * v)
    norm_u = math.sqrt(np.sum(u ** 2))
    norm_v = math.sqrt(np.sum(v ** 2))
    return dot / (norm_u * norm_v)
def common_neigh(x, y):
    """Common-neighbours score: dot product of the two (binary) adjacency
    vectors, i.e. the count of shared neighbours when inputs are 0/1."""
    return np.sum(np.asarray(x) * np.asarray(y))
def args_parse_cmd():
    """Parse the program's command-line arguments.

    Returns
    -------
    argparse.Namespace
        root_path (data directory), edge_list (dataset name without
        extension), run_mode ('single' or 'all').
    """
    parser = ArgumentParser(description='START-HELP: Program for forecasting/predicting breakup or schism in social networks', epilog='END-HELP: End of assistance/help section',
                            formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
    parser.add_argument('-rp', '--root_path', nargs='+', default='generic_datasets/', type=str, help='Generic root path for application/program')
    # bundled datasets: 'CiteSeer', 'Cora', 'Internet-Industry-Partnerships', 'PubMed-Diabetes', 'Terrorists-Relation', 'Zachary-Karate'
    parser.add_argument('-el', '--edge_list', nargs='+', default='CiteSeer', type=str, help='Edge list (filename WITHOUT extension) of reference graph')
    parser.add_argument('-rm', '--run_mode', nargs='+', default='single', type=str, choices=['single', 'all'], help='Run model per specified dataset OR cumulatively for all intrinsic datasets')
    args = parser.parse_args()
    return args
def nodelist_edgelist_compute(myCls, args, edge_list):
    """Load a dataset's edge list via the project Starter helper and derive
    its node list.

    Parameters: myCls (Starter instance), args (parsed CLI namespace),
    edge_list (dataset name; file lives at <name>/<name>.edgelist).
    Returns (edge_array, node_array) as NumPy int32 arrays.
    """
    # Load edge list
    graph_fname = edge_list + '/' + edge_list
    df_cln2 = myCls.load_data(args.root_path, graph_fname+'.edgelist', sep='\s', header=None, index_col=None, mode='READ')
    df_cln2 = df_cln2.astype('int32')
    temp_1 = df_cln2.values[:, :]  # edge list (NUMPY array)
    temp_2 = np.unique(temp_1)  # node list (NUMPY array): unique node ids
    return temp_1, temp_2
def edgelist_scan(u, v, edges):
    """Return True when the tie encoded as "u-v" appears in the
    string-encoded edge collection, False otherwise."""
    return f"{u}-{v}" in edges
def paths_compute(u, v, truth_ties, graph):
    """Return [u, v, 0] when (u, v) qualifies as a "false" (negative) tie.

    A pair qualifies when u != v, the tie "u-v" is absent from truth_ties,
    and the graph returns no shortest path between them (unreachable pair,
    since len(paths) must be <= bridge == 0). Returns False otherwise.
    """
    bridge = 0  # max allowed number of shortest paths for a negative example
    res = False
    if u != v:
        srch_res = edgelist_scan(u, v, list(truth_ties))
        if srch_res == False:
            k_paths = graph.get_all_shortest_paths(u, v)
            #print(k_paths) # delete
            # no path at all between u and v -> safe negative example
            if len(k_paths) <= bridge:
                res = [u, v, 0]
    return res
def falsehood_ties_gen(graph, nodes, edges):
    """Generate negative ("false") ties: unreachable, non-adjacent node pairs.

    Scans node pairs until as many false ties as true ties have been
    collected. Returns a DataFrame with columns [0, 1, 2] = [u, v, 0].

    Fix: DataFrame.append was removed in pandas 2.0 (and was O(n^2)); rows are
    now accumulated in a list and converted once at the end.
    """
    truth_ties = pd.Series(edges[:, 0]).map(str) + '-' + pd.Series(edges[:, 1]).map(str)
    false_rows = []
    for u in nodes:
        for v in nodes:
            false_row = paths_compute(u, v, truth_ties, graph)
            if false_row != False:
                false_rows.append(false_row)
                #print(false_row) # delete
            # stop once the negative set matches the positive set in size
            if len(false_rows) >= truth_ties.shape[0]:
                break
        if len(false_rows) >= truth_ties.shape[0]:
            break
    return pd.DataFrame(false_rows)
def predHits(truth, pred1, pred2, pred3):
    """Top-1 and top-3 hit rates.

    Predictions are rounded to the nearest integer class; top-1 counts matches
    against pred1 only, top-3 counts matches against any of the three
    predictors. Returns (top_1, top_3) as fractions of len(truth).
    """
    p1 = np.rint(pred1).astype(np.int32)
    p2 = np.rint(pred2).astype(np.int32)
    p3 = np.rint(pred3).astype(np.int32)
    n = len(truth)
    hits_1 = sum(1 for i in range(n) if truth[i] == p1[i])
    hits_3 = sum(
        1 for i in range(n)
        if truth[i] == p1[i] or truth[i] == p2[i] or truth[i] == p3[i]
    )
    return hits_1 / n, hits_3 / n
def evaluations(test_y, pred_y, pred_y_proba, pred_y_2, pred_y_3):
    """Evaluate binary link predictions with standard sklearn metrics plus
    top-1/top-3 hit rates.

    Parameters: true labels, hard predictions, predicted probabilities, and
    two alternative hard predictions (used only for the top-3 rate).
    Returns a dict of metric name -> value rounded to 4 decimals.
    """
    # Evaluate results via ML standards
    avg_pr = average_precision_score(test_y, pred_y_proba)
    precision = precision_score(test_y, pred_y, average='binary')
    recall = recall_score(test_y, pred_y, average='binary')
    accuracy = accuracy_score(test_y, pred_y)
    f1 = f1_score(test_y, pred_y, average='binary')
    mcc = matthews_corrcoef(test_y, pred_y)
    auc_roc = roc_auc_score(test_y, pred_y_proba)
    top_1, top_3 = predHits(test_y, pred_y, pred_y_2, pred_y_3)
    print("\nLink Prediction Evaluation Report:")
    evals = {'avg_pr': round(avg_pr, 4), 'precision': round(precision, 4), 'recall': round(recall, 4), 'accuracy': round(accuracy, 4), 'f1': round(f1, 4), 'mcc': round(mcc, 4), 'auc_roc': round(auc_roc, 4), 'top_1': round(top_1, 4), 'top_3': round(top_3, 4)}
    return evals
def embeddings_gen(myCls, train_X, train_y, test_X, test_y, input_enc_dim, nodes, fname):
    """Train a shallow Keras model whose Embedding layer learns node vectors.

    Each sample is a 2-node edge; the model embeds both endpoints, flattens,
    and predicts the binary link label through one sigmoid unit. The trained
    Embedding weight matrix is the node-embedding table; it is joined with
    the node ids and written to "<fname>.embeds" (tab-separated, no header).
    Returns (embeddings_dataframe, training_time_seconds).
    """
    # Hyperparameters
    repeats = 1  # 100
    n_epochs = 100  # 135
    output_dim = 256  # embedding vector length per node
    input_len = 2  # (source, destination) pair per sample
    # Implementing MODEL via Multiple Repeats OR Multiple Restarts
    # NOTE(review): indentation was lost in this export; with repeats == 1 the
    # loop body vs. post-loop placement below is behaviorally equivalent.
    for r in range(repeats):
        # Fit the Network
        start_time = time.time()  # START: Training Time Tracker
        K.clear_session()  # Kills current TF comp-graph & creates a new one
        model = Sequential()
        model.add(Embedding(input_enc_dim, output_dim, input_length=input_len, embeddings_initializer='uniform', embeddings_regularizer=l1_l2(l1=0.0, l2=0.0)))  # Generate Vector-Embeddings wrt Nodes of the Edge-List
        model.add(Flatten())
        model.add(Dense(1, kernel_initializer='glorot_uniform', use_bias=False))  # use_bias=False
        #model.add(BatchNormalization())
        model.add(Activation('sigmoid'))  # Use 'sigmoid' for binary classifier; 'softmax' for multi-class classifier, and 'linear' for regression.
        model.compile(loss='mean_absolute_error', optimizer=optimizers.Nadam(), metrics=['accuracy'])
        print(model.summary())
        fitting_res = model.fit(train_X, train_y, epochs=n_epochs, validation_data=(test_X, test_y), verbose=2, shuffle=True)  # train_on_batch()
        end_time = time.time()  # STOP: Training-Time Tracker
        embeds_gen_time = end_time - start_time
        # TRAINING: Evaluate model's performance (OVERFITTING = Train LOSS < Test LOSS)
        scores_train = model.evaluate(train_X, train_y, verbose=0)
        print("\nEmbeddings Training:- Mean Abs. Error: %.2f; Accuracy: %.2f%%" % (scores_train[0], scores_train[1]*100))
        # VALIDATION: Evaluate model's performance (OVERFITTING = Train MAE < Test MAE)
        scores_validtn = model.evaluate(test_X, test_y, verbose=0)
        print("\nEmbeddings Validation:- Mean Abs. Error: %.2f; Accuracy: %.2f%%" % (scores_validtn[0], scores_validtn[1]*100))
    # Accessing the embedding layer through a constructed model
    # Firstly, `0` refers to the position of embedding layer in the `model`
    # ONLY layers (Dense and/or Embedding) defined befor the Flatten() are reported/documented
    # `layer weights` == model.layers[0].get_weights()[0] || `bias weights` == model.layers[0].get_weights()[1]
    # `layer-1 weights` == model.layers[0].get_weights()[0] || `layer-2 weights` == model.layers[1].get_weights()[0] || `layer-3 weights` == model.layers[2].get_weights()[0]
    embeddings = model.layers[0].get_weights()[0]
    # `embeddings` has a shape of (num_vocab/input_enc_dim, embedding_dim/output_dim)
    print("Original Embeddings Shape: ", embeddings.shape)
    embeds = pd.concat([pd.DataFrame(nodes), pd.DataFrame(embeddings)], axis='columns', ignore_index=True)
    embeds.to_csv(fname+'.embeds', sep='\t', header=False, index=False)
    return embeds, embeds_gen_time
def inference_predictor(train_X, train_y, test_X, test_y, fname):
    """Fit a logistic-regression link predictor and derive heuristic score
    bands (false / true / overlapping "unlink" scores) from the labelled data.

    X columns: ['source', 'destn'] + features; y columns:
    ['y_cls', 'comn_dist', 'y_reg', 'kendall', 'euclidean_norm', 'cosine_sim'].
    """
    # X = ['source', 'destn'] + col_name
    # y = ['y_cls', 'comn_dist', 'y_reg', 'kendall', 'euclidean_norm', 'cosine_sim']
    X = np.append(train_X, test_X, axis=0)
    y = np.append(train_y, test_y, axis=0)
    # Training (Logistic Classifier): features start at column 2, target is y_cls
    start_time = time.time()  # START: Training-Time Tracker
    log_clf = LogisticRegression(solver='lbfgs', random_state=42)
    log_clf.fit(train_X[:, 2:], train_y[:, 0])
    # Training (Heuristics): collect the y_reg scores seen for each class
    false_ties_score = list()
    true_ties_score = list()
    unlink_ties_score = list()
    for i in range(X.shape[0]):
        if y[i, 0] == 0:
            false_ties_score.append(y[i, 2])
        elif y[i, 0] == 1:
            true_ties_score.append(y[i, 2])
    # scores seen in BOTH classes form the ambiguous "unlink" band
    unlink_ties_score = list(set(false_ties_score).intersection(true_ties_score))
    if len(unlink_ties_score) == 0:
        unlink_ties_score = [None]
    end_time = time.time()  # STOP: Training-Time Tracker
    train_time = end_time - start_time
    print("\nTraining Time: ", train_time, "seconds")
    # Save Inference/Deduction scores
    false_ties_score.sort()
    true_ties_score.sort()
    unlink_ties_score.sort()
    # NOTE(review): the source was truncated mid-statement here (and `fname` /
    # `log_clf` are unused in the visible portion); the concat below repairs the
    # garbled line and the result is returned -- confirm against callers.
    inf_scores = pd.concat(
        [pd.DataFrame(false_ties_score), pd.DataFrame(true_ties_score), pd.DataFrame(unlink_ties_score)],
        axis='columns', ignore_index=True)
    return inf_scores
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import osmnx as ox
import pandas as pd
import numpy as np
import geopandas as gpd
import networkx as nx
import math
from math import sqrt
import ast
import functools
from shapely.geometry import Point, LineString
pd.set_option("display.precision", 3)
pd.options.mode.chained_assignment = None
from .utilities import *
from .angles import angle_line_geometries
## Obtaining graphs ###############
def graph_fromGDF(nodes_gdf, edges_gdf, nodeID = "nodeID"):
    """
    From two GeoDataFrames (nodes and edges), it creates a NetworkX undirected Graph.
    Parameters
    ----------
    nodes_gdf: Point GeoDataFrame
        nodes (junctions) GeoDataFrame
    edges_gdf: LineString GeoDataFrame
        street segments GeoDataFrame
    nodeID: str
        column name that indicates the node identifier column (if different from "nodeID")
    Returns
    -------
    G: NetworkX.Graph
        the undirected street network graph
    """
    nodes_gdf.set_index(nodeID, drop=False, inplace=True, append=False)
    nodes_gdf.index.name = None
    G = nx.Graph()
    G.add_nodes_from(nodes_gdf.index)
    attributes = nodes_gdf.to_dict()
    # ignore fields containing values of type list (unhashable as attributes).
    # fix: collect ALL list-valued columns -- the original kept only the first
    # such column name and then ran a substring test against that string.
    a = (nodes_gdf.applymap(type) == list).sum()
    to_ignore = set(a[a > 0].index)
    for attribute_name in nodes_gdf.columns:
        if attribute_name in to_ignore:
            continue
        # only add this attribute to nodes which have a non-null value for it
        attribute_values = {k: v for k, v in attributes[attribute_name].items() if pd.notnull(v)}
        nx.set_node_attributes(G, name=attribute_name, values=attribute_values)
    # add the edges and attributes that are not u, v (as they're added separately) or null
    for _, row in edges_gdf.iterrows():
        attrs = {}
        # fix: Series.iteritems was removed in pandas 2.0 -> use items()
        for label, value in row.items():
            if (label not in ['u', 'v']) and (isinstance(value, list) or pd.notnull(value)):
                attrs[label] = value
        G.add_edge(row['u'], row['v'], **attrs)
    return G
def multiGraph_fromGDF(nodes_gdf, edges_gdf, nodeIDcolumn):
    """
    From two GeoDataFrames (nodes and edges), it creates a NetworkX.MultiGraph.
    
    Parameters
    ----------
    nodes_gdf: Point GeoDataFrame
        nodes (junctions) GeoDataFrame
    edges_gdf: LineString GeoDataFrame
        street segments GeoDataFrame
    nodeIDcolumn: string
        column name that indicates the node identifier column.
        
    Returns
    -------
    G: NetworkX.MultiGraph
        the street network graph
    """
    nodes_gdf.set_index(nodeIDcolumn, drop = False, inplace = True, append = False)
    nodes_gdf.index.name = None
    
    Mg = nx.MultiGraph()
    Mg.add_nodes_from(nodes_gdf.index)
    attributes = nodes_gdf.to_dict()
    
    # ignore ALL fields containing values of type list: they cannot be set as
    # node attributes. (The previous code only skipped the first such column
    # and used a substring test on the column name.)
    a = (nodes_gdf.applymap(type) == list).sum()
    to_ignore = list(a[a > 0].index)
    
    for attribute_name in nodes_gdf.columns:
        if attribute_name in to_ignore:
            continue
        # only add this attribute to nodes which have a non-null value for it
        attribute_values = {k: v for k, v in attributes[attribute_name].items() if pd.notnull(v)}
        nx.set_node_attributes(Mg, name=attribute_name, values=attribute_values)
    
    # add the edges and attributes that are not u, v, key (as they're added separately) or null
    for _, row in edges_gdf.iterrows():
        attrs = {}
        # Series.items() replaces iteritems(), which was removed in pandas 2.0
        for label, value in row.items():
            if (label not in ['u', 'v', 'key']) and (isinstance(value, list) or pd.notnull(value)):
                attrs[label] = value
        Mg.add_edge(row['u'], row['v'], key=row['key'], **attrs)
    
    return Mg
## Building geo-dataframes for dual graph representation ###############
def dual_gdf(nodes_gdf, edges_gdf, epsg, oneway = False, angle = None):
"""
It creates two dataframes that are later exploited to generate the dual graph of a street network. The nodes_dual gdf contains edges
centroids; the edges_dual gdf, instead, contains links between the street segment centroids. Those dual edges link real street segments
that share a junction. The centroids are stored with the original edge edgeID, while the dual edges are associated with several
attributes computed on the original street segments (distance between centroids, deflection angle).
Parameters
----------
nodes_gdf: Point GeoDataFrame
nodes (junctions) GeoDataFrame
edges_gdf: LineString GeoDataFrame
street segments GeoDataFrame
epsg: int
epsg of the area considered
oneway: boolean
if true, the function takes into account the direction and therefore it may ignore certain links whereby vehichular movement is not allowed in a certain direction
angle: string {'degree', 'radians'}
it indicates how to express the angle of deflection
Returns
-------
nodes_dual, edges_dual: tuple of GeoDataFrames
the dual nodes and edges GeoDataFrames
"""
if list(edges_gdf.index.values) != list(edges_gdf.edgeID.values):
edges_gdf.index = edges_gdf.edgeID
edges_gdf.index.name = None
# computing centroids
centroids_gdf = edges_gdf.copy()
centroids_gdf['centroid'] = centroids_gdf['geometry'].centroid
centroids_gdf['intersecting'] = None
# find_intersecting segments and storing them in the centroids gdf
centroids_gdf['intersecting'] = centroids_gdf.apply(lambda row: list(centroids_gdf.loc[(centroids_gdf['u'] == row['u'])|(centroids_gdf['u'] == row['v'])|
(centroids_gdf['v'] == row['v'])|(centroids_gdf['v'] == row['u'])].index), axis=1)
if oneway:
centroids_gdf['intersecting'] = centroids_gdf.apply(lambda row: list(centroids_gdf.loc[(centroids_gdf['u'] == row['v']) | ((centroids_gdf['v'] == row['v']) & (centroids_gdf['oneway'] == 0))].index)
if row['oneway'] == 1 else list(centroids_gdf.loc[(centroids_gdf['u'] == row['v']) | ((centroids_gdf['v'] == row['v']) & (centroids_gdf['oneway'] == 0)) |
(centroids_gdf['u'] == row['u']) | ((centroids_gdf['v'] == row['u']) & (centroids_gdf['oneway'] == 0))].index), axis = 1)
# creating vertexes representing street segments (centroids)
centroids_data = centroids_gdf.drop(['geometry', 'centroid'], axis = 1)
if epsg is None:
crs = nodes_gdf.crs
else: crs = {'init': 'epsg:' + str(epsg)}
nodes_dual = gpd.GeoDataFrame(centroids_data, crs=crs, geometry=centroids_gdf['centroid'])
nodes_dual['x'], nodes_dual['y'] = [x.coords.xy[0][0] for x in centroids_gdf['centroid']],[y.coords.xy[1][0] for y in centroids_gdf['centroid']]
nodes_dual.index = nodes_dual.edgeID
nodes_dual.index.name = None
# creating fictious links between centroids
edges_dual = | pd.DataFrame(columns=['u','v', 'geometry', 'length']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 17:15:16 2020
@author: JAVIER
"""
import numpy as np
import pandas as pd
import pickle
from .. import bci_architectures as athena
from . import bci_penalty_plugin as bci_penalty
from .. import load_brain_data as lb
from Fancy_aggregations import penalties as pn
from Fancy_aggregations import binary_parser as bp
# Cost functions from the Fancy_aggregations penalty module, split into those
# that behave like (quasi-)similarities and those that behave like dissimilarities.
quasi_similarities = [pn._cuadratic_cost, pn._realistic_optimistic_cost, pn._huber_cost, pn._realistic_pesimistic_cost]
quasi_dis = [pn._anti_cuadratic_cost]
# Full catalogue; indices into this list are what generate_combination() expects.
quasi_total = quasi_similarities + quasi_dis
# Human-readable names aligned index-by-index with the lists above.
quasisim_name = ['Quadratic', 'Optimistic', 'Huber', 'Pessimistic']
quasidis_name = ['Anti-Quadratic']
names_total = quasisim_name + quasidis_name
def generate_combination(cost1, cost2):
    """Combine two cost functions from ``quasi_total`` into a single cost.

    :param cost1: index of the first cost in the ``quasi_total`` vector.
    :param cost2: index of the second cost in the ``quasi_total`` vector.
    :return: the combined cost callable; a plain convex combination when both
        costs are quasi-similarities, the quasi variant otherwise.
    """
    n_similarities = len(quasi_similarities)
    both_similarities = (cost1 < n_similarities) and (cost2 < n_similarities)
    mixer = pn._convex_comb if both_similarities else pn._convex_quasi_comb
    return mixer(quasi_total[cost1], quasi_total[cost2])
# =============================================================================
# BINARY CLASSIFICATION GRADIENT FOR THE AGGREGATION MFF
# =============================================================================
def binary_cross_entropy_loss(output, y):
    """Element-wise binary cross-entropy between predictions and labels.

    :param output: predicted probabilities, expected in (0, 1).
    :param y: binary ground-truth labels (0 or 1).
    :return: per-element loss array (non-negative).
    """
    # The original expression, np.log(y * output + (1 - y) * output), algebraically
    # collapses to np.log(output) and ignores the labels entirely. This is the
    # standard BCE formula instead.
    return -(y * np.log(output) + (1 - y) * np.log(1 - output))
def binary_update_alpha(X, cost, alpha, y):
    """Compute an additive update for ``alpha`` from the current loss.

    NOTE(review): the update is ``loss * X`` rather than an explicit gradient
    of the loss with respect to alpha — confirm the intended derivation.
    """
    # Forward pass: aggregate the logits with the current alpha configuration.
    forward_logits = multi_alpha_forward(X, cost, alpha)
    # Loss is computed on the first class column only.
    loss = binary_cross_entropy_loss(forward_logits[:,0], y)
    update_alpha = loss * X
    return update_alpha
def binary_train_loop(X, cost, alpha, y, learning_rate=0.01):
    '''
    Iteratively update ``alpha`` while the loss does not decrease.

    NOTE(review): as written, the loop body can never execute: ``new_loss``
    starts at 0 and ``loss`` at +inf, so ``new_loss >= loss`` is False on the
    very first check and the function returns the initial ``alpha`` and 0.
    ``learning_rate`` is also never used. Confirm the intended stopping
    condition before relying on this function.
    '''
    loss = np.inf
    new_loss = 0
    while new_loss >= loss:
        loss = new_loss
        alpha += binary_update_alpha(X, cost, alpha, y)
        forward_logits = multi_alpha_forward(X, cost, alpha)
        new_loss = binary_cross_entropy_loss(forward_logits, y)
    return alpha, new_loss
# Display names for the combined (two-cost) and single-cost configurations.
multi_cost_names = ['R2+Opt', 'hub+Opt','Anti+Opt', 'Hub+Anti']
uni_cost_names = ['R2', 'opt', 'Anti-consensus', 'Huber', 'Pes']
# =============================================================================
# DYNAMIC ALPHA LEARNING METHOD
# =============================================================================
def logistic_alpha_forward(X, cost_convex, clf):
    """Aggregate per-sample logits using a regressor-predicted alpha.

    :param X: logits of shape (bands, samples, classes).
    :param cost_convex: cost function taking (real, yhat, axis, alpha).
    :param clf: fitted regressor that predicts one alpha per sample.
    :return: aggregated logits of shape (samples, classes).
    """
    n_bands, n_samples, n_classes = X.shape
    # Flatten each sample's (bands x classes) logits into one feature vector
    # for the regressor.
    features = np.swapaxes(X, 0, 1).reshape((n_samples, n_bands * n_classes))
    alphas = clf.predict(features)
    result = np.zeros((n_samples, n_classes))
    for idx in range(n_samples):
        # bind the per-sample alpha as a default argument so each lambda keeps
        # the value from its own iteration
        cost_with_alpha = lambda real, yhat, axis, a=alphas[idx]: cost_convex(real, yhat, axis, a)
        result[idx] = pn.penalty_aggregation(X[:, idx, :], [bp.parse(name) for name in athena.classical_aggregations], axis=0, keepdims=False, cost=cost_with_alpha)
    return result
def multimodal_alpha_forward(X, cost, cost2, alpha):
    '''
    Two-phase multimodal forward pass with fixed alphas.

    X: list of n arrays with shape (bands, samples, classes).
    cost / cost2: cost functions for phase 1 (within modality) and phase 2
        (across modalities), each taking (real, yhat, axis, alpha).
    alpha: sequence with one alpha per modality plus a final fusion alpha
        (alpha[-1]), as consumed by mpa_aggregation.
    returns: aggregated logits of shape (samples, classes).
    '''
    # parsed aggregation functions shared by both phases
    david_played_and_it_pleased_the_lord = [bp.parse(x) for x in athena.agg_functions_names]
    # phase-1 and phase-2 penalty aggregations; the inner lambda forwards the
    # alpha received from mpa_aggregation into the cost function
    agg_phase_1 = lambda X0, alpha, keepdims=False, axis=0: pn.penalty_aggregation(X0, david_played_and_it_pleased_the_lord, axis=axis, keepdims=keepdims, cost=lambda real, yhat, axis: cost(real, yhat, axis, alpha=alpha))
    agg_phase_2 = lambda X0, alpha, keepdims=False, axis=0: pn.penalty_aggregation(X0, david_played_and_it_pleased_the_lord, axis=axis, keepdims=keepdims, cost=lambda real, yhat, axis: cost2(real, yhat, axis, alpha=alpha))
    return mpa_aggregation(X, agg_phase_1, agg_phase_2, alpha, keepdims=False)
def generate_real_alpha(X, y, aggs, cost, opt=1):
    """Grid-search alphas that classify sample ``X`` correctly.

    Scans alpha over np.arange(0.01, 1.01, 0.1); ``a`` keeps the first
    correct alpha found and ``b`` the last one. With opt == 1 the candidate
    closest to 0.5 is returned; with opt == 2, the midpoint (a + b) / 2.

    NOTE(review): for any other ``opt`` value the function implicitly
    returns None, which callers then assign into a float array.
    """
    a = None
    b = None
    for alpha in np.arange(0.01, 1.01, 0.1):
        alpha_cost = lambda X, yhat, axis: cost(X, yhat, axis, alpha)
        pagg = pn.penalty_aggregation(X, [bp.parse(x) for x in aggs], axis=0, keepdims=False, cost=alpha_cost)
        if np.argmax(pagg) == y:
            if a is None:
                a = alpha
            else:
                b = alpha
    # fall back to the neutral value 0.5 when no (or only one) correct alpha was found
    if a is None:
        a = 0.5
    if b is None:
        b = 0.5
    # distances to the neutral alpha 0.5, used to pick the more conservative candidate
    d1 = np.abs(a - 0.5)
    d2 = np.abs(b - 0.5)
    if opt == 1:
        if d1 <= d2:
            return a
        else:
            return b
    elif opt == 2:
        return (a + b) / 2
def generate_train_data_alpha(logits, labels, aggs=athena.classical_aggregations, cost=pn.cost_functions[0], opt=1):
    """Build the per-sample target alphas used to train the alpha regressor.

    :param logits: array of shape (bands, samples, classes).
    :param labels: ground-truth label per sample.
    :param aggs: aggregation names forwarded to the alpha grid search.
    :param cost: penalty cost used to evaluate candidate alphas.
    :param opt: selection strategy forwarded to ``generate_real_alpha``.
    :return: array of shape (samples,) holding one target alpha per sample.
    """
    n_samples = logits.shape[1]
    targets = np.zeros((n_samples,))
    for idx in range(n_samples):
        targets[idx] = generate_real_alpha(logits[:, idx, :], labels[idx], aggs, cost, opt=opt)
    return targets
def gen_all_good_alpha_trad(cost, aggs=athena.classical_aggregations, opt=1, multi_class=False):
    '''
    Learn one alpha model per dataset for the traditional framework.

    With opt in {1, 2} a linear regression is fitted to predict per-sample
    alphas; with opt == 'classic' a single alpha is optimised by k-fold
    cross-validated accuracy. Returns the list of fitted models/alphas,
    one per dataset.
    '''
    import sklearn.linear_model
    from sklearn.model_selection import KFold
    n_splits = 5
    kf = KFold(n_splits=n_splits)
    all_data = carmen_penalty_cache_logits(mff=False, multi_class=multi_class)
    all_logits_train, all_y_train, all_logits_test, all_y_test = all_data
    clfs = []
    for logits_train, y_train, logits_test, y_test in zip(all_logits_train, all_y_train, all_logits_test, all_y_test):
        if opt == 1 or opt == 2:
            # flatten (bands, samples, classes) -> (samples, bands*classes) features
            logits_reshaped = np.swapaxes(logits_train, 0, 1)
            logits_reshaped = logits_reshaped.reshape((logits_reshaped.shape[0], logits_reshaped.shape[1]*logits_reshaped.shape[2]))
            y_alpha = generate_train_data_alpha(logits_train, y_train, aggs=aggs, cost=cost, opt=opt)
            clf = sklearn.linear_model.LinearRegression().fit(logits_reshaped, y_alpha)
        elif opt == 'classic':
            # objective: 1 - mean k-fold accuracy for a fixed candidate alpha
            def func_opt(alphita):
                acc = lambda x_data, y : np.mean(np.equal(y, np.argmax( pn.penalty_aggregation(x_data, [bp.parse(x) for x in aggs], axis=0, keepdims=False, cost=lambda X, yhat, axis: cost(X, yhat, axis, alpha=alphita)), axis=1)))
                acum_acc = 0
                for train_index, test_index in kf.split(logits_train[0,:,:]):
                    _, X_test = logits_train[:, train_index, :], logits_train[:, test_index, :]
                    _, y_test = y_train[train_index], y_train[test_index]
                    acum_acc += acc(X_test, y_test)
                return 1 - acum_acc / n_splits # Remember: we are minimizing
            clf = athena.my_optimization(func_opt)
        clfs.append(clf)
    return clfs
# =============================================================================
# MULTIMODAL ALPHA OPTIMIZATION
# =============================================================================
def eval_alpha(alpha_v, y_hat, y):
    """Score an alpha vector as the average of two terms.

    The first term, mean(min(alpha, 1 - alpha)), is largest when the alphas
    stay close to 0.5; the second is the classification accuracy of
    ``y_hat`` against ``y``.

    :return: scalar score, the mean of both terms.
    """
    balance_term = np.minimum(alpha_v, 1 - alpha_v).mean()
    accuracy_term = np.equal(y_hat, y).mean()
    return (balance_term + accuracy_term) / 2
def mpa_aggregation(logits, agg1, agg2, alpha, keepdims=False):
    """Two-phase multimodal penalty aggregation.

    Phase 1 collapses each modality's (classifiers, samples, classes) array
    with ``agg1`` and its own alpha; phase 2 fuses the per-modality results
    with ``agg2`` and the final alpha (``alpha[-1]``).

    :param logits: list of arrays, each of shape (classifiers, samples, classes).
    :param agg1, agg2: callables accepting (X, axis=..., keepdims=..., alpha=...).
    :param alpha: sequence with one alpha per modality plus a trailing fusion alpha.
    :return: fused array of shape (samples, classes) (unless keepdims).
    """
    _, n_samples, n_classes = logits[0].shape
    stacked = np.zeros((len(logits), n_samples, n_classes))
    for modality, modality_logits in enumerate(logits):
        stacked[modality] = agg1(modality_logits, axis=0, keepdims=False, alpha=alpha[modality])
    return agg2(stacked, axis=0, keepdims=keepdims, alpha=alpha[-1])
def eval_conf(X, alpha, y, agg1, agg2):
    '''
    Score one alpha configuration: aggregate ``X`` with the two-phase
    multimodal penalty aggregation, classify by argmax, and return the
    combined alpha-balance / accuracy score from ``eval_alpha``.

    Parameters
    ----------
    X : list of arrays, one per modality, each (classifiers, samples, classes).
    alpha : sequence with one alpha per modality plus a final fusion alpha.
    y : ground-truth labels, shape (samples,).
    agg1 : phase-1 (within-modality) aggregation callable.
    agg2 : phase-2 (across-modalities) aggregation callable.

    Returns
    -------
    float
        score as computed by ``eval_alpha``.
    '''
    y_hat = np.argmax(mpa_aggregation(X, agg1, agg2, alpha), axis=1)
    return eval_alpha(alpha, y_hat, y)
def gen_all_good_alpha_mff(cost, cost2, aggs=athena.agg_functions_names, opt=1, four_class=False):
    '''
    Fit one alpha vector per dataset for the multimodal (MFF) framework by
    least-squares optimisation of the combined alpha-balance/accuracy score.

    ``cost`` drives the within-band phase and ``cost2`` the band-fusion
    phase. Returns a list with one optimised alpha vector per dataset,
    each of length len(bands) + 1.

    NOTE(review): ``opt`` is accepted but never used in this function.
    '''
    from scipy.optimize import least_squares
    all_data = carmen_penalty_cache_logits(mff=True, multi_class=four_class)
    all_logits_train, all_y_train, all_logits_test, all_y_test = all_data
    datasets = []
    # phase aggregations; the inner lambda forwards the candidate alpha to the cost
    agg_phase_1 = lambda X, alpha, keepdims=False, axis=0: pn.penalty_aggregation(X, aggs, axis=axis, keepdims=keepdims, cost=lambda real, yhat, axis: cost(real, yhat, axis, alpha=alpha))
    agg_phase_2 = lambda X, alpha, keepdims=False, axis=0: pn.penalty_aggregation(X, aggs, axis=axis, keepdims=keepdims, cost=lambda real, yhat, axis: cost2(real, yhat, axis, alpha=alpha))
    for logits_train, y_train, logits_test, y_test in zip(all_logits_train, all_y_train, all_logits_test, all_y_test):
        # maximise eval_conf by minimising its negation
        optimize_lambda = lambda alpha: -eval_conf(logits_train, alpha, y_train, agg_phase_1, agg_phase_2) # Remember we are minimizing
        x0_alpha = np.array([0.5] * len(logits_train) + [0.5])
        res_1 = least_squares(optimize_lambda, x0_alpha, bounds=[0.0001, 0.9999])
        datasets.append(res_1.x)
    return datasets
# =============================================================================
# SAVE AND LOAD CLFS
# =============================================================================
def save_clfs(cost, cost_name, mff=False, four_classes=False, opt=1):
    """Train the alpha models for every dataset and pickle them to disk.

    opt in {1, 2} produces regression models (saved under
    ./regression_models/); opt == 'classic' produces plain optimised alphas
    (saved under ./classic_alpha_models/). ``mff`` selects the multimodal
    framework; ``four_classes`` the four-class vs binary task.
    """
    if mff:
        clfs = gen_all_good_alpha_mff(cost, cost, [bp.parse(x) for x in athena.mff_aggregations], opt=opt, four_class=four_classes)
    else:
        clfs = gen_all_good_alpha_trad(cost, athena.classical_aggregations, opt=opt, multi_class=four_classes)
    # encode framework / task / strategy in the file-name suffix
    ending = ''
    if not mff:
        ending += '_trad'
    else:
        ending += '_mff'
    if four_classes:
        ending += '_tonguefoot'
    else:
        ending += '_binary'
    ending += '_' + str(opt)
    if (opt == 1) or (opt == 2):
        with open('./regression_models/lclfs_' + cost_name + ending + '.pckl', 'wb') as f:
            pickle.dump(clfs, f)
    elif opt == 'classic':
        with open('./classic_alpha_models/alpha_' + cost_name + ending + '.pckl', 'wb') as f:
            pickle.dump(clfs, f)
def load_clfs(cost_name, mff=False, four_classes=False, opt=1):
    """Load the pickled alpha models saved by ``save_clfs``.

    :param cost_name: name of the cost function used when saving.
    :param mff: whether the multimodal (MFF) framework was used.
    :param four_classes: task selector; the string '_4_class' is accepted as
        a truthy alias, mirroring the callers.
    :param opt: 1/2 for regression models, 'classic' for plain alphas.
    :return: the unpickled list of models/alphas.
    :raises ValueError: if ``opt`` is not 1, 2 or 'classic' (the original
        code fell through to an UnboundLocalError in that case).
    """
    # file-name suffix must match the one built by save_clfs
    ending = ''
    if not mff:
        ending += '_trad'
    else:
        ending += '_mff'
    if four_classes or four_classes == '_4_class':
        ending += '_tonguefoot'
    else:
        ending += '_binary'
    ending += '_' + str(opt)
    if (opt == 1) or (opt == 2):
        path = './regression_models/lclfs_' + cost_name + ending + '.pckl'
    elif opt == 'classic':
        path = './classic_alpha_models/alpha_' + cost_name + ending + '.pckl'
    else:
        raise ValueError("opt must be 1, 2 or 'classic', got %r" % (opt,))
    # pickle is imported at module level; the redundant local import was removed
    with open(path, 'rb') as f:
        return pickle.load(f)
def compute_accuracy_logistic_alpha(cost_convex, cost_name, mff=True, four_classes=False, opt=1):
    """Evaluate the saved alpha models on every cached dataset.

    Loads the persisted models for ``cost_name`` and the cached logits, then
    computes the argmax-classification accuracy on the train and test splits
    of each dataset.

    :return: (train_accuracies, test_accuracies), one entry per dataset.
    """
    accuracy = lambda yhat, yreal: np.mean(np.equal(yhat, yreal))
    clfs = load_clfs(cost_name, mff, four_classes, opt)
    all_logits_train, all_y_train, all_logits_test, all_y_test = carmen_penalty_cache_logits(mff=mff, multi_class=four_classes)
    train_accuracies = np.zeros((len(all_y_train)))
    test_accuracies = np.zeros((len(all_y_test)))
    ix = 0
    for logits_train, y_train, logits_test, y_test in zip(all_logits_train, all_y_train, all_logits_test, all_y_test):
        if mff:
            # multimodal path: the same cost is used for both phases
            agg_logits = multimodal_alpha_forward(logits_train, cost_convex, cost_convex, clfs[ix])
            agg_logits_test = multimodal_alpha_forward(logits_test, cost_convex, cost_convex, clfs[ix])
        else:
            if (opt == 1) or (opt == 2):
                # regression model predicts a per-sample alpha
                agg_logits = logistic_alpha_forward(logits_train, cost_convex, clfs[ix])
                agg_logits_test = logistic_alpha_forward(logits_test, cost_convex, clfs[ix])
            elif opt == 'classic':
                # clfs[ix] is a single optimised alpha in this mode
                alpha_cost = lambda X, yhat, axis: cost_convex(X, yhat, axis, alpha=clfs[ix])
                agg_logits = bci_penalty._forward_penalty(logits_train, alpha_cost)
                agg_logits_test = bci_penalty._forward_penalty(logits_test, alpha_cost)
        yhat_train = np.argmax(agg_logits, axis=1)
        yhat_test = np.argmax(agg_logits_test, axis=1)
        train_accuracies[ix] = accuracy(yhat_train, y_train)
        test_accuracies[ix] = accuracy(yhat_test, y_test)
        ix+= 1
    return train_accuracies, test_accuracies
# =============================================================================
# FORWARD AND MISCELLANIOUS
# =============================================================================
def carmen_penalty_cache_logits(mff=True, multi_class=False):
    """Load the cached per-dataset logits from disk, recomputing them if missing.

    :param mff: selects the multimodal ('mff') or traditional ('trad') cache files.
    :param multi_class: task selector; the strings '_4_class' / '_binary' are
        accepted as aliases.
    :return: (logits_train, y_train, logits_test, y_test), each a list with
        one entry per dataset.
    """
    if mff:
        framework='mff'
        archi = athena.bci_achitecture.emf_carmen_penalty_architecture
    else:
        framework='trad'
        archi = athena.bci_achitecture.trad_carmen_penalty_architecture
    if multi_class or (multi_class == '_4_class'):
        class_mode = '4_class'
    elif not multi_class or (multi_class == '_binary'):
        class_mode = 'binary'
    try:
        with open('logits_' + framework + '_carmen_' + class_mode + '_train.pckl', 'rb') as f:
            logits_train = pickle.load(f)
        with open('logits_' + framework + '_carmen_'+ class_mode +'_test.pckl', 'rb') as f:
            logits_test = pickle.load(f)
        with open('logits_' + framework + '_carmen_'+ class_mode +'_train_y.pckl', 'rb') as f:
            y_train = pickle.load(f)
        with open('logits_' + framework + '_carmen_'+ class_mode +'_test_y.pckl', 'rb') as f:
            y_test = pickle.load(f)
    except (IOError, EOFError) as e:
        # cache miss or corrupted cache: rebuild the logits from scratch
        print('Recomputing logits...', e)
        logits_train, y_train, logits_test, y_test = carmen_best_alpha_2a_all_logits(archi, 0, verbose=False, agregacion=pn.base_cost_functions[0], mff=mff, four_clases=multi_class)
    return logits_train, y_train, logits_test, y_test
def multi_alpha_forward(X, cost_convex, alpha):
    """Aggregate each frequency band with its own alpha, then fuse the bands.

    Parameters
    ----------
    X : list of arrays, one per band, each of shape (classifiers, samples, classes).
    cost_convex : cost function taking (real, yhat, axis, alpha).
    alpha : sequence of alphas; with more than one element, alpha[i] is used
        for band i and alpha[-1] for the final fusion step, otherwise the
        single element is reused for every band.

    Returns
    -------
    Fused logits of shape (samples, classes).
    """
    try:
        multiple_alpha = len(alpha) > 1
    except TypeError:
        # alpha has no len(): treat it as a single value.
        # (The original used a bare except:, which also hid unrelated errors.)
        multiple_alpha = False
    aggregations = [bp.parse(x) for x in athena.mff_aggregations]
    agg_logits = np.zeros((len(X), X[0].shape[1], X[0].shape[2]))
    for wave in range(len(X)):
        band_alpha = alpha[wave] if multiple_alpha else alpha[0]
        # default argument binds band_alpha now, avoiding late-binding surprises
        alpha_cost = lambda real, yhat, axis, a=band_alpha: cost_convex(real, yhat, axis, a)
        agg_logits[wave] = pn.penalty_aggregation(X[wave], aggregations, axis=0, keepdims=False, cost=alpha_cost)
    # NOTE(review): in the single-alpha case the original passed the whole
    # `alpha` object (not alpha[0]) to the final cost; preserved as-is.
    final_alpha = alpha[-1] if multiple_alpha else alpha
    final_cost = lambda real, yhat, axis: cost_convex(real, yhat, axis, final_alpha)
    return pn.penalty_aggregation(agg_logits, aggregations, axis=0, keepdims=False, cost=final_cost)
def _alpha_learn(X, y, cost, mff=False):
    """Optimise the alpha(s) of ``cost`` by Monte-Carlo search on (X, y).

    Returns a tuple (cost_with_alpha, alpha_value) where the first element
    is the cost function with the optimised alpha bound.
    """
    def compute_accuracy(yhat, y):
        return np.mean(np.equal(yhat, y))
    # objective for the traditional framework: 1 - accuracy at a fixed alpha
    def optimize_function(X, y, cost_convex, alpha):
        alpha_cost = lambda real, yhat, axis: cost_convex(real, yhat, axis, alpha)
        agg_logits = pn.penalty_aggregation(X, [bp.parse(x) for x in athena.classical_aggregations], axis=0, keepdims=False, cost=alpha_cost)
        yhat = np.argmax(agg_logits, axis=1)
        return 1 - compute_accuracy(yhat, y)
    # objective for the multimodal framework (vector of alphas)
    def optimize_function_mff(X, y, cost_convex, alpha):
        agg_logits = multi_alpha_forward(X, cost_convex, alpha)
        yhat = np.argmax(agg_logits, axis=1)
        return 1 - compute_accuracy(yhat, y)
    if mff:
        function_alpha = lambda a: optimize_function_mff(X, y, cost, a)
        x0 = [0.5] * 5
    else:
        function_alpha = lambda a: optimize_function(X, y, cost, a)
        x0 = [0.5]
    res = athena.my_optimization(function_alpha, x0=x0, niter=100, mode='montecarlo')
    alpha_value = res
    # NOTE(review): hasattr(x, 'len') is always False (probably meant
    # '__len__'), so this unwrapping branch is dead code. Fixing it would
    # change the mff path (a 5-vector would collapse to its first element),
    # so it is left as-is — confirm the intended behaviour.
    if hasattr(alpha_value, 'len'):
        alpha_value = alpha_value[0]
    return lambda real, yhatf, axis: cost(real, yhatf, axis, alpha_value), alpha_value
def study_alpha(architecture, x_test, y_test, cost, mff=False):
    """Tune alpha on (x_test, y_test) and report the resulting accuracy.

    NOTE(review): alpha is optimised on the very data it is evaluated on,
    so the returned accuracy is optimistically biased — confirm this is
    intentional (e.g. an upper-bound study rather than a fair evaluation).
    """
    lgts = architecture.csp_pass(x_test)
    alpha_f, alpha = _alpha_learn(lgts, y_test, cost, mff=mff)
    aggregation_set = athena.classical_aggregations if not mff else athena.mff_aggregations
    if not mff:
        agg_logits = pn.penalty_aggregation(lgts, [bp.parse(x) for x in aggregation_set], axis=0, keepdims=False, cost=alpha_f)
    else:
        agg_logits = multi_alpha_forward(lgts, cost, alpha)
    yhat = np.argmax(agg_logits, axis=1)
    return np.mean(np.equal(y_test, yhat))
def carmen_train_2a_all(architecture, derivate=0, verbose=False, agregacion=None, four_clases=False, opt=False):
    """Train ``architecture`` on every Carmen dataset partition and score it.

    :return: (accuracies, my_architecture) — per-dataset test accuracies and
        the architecture trained on the LAST dataset.
    """
    accuracies = []
    for dataset_train, dataset_test in zip(lb.all_carmen_datasets_partitions(full_tasks=four_clases, opt=opt), lb.all_carmen_datasets_partitions(full_tasks=four_clases, test=True, opt=opt)):
        X_train, y_train = dataset_train
        X_test, y_test = dataset_test
        # axes reordered with transpose (2, 1, 0) — presumably to the layout
        # the architecture expects; confirm against load_brain_data
        X_train = np.transpose(X_train, (2, 1, 0))
        X_test = np.transpose(X_test, (2, 1, 0))
        my_architecture = athena.bci_achitecture()
        if agregacion is None:
            architecture(my_architecture, X_train, y_train, verbose)
        else:
            architecture(my_architecture, X_train, y_train, verbose, agregacion)
        accuracies.append(np.mean(np.equal(my_architecture.forward(X_test), y_test)))
        if verbose:
            print('N-th accuracy: ' + str(np.mean(np.equal(my_architecture.forward(X_test), y_test))), 'Actual mean: ' + str(np.mean(accuracies)))
    return accuracies, my_architecture
def carmen_best_alpha_2a_all(architecture, cost, derivate=0, verbose=False, agregacion=None, four_clases=False, opt=False):
    """Train on every Carmen dataset and score it with a test-tuned alpha.

    NOTE(review): the ``cost`` parameter is never used (``agregacion`` is
    passed to study_alpha instead), and ``mff`` on the study_alpha call is
    not defined in this scope — this raises NameError unless a global
    ``mff`` exists. Confirm before use.
    """
    accuracies = []
    for dataset_train, dataset_test in zip(lb.all_carmen_datasets_partitions(full_tasks=four_clases, opt=opt), lb.all_carmen_datasets_partitions(full_tasks=four_clases, test=True, opt=opt)):
        X_train, y_train = dataset_train
        X_test, y_test = dataset_test
        X_train = np.transpose(X_train, (2, 1, 0))
        X_test = np.transpose(X_test, (2, 1, 0))
        my_architecture = athena.bci_achitecture()
        if agregacion is None:
            architecture(my_architecture, X_train, y_train, verbose)
        else:
            architecture(my_architecture, X_train, y_train, verbose, agregacion)
        accuracies.append(study_alpha(my_architecture, X_test, y_test, agregacion, mff))
        if verbose:
            print('N-th accuracy: ' + str(np.mean(np.equal(my_architecture.forward(X_test), y_test))), 'Actual mean: ' + str(np.mean(accuracies)))
    return accuracies, my_architecture
def carmen_best_alpha_2a_all_logits(architecture, cost, derivate=0, verbose=False, agregacion=None, four_clases=False, opt=False, mff=False):
    """Train on every Carmen dataset, collect the raw logits and cache them.

    The train/test logits and labels of every dataset are pickled to the
    files read back by ``carmen_penalty_cache_logits``.

    :return: (logits_train, all_y_train, logits_test, all_y_test).
    """
    logits_train = []
    logits_test = []
    all_y_train = []
    all_y_test = []
    n_partitions = 20
    for dataset_train, dataset_test in zip(lb.all_carmen_datasets_partitions(full_tasks=four_clases, opt=opt, n_partition=n_partitions), lb.all_carmen_datasets_partitions(full_tasks=four_clases, test=True, opt=opt, n_partition=n_partitions)):
        X_train, y_train = dataset_train
        X_test, y_test = dataset_test
        X_train = np.transpose(X_train, (2, 1, 0))
        X_test = np.transpose(X_test, (2, 1, 0))
        my_architecture = athena.bci_achitecture()
        if agregacion is None:
            architecture(my_architecture, X_train, y_train, verbose)
        else:
            architecture(my_architecture, X_train, y_train, verbose, agregacion)
        logits_train.append(my_architecture.logits)
        logits_test.append(my_architecture.csp_pass(X_test))
        all_y_train.append(y_train)
        all_y_test.append(y_test)
    # NOTE(review): pickle is already imported at module level; this local
    # import is redundant.
    import pickle
    framework = 'mff' if mff else 'trad'
    class_mode = 'binary' if not four_clases else '4_class'
    with open('logits_' + framework + '_carmen_' + class_mode + '_train.pckl', 'wb') as f:
        pickle.dump(logits_train, f)
    with open('logits_' + framework + '_carmen_' + class_mode + '_train_y.pckl', 'wb') as f:
        pickle.dump(all_y_train, f)
    with open('logits_' + framework + '_carmen_' + class_mode + '_test.pckl', 'wb') as f:
        pickle.dump(logits_test, f)
    with open('logits_' + framework + '_carmen_' + class_mode + '_test_y.pckl', 'wb') as f:
        pickle.dump(all_y_test, f)
    return logits_train, all_y_train, logits_test, all_y_test
def classifier_new_alpha(nombre, coste, multi_class=False, reload=True, mff=False, opt=1):
    """Optionally retrain, then evaluate the alpha models for one cost.

    :param nombre: cost name used for the model file names.
    :param coste: the cost function itself.
    :param reload: when True, refit and persist the models before evaluating.
    :return: (train_accuracies, test_accuracies) per dataset.
    """
    if reload:
        # Refit and persist the alpha models before evaluating them.
        save_clfs(coste, nombre, mff, four_classes=multi_class, opt=opt)
    return compute_accuracy_logistic_alpha(coste, nombre, mff, multi_class, opt)
def accuracy_report(cost_names, cost_funcs, mff, reload, multi_class, opt):
    """Evaluate each cost function and tabulate its mean test accuracy.

    :param cost_names: row labels, aligned with ``cost_funcs``.
    :return: DataFrame indexed by cost name with a single 'Accuracy' column.
    """
    report = pd.DataFrame(np.zeros((len(cost_names), 1)), index=cost_names, columns=['Accuracy'])
    for row, (name, cost_func) in enumerate(zip(cost_names, cost_funcs)):
        _, test_accuracies = classifier_new_alpha(name, cost_func, mff=mff, reload=reload, multi_class=multi_class, opt=opt)
        report.iloc[row, 0] = np.mean(test_accuracies)
    return report
def accuracy_report_dual(mff, reload, multi_class_bool, opt):
'''
Igual que accuracy report pero en una table de coste x coste
'''
accuracies_df = pd.DataFrame(np.zeros((len(names_total), len(names_total))), index=names_total, columns=names_total)
for ix, cost_func in enumerate(quasi_total):
for jx, cost_func2 in enumerate(quasi_total):
cost = generate_combination(ix, jx)
accuracies_train, accuracies_test = classifier_new_alpha(names_total[ix] + '+' + names_total[jx], cost, mff=mff, reload=reload, multi_class=multi_class_bool, opt=opt)
acc_test_df = | pd.DataFrame(accuracies_test) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
sheets = pd.read_excel('Family Predictions 2022.xlsx', sheet_name=None)
questions = sheets.pop('Main')
player_sheets = sheets
player_sheets.keys()
questions['ID'] = questions['ID'].astype(int)
questions.set_index('ID')
questions = questions.drop(['Clarifications', 'Outcome', 'Outcome comments', 'Outcome supporting link', 'ID'], axis=1)
guesses = {k: player_sheets[k]['Probability (0.0 to 1.0)'] for k in player_sheets}
guesses = | pd.DataFrame(data=guesses) | pandas.DataFrame |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
    def test_empty(self):
        # see gh-16302: to_numeric on an empty object Series must return an
        # empty numeric Series instead of raising
        s = pd.Series([], dtype=object)
        res = to_numeric(s)
        expected = pd.Series([], dtype=np.int64)
        tm.assert_series_equal(res, expected)
        # Original issue example: downcasting an empty result must also work
        res = to_numeric(s, errors='coerce', downcast='integer')
        expected = pd.Series([], dtype=np.int8)
        tm.assert_series_equal(res, expected)
    def test_series(self):
        # numeric strings are converted element-wise
        s = pd.Series(['1', '-3.14', '7'])
        res = to_numeric(s)
        expected = pd.Series([1, -3.14, 7])
        tm.assert_series_equal(res, expected)
        # mixed str/int input yields the same numeric result
        s = pd.Series(['1', '-3.14', 7])
        res = to_numeric(s)
        tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
| tm.assert_series_equal(res, s) | pandas.util.testing.assert_series_equal |
import pandas as pd
import spell
from curami.commons import file_utils
'''
find spelling mistakes of identified similar pairs in previous step
'''
match_ratio = 0.85
def analyze():
attributes = | pd.read_csv(file_utils.matched_attributes_file, encoding=file_utils.encoding) | pandas.read_csv |
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
import os
try:
os.chdir(os.path.join(os.getcwd(), 'eddy_src/q_learning_stock'))
print(os.getcwd())
except:
pass
import indicators
import util
import config
import numpy as np
import pandas as pd
import scipy
import time
import calendar
import math
#%% [markdown]
## Data Generation script for csv data. Use this script to generate data for missing csv data. For scripts using Alpha Vantage, a user key should be obtained
#%% [markdown]
### Generate fundamental data for JP morgan: JPM
#%%
# df = pd.read_csv('data/fundamental.csv')
# df = df.drop(['SimFin ID', 'Company Industry Classification Code'], axis=1)
# df = df.pivot_table(index=['Ticker','publish date', 'Indicator Name'],
# values=['Indicator Value'])
# jpm = df.iloc[df.index.get_level_values('Ticker')=='JPM']
# jpm.to_csv('data/jpm_fundamental.csv')
#%% [markdown]
### Generate dataset for portfolios
#%%
def generate_portfolio_dataset(stocks: list, path: str, start='1/1/2016', end='31/12/2018'):
    """Download OHLCV data for each ticker and write it to <path>/<ticker>.csv.

    Note that the API might not respond fast enough, or have a response
    limit, so failures are reported and skipped rather than aborting the run.
    """
    for stock in stocks:
        try:
            prices = util.get_stock_data(stock, start, end)
            prices.index.name = 'Date'
            out_file = '{}.csv'.format(os.path.join(path, stock))
            prices.to_csv(out_file, header=['Open', 'High', 'Low', 'Close', 'Volume'])
            print("{} done".format(stock))
            # throttle requests to stay under the API rate limit
            time.sleep(1)
        except Exception as e:
            print("Unable to fetch data for {}".format(stock))
            print(e)
#%% [markdown]
#### Generate stock data for various market indexes
##### CAC 40: ^FCHI
##### DAX PERFORMANCE-INDEX: ^GDAXI
##### Dow Jones Industrial Average: ^DJI
##### S&P 500 Index: ^GSPC
##### S&P 100: ^OEX
##### FTSE 100 Index: ^FTSE
##### Nikkei 225: ^N225
##### SSE Composite Index: ^SSEC
##### NYSE Composite: ^NYA
##### Euro Stoxx 50: ^STOXX50E
##### Technology Select Sector SPDR ETF: XLK
##### Energy Select Sector SPDR Fund: XLE
##### Health Care Select Sector SPDR Fund: XLV
##### Consumer Discret Sel Sect SPDR ETF: XLY
#%%
stocks = ['^FCHI', '^GDAXI', '^DJI', '^GSPC', '^OEX', '^FTSE', '^N225', '^SSEC', '^NYA', '^STOXX50E','XLK', 'XLE', 'XLV', 'XLY']
generate_portfolio_dataset(stocks, 'data/indexes')
#%% [markdown]
## Generate for algo
### IBOVESPA (Brazil): ^BVSP
### TSEC weighted index: ^TWII
### NASDAQ Composite: ^IXIC
#%%
# alpha vantage failed to fetch for nasdaq composite. Data can be downloaded here: https://www.quandl.com/data/NASDAQOMX/COMP-NASDAQ-Composite-COMP manually
stocks = ['^BVSP', '^TWII']
generate_portfolio_dataset(stocks, 'data/indexes', '1/1/2014')
#%% [markdown]
##Generate some stocks for testing in algo
### BVSP
#### Equatorial Energia S.A.: EQTL3.SA
#### Itaúsa - Investimentos Itaú S.A.: ITSA4.SA
#### Petróleo Brasileiro S.A. - Petrobras: PETR3.SA
#%%
stocks = ['EQTL3.SA', 'ITSA4.SA', 'PETR3.SA']
generate_portfolio_dataset(stocks, 'data/algo/^BVSP', '1/1/2014')
#%% [markdown]
### TWII
#### Formosa Chemicals & Fibre Corporation: 1326.TW
#### LARGAN Precision Co.,Ltd: 3008.TW
#### Cathay Financial Holding Co., Ltd. 2882.TW
#%%
stocks = ['1326.TW', '3008.TW', '2882.TW']
generate_portfolio_dataset(stocks, 'data/algo/^TWII', '1/1/2014')
#%% [markdown]
### IXIC
#### Tesla, Inc.: TSLA
#### IBERIABANK Corporation: IBKC
#### FireEye, Inc.: FEYE
#%%
stocks = ['TSLA', 'IBKC', 'FEYE']
generate_portfolio_dataset(stocks, 'data/algo/^IXIC', '1/1/2014')
#%% [markdown]
#### Generate stock data for Goldman Sachs
#### GS Equity Growth Portfolio - Institutional
##### International Equity Insights Fund Institutional Class: GCIIX
##### Goldman Sachs Large Cap Growth Insights Fund Institutional Class: GCGIX
##### Goldman Sachs Large Cap Value Insights Fund Institutional Class: GCVIX
##### Goldman Sachs Emerging Markets Equity Insights Fund International: GERIX
##### Goldman Sachs ActiveBeta U.S. Large Cap Equity ETF: GSLC
##### Goldman Sachs ActiveBeta Emerging Markets Equity ETF: GEM
##### GS Global Real Estate Securities Fund: GARSX
##### Goldman Sachs Small Cap Equity Insights Fund Institutional Class: GCSIX
##### Goldman Sachs ActiveBeta International Equity ETF: GSIE
##### GS Financial Square Government Fund: FGTXX
##### Goldman Sachs International Small Cap Insights Fund Institutional Class: GICIX
##### iShares MSCI Brazil Capped ETF: EWZ
#%%
# Note that 'FGTXX' is ignored due to having no data avaliable. It consists of only U.S. Government and U.S. Treasury securities including bills, bonds, notes and repurchase agreements, which are stable
stocks = ['GCIIX', 'GCGIX', 'GCVIX', 'GERIX', 'GSLC', 'GEM', 'GARSX', 'GCSIX', 'GSIE', 'GICIX', 'EWZ']
generate_portfolio_dataset(stocks, 'data/goldman')
#%% [markdown]
#### Generate quarterly returns for GS Portfolios
##### Obtain stock data for Balanced Strategy Portfolio: GIPAX, GIPCX, GIPIX, GIPSX, GIPTX, GIPRX, GIPUX
#%%
stocks = ['GIPAX', 'GIPCX', 'GIPIX', 'GIPSX', 'GIPTX', 'GIPRX', 'GIPUX']
generate_portfolio_dataset(stocks, 'data/goldman/portfolio/balanced')
#%% [markdown]
##### Obtain stock data for Equity Growth Strategy Portfolio: GAPAX, GAXCX, GAPIX, GAPSX, GAPTX, GAPRX, GAPUX
#%%
stocks = ['GAPAX', 'GAXCX', 'GAPIX', 'GAPSX', 'GAPTX', 'GAPRX', 'GAPUX']
generate_portfolio_dataset(stocks, 'data/goldman/portfolio/equity_growth')
#%% [markdown]
##### Obtain stock data for Growth and Income Strategy Portfolio: GOIAX, GOICX, GOIIX, GOISX, GPITX, GPIRX, GOIUX
#%%
stocks = ['GOIAX', 'GOICX', 'GOIIX', 'GOISX', 'GPITX', 'GPIRX', 'GOIUX']
generate_portfolio_dataset(stocks, 'data/goldman/portfolio/growth_income')
#%% [markdown]
##### Obtain stock data for Growth Strategy Portfolio: GGSAX, GGSCX, GGSIX, GGSSX, GGSTX, GGSRX, GGSUX
#%%
stocks = ['GGSAX', 'GGSCX', 'GGSIX', 'GGSSX', 'GGSTX', 'GGSRX', 'GGSUX']
generate_portfolio_dataset(stocks, 'data/goldman/portfolio/growth')
#%% [markdown]
##### Obtain stock data for Satellite Strategy Portfolio: GXSAX, GXSCX, GXSIX, GXSSX, GXSTX, GXSRX, GXSUX
#%%
stocks = ['GXSAX', 'GXSCX', 'GXSIX', 'GXSSX', 'GXSTX', 'GXSRX', 'GXSUX']
generate_portfolio_dataset(stocks, 'data/goldman/portfolio/satellite')
#%% [markdown]
#### Generate quarterly returns for portfolios
#%%
def generate_portfolio_quarterly_returns(directory_path: str):
"""Generate portfolio_quarterly_return.csv in path. Not recursive.
"""
quarterly_dict = {'symbol': [], 'start_period': [], 'end_period': [], 'quarterly_return': []}
print('Found the following files in directory: {}'.format(os.listdir(directory_path)))
for filename in os.listdir(directory_path):
not_stocks = ['portfolio_quarterly_return.csv', 'pearson_correlation.csv',
'portfolio_quarterly_return.csv', 'best_portfolio_switch.csv']
if filename.endswith(".csv") and filename not in not_stocks:
print('Processing {}...'.format(filename))
file_path = os.path.join(directory_path, filename)
df = pd.read_csv(file_path, parse_dates=['Date'])
start_year = df['Date'].iloc[0].year
end_year = df['Date'].iloc[-1].year
symbol = filename[:-4]
for year in range(start_year, end_year + 1):
for quarter_start in range(1,13,3):
q_start = '{}-{}-1'.format(year, quarter_start)
q_end = '{}-{}-{}'.format(year, quarter_start + 2, calendar.monthrange(year, quarter_start + 2)[1])
temp_df = df[(df['Date'] >= q_start) & (df['Date'] <= q_end)]
# Remove empty data
temp_df = temp_df[temp_df['Close'] != 0]
if len(temp_df.index) > 2:
quarter_start_close = temp_df['Close'].iloc[0]
quarter_end_close = temp_df['Close'].iloc[-1]
quarter_return = (quarter_end_close - quarter_start_close) / quarter_start_close * 100
quarterly_dict['symbol'].append(symbol)
quarterly_dict['start_period'].append(temp_df['Date'].iloc[0].date())
quarterly_dict['end_period'].append(temp_df['Date'].iloc[-1].date())
quarterly_dict['quarterly_return'].append(quarter_return)
df = pd.DataFrame(quarterly_dict)
report_path = os.path.join(directory_path, 'portfolio_quarterly_return.csv')
df.to_csv(report_path)
#%%
##### Generate quarterly returns for all portfolios in goldman
portfolios = ['balanced', 'equity_growth', 'growth', 'growth_income', 'satellite']
for portfolio in portfolios:
try:
portfolio_path = os.path.join('data/goldman/portfolio', portfolio)
generate_portfolio_quarterly_returns(portfolio_path)
print('Generated report for {}'.format(portfolio))
except Exception as e:
print('Error generating report for {}'.format(portfolio))
print(e)
print('Report generation done.')
#%% [markdown]
##### Generate best portfolio switch for goldman
###### Concatenate all portfolio returns
#%%
portfolios = ['balanced', 'equity_growth', 'growth', 'growth_income', 'satellite']
portfolio_dfs = []
for portfolio in portfolios:
portfolio_report_path = os.path.join('data/goldman/portfolio', portfolio, 'portfolio_quarterly_return.csv')
portfolio_dfs.append( | pd.read_csv(portfolio_report_path) | pandas.read_csv |
import pandas as pd
from datetime import datetime
import numpy as np
import scipy.stats as ss
from sklearn import preprocessing
data_root = '/media/jyhkylin/本地磁盘1/study/数据挖掘竞赛/SMPCUP2017/'
post_data = pd.read_table(data_root+'SMPCUP2017dataset/2_Post.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
browse_data = pd.read_table(data_root+'SMPCUP2017dataset/3_Browse.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
comment_data = pd.read_table(data_root+'SMPCUP2017dataset/4_Comment.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
voteup_data = pd.read_table(data_root+'SMPCUP2017dataset/5_Vote-up.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
votedown_data = pd.read_table(data_root+'SMPCUP2017dataset/6_Vote-down.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
favorite_data = pd.read_table(data_root+'SMPCUP2017dataset/7_Favorite.txt' ,sep='\001' ,names=['userID' ,'blogID' ,'date'])
follow_data = pd.read_table(data_root+'SMPCUP2017dataset/8_Follow.txt' ,sep='\001' ,names=['userID1' ,'userID2'])
letter_data = pd.read_table(data_root+'SMPCUP2017dataset/9_Letter.txt' ,sep='\001' ,names=['userID1' ,'userID2' ,'date'])
names = locals()
mainAct = ['post' ,'browse']
secondAct = ['comment' ,'voteup' ,'votedown' ,'favorite']
relAct = ['follow' ,'letter']
passiveAct = ['browse' ,'comment' ,'voteup' ,'votedown' ,'favorite' ,'follow']
userList = list()
for act in mainAct+secondAct:
userList= userList + names['%s_data'%act]['userID'].values.tolist()
userList = list(set(userList))
actData = | pd.DataFrame(index=userList) | pandas.DataFrame |
"""initialize gui by reading in images specified by user inputs in notebooks/label.ipynb"""
import cocpit
import os
import pandas as pd
from typing import Tuple
import itertools
def read_parquet(
year: int, time_of_day: str, precip_threshold: float, precip: str
) -> Tuple[pd.DataFrame, str]:
"""
Read a time matched parquet file for a given year between camera images and observations.
Filter based on year, precip or no precip, precip threshold, and time of day (day or night).
Args:
year (int): user-specified year to label images from
time_of_day (str): 'day' or 'night'
precip_threshold (float): only grab images above this threshold
precip (str): 'precip' or 'no precip'
"""
df = pd.read_parquet(f"/ai2es/matched_parquet/{year}_timeofday.parquet")
df = df[df["night"] == True if time_of_day == "night" else df["night"] == False]
df = df[
df["precip_accum_1min [mm]"] > precip_threshold
if precip == "precip"
else df["precip_accum_1min [mm]"] == 0.0
]
return (df, f"/ai2es/{time_of_day}_{precip}_hand_labeled/{year}")
def shuffle_df(df: pd.DataFrame) -> pd.DataFrame:
"""shuffle df paths such that there is station diversity in training dataset"""
return df.sample(frac=1)
def make_folders(folder_dest) -> None:
"""
Make folders in training dir to save to if they don't exist
Args:
folder_dest (str): folder to save images to
"""
for label in cocpit.config.CLASS_NAME_MAP.values():
save_path = os.path.join(folder_dest, label)
if not os.path.exists(save_path):
os.makedirs(save_path)
def show_new_images(df: pd.DataFrame) -> pd.DataFrame:
"""Make sure images shown haven't already been labeled
Args:
df (pd.DataFrame): input df with all paths from read parquet
Returns:
df (pd.DataFrame): df where paths are removed if already labeled
"""
all_classes = [
os.listdir(
os.path.join(cocpit.config.DATA_DIR, cocpit.config.CLASS_NAME_MAP[class_])
)
for class_ in cocpit.config.CLASS_NAMES
]
all_classes = list(itertools.chain.from_iterable(all_classes))
already_labeled = | pd.DataFrame({"path": all_classes}) | pandas.DataFrame |
"""
Tests for the simulation codebase.
"""
from __future__ import division
import numpy as np
import pandas as pd
import pytest
import multiprocessing
from choicemodels import MultinomialLogit
from choicemodels.tools import (iterative_lottery_choices, monte_carlo_choices,
MergedChoiceTable, parallel_lottery_choices)
# TO DO - could we set a random seed and then verify that monte_carlo_choices() provides
# the same output as np.random.choice()?
def build_data(num_obs, num_alts):
"""
Build a simulated list of scenarios, alternatives, and probabilities
"""
obs = np.repeat(np.arange(num_obs), num_alts)
alts = np.random.randint(0, num_alts*10, size=num_obs*num_alts)
weights = np.random.rand(num_alts, num_obs)
probs = weights / weights.sum(axis=0)
probslist = probs.flatten(order='F')
data = pd.DataFrame({'oid': obs, 'aid': alts, 'probs': probslist})
data = data.set_index(['oid','aid']).probs
return data
def test_monte_carlo_choices():
"""
Test simulation of choices without capacity constraints. This test just verifies that
the code runs, using a fairly large synthetic dataset.
"""
data = build_data(1000, 100)
monte_carlo_choices(data)
def test_simulation_accuracy():
"""
This test checks that the simulation tool is generating choices that match the
provided probabilities.
"""
data = build_data(5,3)
# Get values associated with an arbitrary row
r = np.random.randint(0, 15, 1)
row = pd.DataFrame(data).reset_index().iloc[r]
oid = int(row.oid)
aid = int(row.aid)
prob = float(pd.DataFrame(data).query('oid=='+str(oid)+' & aid=='+str(aid)).sum())
n = 1000
count = 0
for i in range(n):
choices = monte_carlo_choices(data)
if (choices.loc[oid] == aid):
count += 1
assert(count/n > prob-0.1)
assert(count/n < prob+0.1)
# CHOICE SIMULATION WITH CAPACITY CONSTRAINTS
@pytest.fixture
def obs():
d1 = {'oid': np.arange(50),
'obsval': np.random.random(50),
'choice': np.random.choice(np.arange(60), size=50)}
return | pd.DataFrame(d1) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""# 基於內容推薦"""
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import paired_distances,cosine_similarity
movies = pd.read_csv('./ml-latest-small/movies.csv')
rate = pd.read_csv('./ml-latest-small/ratings.csv')
display(movies.head())
display(rate.head())
# movies留下movieId與genres(電影分類)
# rate留下userId與movieId
# 把movies與rate以movieId合併成一個df
movies.drop('title',axis=1,inplace=True)
rate.drop(['rating', 'timestamp'],axis=1,inplace=True)
df = pd.merge(rate, movies, on='movieId')
df.head()
# 建立movie的特徵矩陣
oneHot = movies["genres"].str.get_dummies("|") # One-Hot Encoding
movie_arr = pd.concat([movies, oneHot], axis=1)
movie_arr.drop("genres",axis=1,inplace=True)
movie_arr.set_index("movieId",inplace=True)
display(movie_arr.head())
# 建立user的特徵矩陣
oneHot = df["genres"].str.get_dummies("|") # One-Hot Encoding
user_arr = | pd.concat([df, oneHot], axis=1) | pandas.concat |
'''
Functions used to compute and compare TOP_K KDE or IsolationForest scores for on different ports, in order to determine top ranked most anomalous time windows.
'''
# --- Imports ---
from sklearn.preprocessing import MinMaxScaler
import scipy.integrate as integrate
import pandas as pd
import numpy as np
import time
import os, sys
# add the parent directory to the path
sys.path.insert(0, os.path.abspath("../"))
from common import *
from constants import *
from constants_model import *
from model import *
# -----------------
# used for gathering performance metrics every top_k elements in the sorted anomaly scores list
TOP_K = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 900, 1000, 1100, 1200, 1300, 1400, 1440]
def get_top_k_scores(scores, k):
score_tuples = [(v, i) for i, v in enumerate(scores)]
scores_ranked = [(i, v) for (v, i) in sorted(score_tuples)]
# scores_ranked = [(i, v) for (v, i) in sorted(score_tuples, reverse=True)]
# print(scores_ranked[-10:-1])
return scores_ranked[0:k]
def order_per_port(scores, port, prob_index=3):
df = []
for i in range(len(scores)):
tuple_crt = scores[i]
window = tuple_crt[0]
val = tuple_crt[prob_index]
row = [port, window, val]
df.append(row)
print(row)
# print(df)
return df
def write_scores_df(df, ports, out_file, across_all=True):
col_headers = []
if across_all:
if MODEL == 'kde':
col_headers = ["port", "window", "normalized_probability"]
elif MODEL == 'isolation':
col_headers = ["port", "window", "anomaly_score"]
else:
col_headers = ["port", "window", "score"]
pd.DataFrame(df, columns=col_headers).to_csv(out_file, index=False)
def write_metrics(metrics, ports, out_file, across_all=True):
df = []
col_headers = []
col_headers = ["k", "tp_total", "fp_total"]
if across_all:
for port in ports:
fp_name = "fp_" + str(port)
col_headers.append(fp_name)
print(col_headers)
for k in TOP_K:
tp_values = metrics[0][k].values()
fp_values = metrics[1][k].values()
tp_total = sum(tp_values)
fp_total = sum(fp_values)
row = [k, tp_total, fp_total]
if across_all:
row = row + list(fp_values)
df.append(row)
print("df: ", df)
| pd.DataFrame(df, columns=col_headers) | pandas.DataFrame |
import pandas as pd
from datetime import datetime
from sklearn.preprocessing import LabelEncoder, label_binarize
from sklearn.ensemble import RandomForestClassifier
train_payments_file = 'data/qiwi_payments_data_train.csv'
train_users_file = 'data/qiwi_users_data_train.csv'
# test files
test_payments_file = 'data/qiwi_payments_data_test.csv'
test_users_file = 'data/qiwi_users_data_test.csv'
def load_train_set():
train_payments = pd.read_csv(train_payments_file, sep=';')
train_users = pd.read_csv(train_users_file, sep=';')
# merge users and payments by user_id
train = pd.merge(train_users, train_payments, on='user_id')
# parse dates
train = parse_dates(train)
return train
def load_test_set():
test_payments = pd.read_csv(test_payments_file, sep=';')
test_users = pd.read_csv(test_users_file, sep=';')
# merge users and payments by user_id
test = pd.merge(test_users, test_payments, on='user_id')
test = parse_dates(test)
return test
def parse_dates(dataset):
''' Parses `date_month` to unix time.
One of the columns contain year and another month.
'''
dates = [datetime.strptime(dt, '%Y-%m') for dt in dataset['date_month']]
unix = [int(dt.timestamp()) for dt in dates]
dataset['unix_time'] = unix
dataset = dataset.drop(['date_month'], axis=1)
return dataset
def prepare_data():
''' Workflow to clean data.
This function cleans the data, add it at the beggining of main()
to refresh datasets.
'''
# load the data from source
train = load_train_set()
test = load_test_set()
# encode sex in train set
le = LabelEncoder()
le.fit(train['sex'])
train['sex'] = le.transform(train['sex'])
# encode universities with labels
train, test = binarize_uni(train, test)
train, test = binarize_category(train, test)
# fill None in graduation year with 0s
train['graduation_year'] = train['graduation_year'].fillna(value=0)
test['graduation_year'] = test['graduation_year'].fillna(value=0)
# change `graduation_year` type to int
train['graduation_year'] = train['graduation_year'].astype(int)
test['graduation_year'] = test['graduation_year'].astype(int)
test.to_csv('data/test.csv', index=False)
train.to_csv('data/train.csv', index=False)
def binarize_uni(train, test):
unis = set(train['university'])
# unit 2 sets of names to get all of them
unis = pd.Series(list(unis.union(test['university'])))
le = LabelEncoder()
le.fit(unis)
# transformation
train['university'] = le.transform(train[['university']])
test['university'] = le.transform(test['university'])
unis_transformed = le.transform(unis)
# encode `university` with OneHotEncoding
train_uni_bin = label_binarize(train['university'], unis_transformed)
test_uni_bin = label_binarize(test['university'], unis_transformed)
train_uni_bin = pd.DataFrame(train_uni_bin)
test_uni_bin = pd.DataFrame(test_uni_bin)
# add binarized columns to DataFrames
train = pd.concat([train, train_uni_bin], axis=1)
test = pd.concat([test, test_uni_bin], axis=1)
# drop encoded columns
train = train.drop(['university'], axis=1)
test = test.drop(['university'], axis=1)
return train, test
def binarize_category(train, test):
# binarize cathogory column
categories = set(train['category'])
categories = pd.Series(list(categories.union(test['category'])))
le = LabelEncoder()
le.fit(categories)
cols = ['cat={}'.format(i) for i in range(len(categories))]
train_cat_bin = pd.DataFrame(label_binarize(train['category'], categories),
columns=cols)
test_cat_bin = pd.DataFrame(label_binarize(test['category'], categories),
columns=cols)
train = | pd.concat([train, train_cat_bin], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
"""
Created on Mon November 10 14:13:20 2019
@author: <NAME>
takes the condition name as input (e.g. lik or int)
"""
def covariate (cond):
# data analysis and wrangling
import pandas as pd
import numpy as np
import os
from pathlib import Path
#addpath
home = str(Path.home())
#declare variables
GLM = ("GLM-10")
s = ("01", "02", "03", "04", "05", "06", "07", "09", "10", "11", "12", "13","14", "15", "16", "17","18", "20", "21", "22","23", "24","25", "26")
taskDIR = ("hedonic")
df1 = []
df2 = []
df3 = []
df5 = []
dfsubj = []
df01 = pd.DataFrame()
df02 = pd.DataFrame()
df03 = pd.DataFrame()
df05 = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import logging
FORMAT = ">>> %(filename)s, ln %(lineno)s - %(funcName)s: %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
review_folder = 'Z:\\LYR\\LYR_2017studies\\LYR17_2Dmodelling\\LYR17_1_EDDPD\\review\\133'
# initializing csv file lists
hpc_files = []
review_files = []
PO_conv_files = []
# initializing dataframes
hpc_sum = | pd.DataFrame() | pandas.DataFrame |
### RF TRAINING AND EVALUATION FOR MULTICLASS CLINICAL OUTCOMES ###
# The script is divided in 4 parts:
# 1. Data formatting
# 2. Hyperparameter Tuning (HT_results)
# 3. Model training and cross validation (CV_results)
# 4. Model training and predictions (TEST_results)
## Intended to be run with arguments:
# bsub "python RF_training_mc_outcome.py Day1 D1_raw_data D1_no_pda D1_no_pda_MC_OUTCOME"
###############################################
##### 1. DATA PREPARATION AND ASSESSMENT #####
###############################################
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import sys, getopt
# TO PASS THE ARGUMENTS:
day = sys.argv[1]
data_type = sys.argv[2]
demog = sys.argv[3]
outcome = sys.argv[4]
# Example:
# day = 'Day1'
# data_type = 'D1_raw_data'
# demog = 'D1_pda'
# outcome = 'D1_pda_MC_OUTCOME'
# RESULTS PATHS:
# results_root = results_root_path
# assessment_path = results_root+'assessment/'
# ht_path = results_root+'HT_results/'
# cv_path = results_root+'CV_results/'
# test_path = results_root+'TEST_results/'
# TEST_features_importance = results_root+'/TEST_features_importance/'
# TEST_trained_models = results_root+'/TEST_trained_models/'
# TO READ THE INPUT DATA (The datasets have been previously created to include only the relevant variables)
# root_path = root_path
file_name = 'file_name.txt'
TTdata = root_path + file_name
df = pd.read_table(TTdata)
df = df.set_index('AE')
input_variables = list(df.columns)
with open(assessment_path+'input_data_column_names.txt', "w") as output:
output.write(str(input_variables))
# DATA PROCESSING: Features and Targets and Convert Data to Arrays
# Outcome (or labels) are the values we want to predict
outcome = pd.DataFrame(df[['id', 'da', 'pdr', 'pdrm', 'pdm']])
descriptors = df.drop(['id', 'da', 'pdr', 'pdrm', 'pdm'], axis = 1)
descriptors_list = list(descriptors.columns)
with open(assessment_path+'input_data_features.txt', "w") as output:
output.write(str(descriptors_list))
# TRAINING/VALIDATION (TV, for hyperparameter tuning) and TEST (Tt, for model evaluation) Sets:
# Split the data into training and testing sets:
TV_features_df, Tt_features_df, TV_outcome_df, Tt_outcome_df = train_test_split(descriptors, outcome,
test_size = 0.30, random_state = 11,
stratify=outcome) # Important to keep the % of classes similar in TV and Tt
# To transform to numpy arrays without index:
TV_features = np.array(TV_features_df)
Tt_features = np.array(Tt_features_df)
TV_outcome = np.array(TV_outcome_df[['id', 'da', 'pdr', 'pdrm', 'pdm']])
Tt_outcome = np.array(Tt_outcome_df[['id', 'da', 'pdr', 'pdrm', 'pdm']])
# Percentage of indviduals in each class:
TV_class_frac = TV_outcome_df.sum(axis = 0, skipna = True)*100/len(TV_outcome)
Tt_class_frac = Tt_outcome_df.sum(axis = 0, skipna = True)*100/len(Tt_outcome)
# Save it:
fractions = | pd.DataFrame(columns=['id', 'da', 'pdr', 'pdrm', 'pdm']) | pandas.DataFrame |
#!/usr/bin/env python3
# Copyright (c) 2022. RadonPy developers. All rights reserved.
# Use of this source code is governed by a BSD-3-style
# license that can be found in the LICENSE file.
__version__ = '0.2.1'
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import os
import platform
import radonpy
# For Fugaku
#from radonpy.core import const
#const.mpi_cmd = 'mpiexec -stdout ./%%n.%%j.out -stderr ./%%n.%%j.err -n %i'
from radonpy.core import utils, calc, poly
from radonpy.ff.gaff2_mod import GAFF2_mod
from radonpy.sim.preset import eq
if __name__ == '__main__':
data = {
'DBID': os.environ.get('RadonPy_DBID'),
'monomer_ID': os.environ.get('RadonPy_Monomer_ID', None),
'ter_ID_1': os.environ.get('RadonPy_TER_ID', 'CH3'),
'smiles_list': os.environ.get('RadonPy_SMILES'),
'monomer_dir': os.environ.get('RadonPy_Monomer_Dir', None),
'copoly_ratio_list': os.environ.get('RadonPy_Copoly_Ratio', '1'),
'copoly_type': os.environ.get('RadonPy_Copoly_Type', 'random'),
'input_natom': int(os.environ.get('RadonPy_NAtom', 1000)),
'input_nchain': int(os.environ.get('RadonPy_NChain', 10)),
'ini_density': float(os.environ.get('RadonPy_Ini_Density', 0.05)),
'temp': float(os.environ.get('RadonPy_Temp', 300.0)),
'press': float(os.environ.get('RadonPy_Press', 1.0)),
'input_tacticity': os.environ.get('RadonPy_Tacticity', 'atactic'),
'tacticity': '',
'remarks': os.environ.get('RadonPy_Remarks', ''),
'Python_ver': platform.python_version(),
'RadonPy_ver': radonpy.__version__,
'preset_eq_ver': eq.__version__,
'check_eq': False,
'check_tc': False
}
omp = int(os.environ.get('RadonPy_OMP', 0))
mpi = int(os.environ.get('RadonPy_MPI', utils.cpu_count()))
gpu = int(os.environ.get('RadonPy_GPU', 0))
retry_eq = int(os.environ.get('RadonPy_RetryEQ', 3))
work_dir = './%s' % data['DBID']
if not os.path.isdir(work_dir):
os.makedirs(work_dir)
save_dir = os.path.join(work_dir, 'analyze')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
monomer_dir = data['monomer_dir'] if data['monomer_dir'] else None
smi_list = data['smiles_list'].split(',')
ratio = [float(x) for x in str(data['copoly_ratio_list']).split(',')]
if data['monomer_ID']: monomer_id = data['monomer_ID'].split(',')
# Load shared monomer_data.csv file
if data['monomer_dir'] and data['monomer_ID']:
for i, mid in enumerate(monomer_id):
monomer_df = pd.read_csv(os.path.join(monomer_dir, 'monomer_%s_data.csv' % mid), index_col=0)
monomer_data = monomer_df.iloc[0].to_dict()
data['smiles_%i' % (i+1)] = monomer_data.pop('smiles')
data['monomer_ID_%i' % (i+1)] = mid
data['copoly_ratio_%i' % (i+1)] = ratio[i]
for k in monomer_data.keys(): data['%s_monomer%i' % (k, i+1)] = monomer_data[k]
# Load qm_data.csv file
elif os.path.isfile(os.path.join(save_dir, 'qm_data.csv')):
qm_df = pd.read_csv(os.path.join(save_dir, 'qm_data.csv'), index_col=0)
qm_data = qm_df.iloc[0].to_dict()
data = {**qm_data, **data}
if monomer_dir is None: monomer_dir = save_dir
elif os.path.isfile(os.path.join(work_dir, 'qm_data.csv')):
qm_df = pd.read_csv(os.path.join(work_dir, 'qm_data.csv'), index_col=0)
qm_data = qm_df.iloc[0].to_dict()
data = {**qm_data, **data}
if monomer_dir is None: monomer_dir = work_dir
else:
print('ERROR: Cannot find monomer data.')
# Load monomer pickle file
mols = []
for i in range(len(smi_list)):
if data['monomer_ID']:
mol = utils.pickle_load(os.path.join(monomer_dir, 'monomer_%s.pickle' % (monomer_id[i])))
else:
mol = utils.pickle_load(os.path.join(monomer_dir, 'monomer%i.pickle' % (i+1)))
mols.append(mol)
if data['ter_ID_1']:
ter = utils.pickle_load(os.path.join(monomer_dir, 'ter_%s.pickle' % data['ter_ID_1']))
else:
ter = utils.pickle_load(os.path.join(monomer_dir, 'ter1.pickle'))
ff = GAFF2_mod()
n = poly.calc_n_from_num_atoms(mols, data['input_natom'], ratio=ratio, terminal1=ter)
data['DP'] = n
# Generate homopolymer chain
if len(mols) == 1:
data['copoly_ratio_list'] = '1'
ratio = [1]
data['copoly_type'] = ''
homopoly = poly.polymerize_rw(mols[0], n, tacticity=data['input_tacticity'])
homopoly = poly.terminate_rw(homopoly, ter)
data['tacticity'] = poly.get_tacticity(homopoly)
# Force field assignment
result = ff.ff_assign(homopoly)
if not result:
data['remarks'] += '[ERROR: Can not assign force field parameters.]'
utils.pickle_dump(homopoly, os.path.join(save_dir, 'polymer.pickle'))
# Generate amorphous cell
ac = poly.amorphous_cell(homopoly, data['input_nchain'], density=data['ini_density'])
# Generate random copolymer chain
elif len(mols) > 1 and data['copoly_type'] == 'random':
copoly_list = poly.random_copolymerize_rw_mp(mols, n, ratio=ratio, tacticity=data['input_tacticity'],
nchain=data['input_nchain'], mp=min([omp*mpi, data['input_nchain'], 60]))
for i in range(data['input_nchain']):
copoly_list[i] = poly.terminate_rw(copoly_list[i], ter)
# Force field assignment
result = ff.ff_assign(copoly_list[i])
if not result:
data['remarks'] += '[ERROR: Can not assign force field parameters.]'
utils.pickle_dump(copoly_list[i], os.path.join(save_dir, 'polymer%i.pickle' % i))
data['tacticity'] = poly.get_tacticity(copoly_list[0])
# Generate amorphous cell
ac = poly.amorphous_mixture_cell(copoly_list, [1]*data['input_nchain'], density=data['ini_density'])
# Generate alternating copolymer chain
elif len(mols) > 1 and data['copoly_type'] == 'alternating':
ratio = [1/len(mols)]*len(mols)
data['copoly_ratio_list'] = ','.join([str(x) for x in ratio])
n = poly.calc_n_from_num_atoms(mols, data['input_natom'], ratio=ratio, terminal1=ter)
n = round(n/len(mols))
data['DP'] = n
copoly = poly.copolymerize_rw(mols, n, tacticity=data['input_tacticity'])
copoly = poly.terminate_rw(copoly, ter)
data['tacticity'] = poly.get_tacticity(copoly)
# Force field assignment
result = ff.ff_assign(copoly)
if not result:
data['remarks'] += '[ERROR: Can not assign force field parameters.]'
utils.pickle_dump(copoly, os.path.join(save_dir, 'polymer.pickle'))
# Generate amorphous cell
ac = poly.amorphous_cell(copoly, data['input_nchain'], density=data['ini_density'])
# Generate block copolymer chain
elif len(mols) > 1 and data['copoly_type'] == 'block':
n_list = [round(n*(x/sum(ratio))) for x in ratio]
copoly = poly.block_copolymerize_rw(mols, n_list, tacticity=data['input_tacticity'])
copoly = poly.terminate_rw(copoly, ter)
data['tacticity'] = poly.get_tacticity(copoly)
# Force field assignment
result = ff.ff_assign(copoly)
if not result:
data['remarks'] += '[ERROR: Can not assign force field parameters.]'
utils.pickle_dump(copoly, os.path.join(save_dir, 'polymer.pickle'))
# Generate amorphous cell
ac = poly.amorphous_cell(copoly, data['input_nchain'], density=data['ini_density'])
utils.pickle_dump(ac, os.path.join(save_dir, 'amorphous.pickle'))
# Input data and monomer properties are outputted
poly_stats_df = poly.polymer_stats(ac, df=True)
data_df = pd.concat([pd.DataFrame(data, index=[0]), poly_stats_df], axis=1).set_index('DBID')
data_df.to_csv(os.path.join(save_dir, 'input_data.csv'))
# Equilibration MD
eqmd = eq.EQ21step(ac, work_dir=work_dir)
ac = eqmd.exec(temp=data['temp'], press=data['press'], mpi=mpi, omp=omp, gpu=gpu)
analy = eqmd.analyze()
prop_data = analy.get_all_prop(temp=data['temp'], press=data['press'], save=True)
result = analy.check_eq()
# Additional equilibration MD
for i in range(retry_eq):
if result: break
eqmd = eq.Additional(ac, work_dir=work_dir)
ac = eqmd.exec(temp=data['temp'], press=data['press'], mpi=mpi, omp=omp, gpu=gpu)
analy = eqmd.analyze()
prop_data = analy.get_all_prop(temp=data['temp'], press=data['press'], save=True)
result = analy.check_eq()
# Calculate refractive index
polarizability = [data[x] for x in data.keys() if 'qm_polarizability_monomer' in str(x)]
mol_weight = [data[x] for x in data.keys() if 'mol_weight_monomer' in str(x)]
prop_data['refractive_index'] = calc.refractive_index(polarizability, prop_data['density'], mol_weight, ratio=ratio)
data_df.loc[data['DBID'], 'check_eq'] = result
data_df.loc[data['DBID'], 'do_TC'] = result
if not result:
data_df.loc[data['DBID'], 'remarks'] += '[ERROR: Did not reach an equilibrium state.]'
if prop_data['nematic_order_parameter'] >= 0.1:
data_df.loc[data['DBID'], 'remarks'] += '[ERROR: The system is partially oriented.]'
data_df.loc[data['DBID'], 'do_TC'] = False
# Data output after equilibration MD
eq_data_df = pd.concat([data_df, | pd.DataFrame(prop_data, index=[data['DBID']]) | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.dates import DateFormatter
from matplotlib.ticker import FormatStrFormatter
def history_to_png(history_file, output_file, period=None):
history = pd.read_csv(history_file, parse_dates=True, index_col=0)
if period is None:
history = history.loc[history.index > pd.to_datetime("now") - pd.to_timedelta("30D")]
elif isinstance(period, str):
try:
history = history.loc[history.index > pd.to_datetime(period)]
except pd.errors.ParserError:
history = history.loc[history.index > pd.to_datetime("now") - pd.to_timedelta(period)]
elif isinstance(period, pd.Timestamp):
history = history.loc[history.index > period]
elif isinstance(period, pd.Timedelta):
history = history.loc[history.index > pd.to_datetime("now") - period]
elif isinstance(period, tuple):
start, end = period
if isinstance(start, str) and isinstance(end, str):
history = history.loc[ | pd.to_datetime(end) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Functions for collecting data from swehockey
"""
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import time
from datetime import datetime
def getGames(df_ids):
"""
Get all games from list of ids
Output is dataframe with all games
"""
id_list = df_ids['schedule_id']
data=[]
# Loop over all players
for index, schedule_id in enumerate(id_list):
url = 'http://stats.swehockey.se/ScheduleAndResults/Schedule/' + schedule_id
# print('Collects data from ' + url)
df_games = pd.read_html(url)[2]
# Select relevant columns and rename (structure of table changed from season 18/19)
if df_games.columns[0][1]=='Round':
df_games = df_games.iloc[:,[1,2,3,4,5]]
else:
df_games = df_games.iloc[:,[1,3,4,5,6]]
df_games.columns = ['date', 'game', 'score', 'periodscore', 'spectators']
# Adjust date column; remove time and fill empty rows with previous value
df_games['date'] = df_games['date'].map(lambda x: str(x)[:-5])
df_games['date'] = df_games['date'].replace('', np.nan).ffill(axis=0)
df_games['schedule_id'] = schedule_id
# Extract game-id (found in href - javascript)
agent = {"User-Agent":"Mozilla/5.0"}
page = requests.get(url, headers=agent)
soup = BeautifulSoup(page.text, 'html.parser')
address_class = soup.find_all('td', {"class": ["tdOdd standardPaddingTop", "tdNormal standardPaddingTop",
"tdOdd paddingTop", "tdNormal paddingTop"]})
all_a = []
for row in address_class:
if row.find('a') is not None:
all_a.append(str(row.find('a')))
df_id = pd.DataFrame(all_a, columns=['href'])
#Split string, only keep ID
df_id['href'] = df_id['href'].str.split(">", n=0, expand=True)
df_id['game_id'] = df_id['href'].str.extract('(\d+)')
df_games=pd.concat([df_games,df_id['game_id']], axis=1)
data.append(df_games)
#print(schedule_id, " collected")
games=pd.concat(data)
# Add season and leaugue
games = pd.merge(games, df_ids, on='schedule_id', how='left')
return games
def getPeriodWinner(homescore, awayscore):
"""
Function to determine periodwinner
"""
if (homescore==awayscore):
return 'draw'
elif (homescore>awayscore):
return 'home'
elif (homescore<awayscore):
return 'away'
else:
return None
def replaceTeamName(teamname):
"""
Replace teamnames that are not consistently named
"""
if ((teamname == 'A I K IF') | (teamname == 'AIK IF')):
return 'AIK'
elif (teamname=='Bofors IK Karlskoga'):
return 'Bofors IK'
elif ((teamname=='Färjestad BKMatchstart ca 20.30') |(teamname=='Färjestads BK')) :
return 'Färjestad BK'
elif (teamname=='Linköpings HC'):
return 'Linköping HC'
elif (teamname=='VIK Västerås HK'):
return 'Västerås IK'
else:
return teamname
def cleanGames(df_games):
"""
Clean output from getGames into data for analysis
"""
df_games[['home', 'away']] = df_games.game.str.split('-', expand = True, n=2)
df_games[['score_home', 'score_away']] = df_games.score.str.split('-', expand = True, n=2)
df_games.columns = df_games.columns.str.strip()
df_games['home'] = df_games['home'].str.strip()
df_games['away'] = df_games['away'].str.strip()
df_games['home'] = df_games.apply(lambda x: replaceTeamName(x.home), axis=1)
df_games['away'] = df_games.apply(lambda x: replaceTeamName(x.away), axis=1)
# Periodscore
df_games['periodscore'] = df_games['periodscore'].str.strip('()')
df_games[['p1score', 'p2score', 'p3score', 'p4score', 'p5score']] = df_games.periodscore.str.split(',', expand = True, n=4)
df_games[['p1score_home', 'p1score_away']] = df_games.p1score.str.split('-', expand = True, n=2)
df_games[['p2score_home', 'p2score_away']] = df_games.p2score.str.split('-', expand = True, n=2)
df_games[['p3score_home', 'p3score_away']] = df_games.p3score.str.split('-', expand = True, n=2)
df_games[['p4score_home', 'p4score_away']] = df_games.p4score.str.split('-', expand = True, n=2)
df_games[['p5score_home', 'p5score_away']] = df_games.p5score.str.split('-', expand = True, n=2)
cols_to_num = ['score_home', 'score_away', 'p1score_home', 'p1score_away',
'p2score_home', 'p2score_away', 'p3score_home', 'p3score_away',
'p4score_home', 'p4score_away', 'p5score_home', 'p5score_away']
df_games[cols_to_num] = df_games[cols_to_num].apply(lambda x: x.str.strip())
df_games[cols_to_num] = df_games[cols_to_num].apply(pd.to_numeric, errors='coerce')
df_games.loc[ | pd.notna(df_games['p4score']) | pandas.notna |
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas.core.internals import ExtensionBlock
from .base import BaseExtensionTests
class BaseReshapingTests(BaseExtensionTests):
"""Tests for reshaping and concatenation."""
@pytest.mark.parametrize('in_frame', [True, False])
def test_concat(self, data, in_frame):
wrapped = | pd.Series(data) | pandas.Series |
import numpy as np
import pandas as pd
from . import time_utils as time
# Ordered list of feature columns kept for the learning dataset; order here
# fixes the column order of the transformed frame.  'num_bikes_available'
# (the prediction target) is deliberately commented out of the inputs.
desired_fields = [
    'last_reported',
    # 'num_bikes_available',
    'capacity',
    'day_of_week',
    'is_holiday',
    'season',
    'segment_of_day',
    'cloud_coverage',
    'condition',
    'condition_class',
    'humidity',
    'pressure',
    'rain',
    'snow',
    'temp',
    'wind_speed'
]
def flattenWeatherDict(data):
    """Flatten one raw OpenWeatherMap-style record into a flat dict.

    Percentages (humidity, cloud coverage) are rescaled to [0, 1];
    'rain'/'snow' volumes default to 0 when the 3h bucket is absent.
    """
    main = data['main']
    flat = {
        'weather_time': data['dt'],
        'temp': main['temp'],
        'pressure': main['pressure'],
        'humidity': main['humidity'] / 100,
        'cloud_coverage': data['clouds']['all'] / 100,
        'wind_speed': data['wind']['speed'],
    }
    # 'weather' is sometimes a list of conditions, sometimes a single dict;
    # either way only the first/only condition id is kept.
    condition = data['weather']
    if isinstance(condition, list):
        condition = condition[0]
    flat['condition'] = condition['id']
    flat['condition_class'] = flat['condition'] // 100
    # precipitation volumes are optional in the source payload
    for key in ('rain', 'snow'):
        try:
            flat[key] = data[key]['3h']
        except KeyError:
            flat[key] = 0
    return flat
def transform_time(data):
    """Expand a report timestamp into the derived temporal features.

    Delegates the calendar logic to the local ``time_utils`` helpers
    (segment of day, weekday, holiday flag, season).
    """
    features = {'last_reported': data}
    features['segment_of_day'] = time.segment_of_day(data)
    features['day_of_week'] = time.day_of_week(data)
    features['is_holiday'] = time.is_holiday(data)
    features['season'] = time.season(data)
    return features
def find_weather(weather, row):
    """Return the ``weather_time`` of the last weather record strictly
    before ``row['last_reported']``, or NaN when none exists."""
    cutoff = row['last_reported']
    earlier = weather[weather.weather_time < cutoff]
    try:
        return earlier.iloc[-1]['weather_time']
    except IndexError:
        # no weather record precedes this status report
        return np.nan
def remove_status_outliers(status):
    """Keep only status rows whose ``last_reported`` timestamp is a
    plausible epoch value (> 1); drops zero/placeholder timestamps."""
    valid = status['last_reported'] > 1
    return status[valid]
def transform_data(data):
status, meta, weather = data
status = pd.DataFrame(status)
meta = pd.DataFrame(meta)
status = remove_status_outliers(status)
weather = pd.DataFrame([flattenWeatherDict(w) for w in weather.values()])
# merge time data
merged = pd.merge(
status,
pd.DataFrame(
[transform_time(t) for t in status.last_reported.to_list()]
),
on='last_reported'
)
merged = | pd.merge(merged, meta, on='station_id') | pandas.merge |
# Author: <NAME>
"""Trains the models and saves the training and validation scores to a csv file.
Usage: model_selection.py --csv_path=<csv_path>
Options:
--csv_path=<csv_path> path and file name of the model scores csv file
"""
import os
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge, Lasso
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import (
cross_val_score,
cross_validate
)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
from docopt import docopt
opt = docopt(__doc__)
def main(csv_path):
# read train data
X_train = pd.read_csv("data/raw/X_train.csv", parse_dates=['year'])
X_train['year'] = X_train['year'].dt.year
y_train = pd.read_csv("data/raw/y_train.csv")
train_df = X_train.join(y_train.set_index('carID'), on = "carID")
# read test data
X_test = | pd.read_csv("data/raw/X_test.csv", parse_dates=['year']) | pandas.read_csv |
import numpy as np
import pandas as pd
import pathlib
from sklearn.model_selection import train_test_split
# Resolve the repository's data/ directory relative to this module.
# pathlib.Path instantiates the correct concrete class for the running OS;
# the original pathlib.WindowsPath raises NotImplementedError on POSIX.
file_path = pathlib.Path(__file__).parent.parent.joinpath('data/')
test_path = file_path.joinpath('test.csv')      # held-out test split
train_path = file_path.joinpath('train.csv')    # training split
def load_test_dataset() -> pd.DataFrame:
    """Read and return the held-out test split from ``data/test.csv``."""
    return pd.read_csv(test_path)
def load_train_dataset() -> pd.DataFrame:
    """Read and return the training split from ``data/train.csv``."""
    return pd.read_csv(train_path)
def get_countid_enocde(train, test, cols, name):
temp = train.groupby(cols)['case_id'].count().reset_index().rename(columns = {'case_id': name})
temp2 = test.groupby(cols)['case_id'].count().reset_index().rename(columns = {'case_id': name})
train = pd.merge(train, temp, how='left', on= cols)
test = | pd.merge(test,temp2, how='left', on= cols) | pandas.merge |
import numpy as np
import pandas as pd
class DataTransform:
    def __init__(self, data):
        # Raw scraped product table (pandas DataFrame).  Every transform
        # method below mutates this shared frame in place and returns it.
        self.data = data
def columns_pattern(self):
# get columns name from dataframe
columns = list(self.data.columns)
# remove spaces, specified characters and format to snake case
columns = list(map( lambda x: x.strip().lower().replace(' ', '_').replace(':', '').replace('.', ''), columns))
self.data.columns = columns
# for col in columns:
# self.data[col] = self.data[col].apply(
# lambda x: x.strip().lower().replace(' ', '_').replace(':', '')
# )
return self.data
def price_format(self):
# format price columns to float
self.data['product_price'] = self.data['product_price'].apply(
lambda x: float(x.replace('$', '').strip()) if pd.notnull(x) else x
)
return self.data
def str_values_pattern(self):
# select string subset of the dataframe
#data_cat = self.data.select_dtypes(include='str')
# get string columns name from dataframe
columns = list(self.data.select_dtypes(include='object').columns)
# remove spaces, specified characters and format to snake case
for col in columns:
self.data[col] = self.data[col].apply(
lambda x: x.strip().lower().replace(' ', '_') if pd.notnull(x) else x
)
return self.data
def model_code_feature(self):
# feature unique model code without color info
self.data['model_code'] = self.data['product_id'].apply(
lambda x: str(x[:-3])
)
# feature color code from products
self.data['color_code'] = self.data['product_id'].apply(
lambda x: str(x[-3:])
)
return self.data
def showroom_transform(self):
# format columns name if necessary
self.columns_pattern()
# format price columns to float
self.data = self.price_format()
# format string to pattern
self.str_values_pattern()
# feature model and color code to assist in merge
self.model_code_feature()
# rearrange columns in dataframe
self.data = self.data[['product_id', 'model_code', 'color_code', 'product_category',
'product_name', 'product_price', 'product_link']]
return self.data
def color_transform(self):
# format columns name if necessary
self.data = self.columns_pattern()
# format string to snake case
self.data = self.str_values_pattern()
# feature model and color code to assist in merge
self.data = self.model_code_feature()
# rearrange columns in dataframe
self.data = self.data[['product_id', 'model_code', 'color_code',
'color_name', 'product_link']]
return self.data
def composition_feature(self):
# composition data feature
data_composition = pd.DataFrame(
self.data['Composition'].str.strip().str.split('\n', expand=True)
).fillna(np.nan)
# empty list to fill with all compositions type
compositions_type = []
# iterable to find all compositions types
for i in range(len(data_composition.columns)):
compositions_type += data_composition[i].str.extract('(.+:)')[0].unique().tolist()
# result from all unique compositions type find in dataset
compositions_type = list(filter(pd.notna, list(set(compositions_type))))
# empty dataframe to store all composition type data
df_composition = pd.DataFrame(index=range(len(data_composition)))
# iterable to go through all composition dataframe columns
for i in range(len(data_composition.columns)):
# empty list to fill with all boolean that contain certain compositions type
all_comp_type = []
# iterable to create all composition type columns in df_composition
for comp_type in compositions_type:
contain_comp_type = data_composition[i].str.contains(comp_type, na=True)
all_comp_type.append(contain_comp_type)
df_composition.loc[contain_comp_type, comp_type] = data_composition.loc[contain_comp_type, i]
# create material columns from composition without type
contain_all_types = pd.DataFrame(all_comp_type).sum().apply(lambda x: False if x == 0 else True)
df_composition.loc[(~contain_all_types) &
(pd.notnull(data_composition[i])),
'material'] = data_composition.loc[(~contain_all_types) &
( | pd.notnull(data_composition[i]) | pandas.notnull |
"""
Functions used for pre-processing
"""
#import math
import pickle
#import copy
#import config
import os
# for multiprocessing
from functools import partial
from multiprocessing import Pool, cpu_count
from joblib import Parallel, delayed
import joblib
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
def load_results(filename):
    """Deserialise and return the pickled object stored at *filename*.

    ``encoding='bytes'`` keeps Python-2-era pickles loadable.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle, encoding='bytes')
def save_results(rootpath, filename, results):
    """Pickle *results* to ``rootpath + filename``, creating *rootpath* first.

    Args:
        rootpath: str -- directory prefix (callers pass a trailing '/')
        filename: str -- file name appended to *rootpath*
        results: object -- any picklable object

    Uses ``os.makedirs(..., exist_ok=True)`` so concurrent callers cannot
    race between the existence check and the creation (the original
    ``exists()``/``makedirs()`` pair could raise FileExistsError).
    """
    os.makedirs(rootpath, exist_ok=True)
    with open(rootpath + filename, 'wb') as file_to_save:
        pickle.dump(results, file_to_save)
########## Code to perform Principal Component Analysis (PCA) on a covariate ###################
def _as_dataframe(data, label):
    """Coerce *data* to a DataFrame (promoting a Series); raise ValueError otherwise."""
    if isinstance(data, pd.DataFrame):
        return data
    if isinstance(data, pd.Series):
        return data.to_frame()
    raise ValueError("{} needs to be a pandas dataframe".format(label))


def do_pca_on_covariate(df_train, df_test, n_components=10, location='pacific', var_id='sst'):
    """ Do PCA: learn PC loadings from training data,
    and project test data onto corresponding directions

    Args:
        df_train: multi-index (lat, lon, start_date) pandas dataframe
            -- Training data used to compute principal axes in feature space
        df_test: multi-index pandas dataframe -- Test data (projected only)
        n_components: int -- Number of components to keep
        location: str -- location indicator of the climate variable (column prefix)
        var_id: str -- climate variable to process (column prefix)
    Returns:
        df1: pandas dataframe -- PCs for training data
        df2: pandas dataframe -- PCs for test data
    Raises:
        ValueError: if an input is neither DataFrame nor Series, or its
            multi-index has fewer than 3 levels [lat, lon, start_date]
    """
    # validation was duplicated for train/test in the original; factored out
    df_train = _as_dataframe(df_train, "Training data")
    df_test = _as_dataframe(df_test, "Test data")
    # check dataframe level!
    if len(df_train.index.names) < 3 or len(df_test.index.names) < 3:
        raise ValueError("Multiindex dataframe includes 3 levels: [lat,lon,start_date]")
    # flatten: one sample per date, one feature per (lat, lon) grid cell
    df_train_flat = df_train.unstack(level=[0, 1])
    df_test_flat = df_test.unstack(level=[0, 1])
    x_train = df_train_flat.to_numpy()
    x_test = df_test_flat.to_numpy()
    # Replace NaNs by 0 explicitly.  The original `np.nan_to_num(x, 0)` passed
    # 0 positionally as the `copy` argument (falsy -> in-place mutation) and
    # worked only by accident; rebinding the keyword form makes the intent clear.
    if np.isnan(x_train).any():
        x_train = np.nan_to_num(x_train, nan=0.0)
    if np.isnan(x_test).any():
        x_test = np.nan_to_num(x_test, nan=0.0)
    # Fit loadings on the training period only, then project both sets
    pca = PCA(n_components=n_components)
    pca_train = pca.fit_transform(x_train)
    pca_test = pca.transform(x_test)
    # Convert PCs of Xtrain and Xtest to pandas dataframes
    col = ['{}_{}_pca_{}'.format(location, var_id, i) for i in range(n_components)]
    df1 = pd.DataFrame(data=pca_train,
                       columns=col,
                       index=df_train_flat.index)
    df2 = pd.DataFrame(data=pca_test,
                       columns=col,
                       index=df_test_flat.index)
    return (df1, df2)
def get_pca_from_covariate(rootpath,
                           data,
                           var_name, var_location,
                           train_start, train_end,
                           test_start, test_end,
                           n_components=10):
    """ Apply PCA on a spatial-temporal climate covariate (e.g. SST) and
    persist the stacked train+test PCs to HDF5.

    Args:
        rootpath: str -- directory to save the results
        data: multi-index pandas dataframe -- raw covariate to apply PCA to
        var_name: str -- covariate name (column in *data*)
        var_location: str -- covariate location (pacific, atlantic, us, global)
        train_start, train_end: pd.Timestamp() -- training period bounds
        test_start, test_end: pd.Timestamp() -- test period bounds
        n_components: int -- number of principal components to keep
    Raises:
        ValueError: on non-DataFrame input or out-of-range date bounds
    """
    idx = pd.IndexSlice
    # check the legitimate of the given parameters
    if not isinstance(data, pd.DataFrame):
        if isinstance(data, pd.Series):
            data = data.to_frame()  # convert pd.Series to pd.DataFrame
        else:
            raise ValueError("Covariate needs to be a pandas multiindex dataframe")
    # check if the train start date and the train end date is out of range
    if train_start < data.index.get_level_values('start_date')[0]:
        raise ValueError("Train start date is out of range!")
    if train_end > data.index.get_level_values('start_date')[-1]:
        raise ValueError("Train end date is out of range!")
    # check if the test start date and the test end date is out of range
    if test_start < train_start:
        raise ValueError("Test start date is out of range!")
    if test_end < train_end or test_end > data.index.get_level_values('start_date')[-1]:
        raise ValueError("Test end date is out of range!")
    print('create training-test split')
    train_x = data.loc[idx[:, :, train_start:train_end], :]
    test_x = data.loc[idx[:, :, test_start:test_end], :]
    # start PCA: fit on train, project test (see do_pca_on_covariate)
    print('start pca')
    train_x_pca, test_x_pca = do_pca_on_covariate(train_x[var_name], test_x[var_name],
                                                  n_components, var_location, var_name)
    # save PCA data.  DataFrame.append was removed in pandas 2.0;
    # pd.concat is the supported equivalent.
    all_x_pca = pd.concat([train_x_pca, test_x_pca])
    all_x_pca.to_hdf(rootpath + '{}_{}_pca_all.h5'.format(var_location, var_name),
                     key=var_name, mode='w')
########## Code to perform z-score on a time-series using long-term mean and std ############################
def get_mean(df1, var_id='tmp2m', date_id='start_date'):
    """Attach per-calendar-day climatology columns to *df1*.

    Adds ``month``/``day`` columns derived from the *date_id* index level,
    plus ``<var_id>_daily_mean`` and ``<var_id>_daily_std`` computed over all
    rows sharing the same (month, day).  Remaining NaNs (e.g. the std of a
    single-sample day) are filled with 0.  Returns a new frame; the input
    is not mutated.
    """
    level = df1.index.names.index(date_id)
    dates = df1.index.get_level_values(level)
    df1 = df1.assign(month=dates.month, day=dates.day)
    by_day = df1.groupby(['month', 'day'])[var_id]
    df1['{}_daily_mean'.format(var_id)] = by_day.transform('mean')
    df1['{}_daily_std'.format(var_id)] = by_day.transform('std')
    return df1.fillna(0)
def add_month_day(df1, date_id='start_date'):
    """Return *df1* with ``month`` and ``day`` columns extracted from the
    *date_id* index level.  The input frame is not mutated."""
    level = df1.index.names.index(date_id)
    dates = df1.index.get_level_values(level)
    return df1.assign(month=dates.month, day=dates.day)
def zscore_temporal(rootpath,
                    data,
                    var,
                    train_start='1986-01-01', train_end='2016-12-31',
                    test_start='2017-01-01', test_end='2018-12-31',
                    date_id='start_date'):
    """ Z-score a purely temporal series (no spatial index), e.g. the PCA
    of a covariate, against the training-period day-of-year climatology.

    Args:
        rootpath: str -- directory to save the results
        data: pd.DataFrame -- frame containing the column *var*
        var: str -- variable/column name to standardise
        train_start, train_end: str -- training period bounds
        test_start, test_end: str -- test period bounds
        date_id: str -- index column name for date
    Raises:
        ValueError: if *data* is neither a DataFrame nor a Series
    """
    # check the legitimate of the given parameters
    if not isinstance(data, pd.DataFrame) and not isinstance(data, pd.Series):
        raise ValueError("Data needs to be a pandas dataframe/series.")
    idx = pd.IndexSlice
    target = data[var].to_frame()
    print('pre-process: {}'.format(var))
    df1 = target.loc[idx[train_start:train_end], :]  # train
    df2 = target.loc[idx[test_start:test_end], :]  # test
    # per-(month, day) mean/std learned on the training period only
    df1 = get_mean(df1, var)
    # first element of each group: the climatology table, one row per (month, day)
    month_day = df1.groupby(['month', 'day']).first()
    month_day = month_day.reset_index()
    # attach month/day keys to the test frame and join the train climatology
    df2 = add_month_day(df2)
    df2.reset_index(level=0, inplace=True)
    var_cols = ['{}_daily_{}'.format(var, col_type) for col_type in ['mean', 'std']]
    df2 = df2.merge(month_day[['month', 'day'] + var_cols], how='left', on=['month', 'day'])
    df2 = df2.sort_values(by=[date_id])
    df2 = df2.set_index([date_id])  # add the date index back
    df1[var + '_zscore'] = (df1[var] - df1['{}_daily_mean'.format(var)]) / df1['{}_daily_std'.format(var)]
    df2[var + '_zscore'] = (df2[var] - df2['{}_daily_mean'.format(var)]) / df2['{}_daily_std'.format(var)]
    # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent
    df_all = pd.concat([df1, df2])
    df_all.to_hdf(rootpath + '{}_zscore.h5'.format(var), key=var, mode='w')
def zscore_spatial_temporal(rootpath,
                            target, var_id='tmp2m',
                            train_start='1986-01-01', train_end='2016-12-31',
                            test_start='2017-01-01', test_end='2018-12-31',
                            date_id='start_date'):
    """ Z-score a spatial-temporal climate variable (e.g. the target tmp2m)
    per grid cell, against the training-period day-of-year climatology.

    Args:
        rootpath: str -- directory to save the results
        target: multi-index (lat, lon, start_date) pd.DataFrame with *var_id*
        var_id: str -- variable name
        train_start, train_end: str -- training period bounds
        test_start, test_end: str -- test period bounds
        date_id: str -- index level name for time/date
    """
    idx = pd.IndexSlice
    df1 = target.loc[idx[:, :, train_start:train_end], :]  # train
    df2 = target.loc[idx[:, :, test_start:test_end], :]  # test
    # ---- Day-Month Mean of each location ---- #
    # per-cell climatology: month/day columns plus per-date mean and std
    df1 = df1.groupby(['lat', 'lon']).apply(lambda df: get_mean(df, var_id, date_id))
    # first element of each group: the (lat, lon, month, day) climatology table
    month_day = df1.groupby(['lat', 'lon', 'month', 'day']).first()
    month_day = month_day.reset_index()
    # attach month/day keys to the test frame and join the train climatology
    df2 = df2.groupby(['lat', 'lon']).apply(lambda df: add_month_day(df, date_id))
    df2.reset_index(level=2, inplace=True)
    var_cols = ['{}_daily_{}'.format(var_id, col_type) for col_type in ['mean', 'std']]
    df2 = df2.merge(month_day[['lat', 'lon', 'month', 'day'] + var_cols],
                    how='left', on=['lat', 'lon', 'month', 'day'])
    df2 = df2.sort_values(by=['lat', 'lon', date_id])
    df2 = df2.set_index(['lat', 'lon', date_id])  # add multi-index back
    df1[var_id+'_zscore'] = (df1[var_id] - df1['{}_daily_mean'.format(var_id)])/df1['{}_daily_std'.format(var_id)]
    df2[var_id+'_zscore'] = (df2[var_id] - df2['{}_daily_mean'.format(var_id)])/df2['{}_daily_std'.format(var_id)]
    # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent
    df_all = pd.concat([df1, df2])
    df_all.sort_index(level=['lat', 'lon'], inplace=True)
    df_all.to_hdf(rootpath + 'target_{}_multitask_zscore.h5'.format(var_id), key=var_id, mode='w')
############## train-validation split ##################
def create_sequence_custom(today, time_frame, covariate_map, past_years=2,
                           curr_shift_days=[7, 14, 28], past_shift_days=[7, 14, 28]):
    """ Feature aggregation: gather covariate values from past dates.

    Args:
        today: pd.Timestamp() -- the date to aggregate features for
        time_frame: pandas dataframe -- maps each date to a row position
            into *covariate_map*
        covariate_map: numpy array -- data/feature pool to aggregate from
        past_years: int -- number of past years to include
        curr_shift_days: list of int -- past-day offsets in the current year
            (defaults are never mutated, so the shared list default is safe)
        past_shift_days: list of int -- day offsets applied both backwards
            and forwards around the same date in each past year
    Return:
        agg_x: numpy array -- aggregated features ordered oldest to newest
    """
    # current year: today plus its recent past neighbours (newest first)
    dates = [today] + [today - pd.DateOffset(days=d) for d in curr_shift_days]
    anchor = today
    for _ in range(past_years):
        # same calendar date one more year back, with neighbours on both sides
        anchor = anchor - pd.DateOffset(years=1)
        older = [anchor - pd.DateOffset(days=d) for d in past_shift_days]
        newer = [anchor + pd.DateOffset(days=d) for d in reversed(past_shift_days)]
        dates = dates + newer + [anchor] + older
    dates.reverse()  # reorder the sequence from oldest to newest
    rows = time_frame.loc[dates]
    return covariate_map[rows.values].squeeze()
def get_test_train_index_seasonal(test_start, test_end, train_range=10, past_years=2, gap=28):
""" Construct train/test time index used to split training and test dataset
Args:
test_start, test_end: pd.Timestamp() -- the start date and the end date of the test set
train_range: int -- the length (years) to be included in the training set
past_years: int -- the length (years) of features in the past to be included
gap: int -- number of days between the date in X and date in y
Return:
test_start_shift: pd.Timestamp() -- new start date for test
after including # of years in the past
train_start_shift:pd.Timestamp() -- new start date for training
after including # of years in the past
train_time_index: list of pd.Timestamp() -- time index for training set
"""
test_start_shift = test_start - pd.DateOffset(years=train_range + past_years, days=gap)
# handles the train time indices
# you need to gap 28 days to predict Feb-01 standing on Jan-03
train_end = test_start - | pd.DateOffset(days=gap) | pandas.DateOffset |
from . import logger
import pandas as pd
from neslter.parsing.files import Resolver, find_file
def read_product_csv(path):
"""file must exist and be a CSV file"""
df = | pd.read_csv(path, index_col=None, encoding='utf-8') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# TODO: сделать детализацию счета и заказать в html/excel
# замаскировать телефоны
# сделать обработку excel на pandas: Analysis of account detail (excel)
import zipfile
with zipfile.ZipFile('Doc_df7c89c378c04e8daf69257ea95d9a2e.zip') as f:
data_file = f.read('Doc_df7c89c378c04e8daf69257ea95d9a2e.html')
from bs4 import BeautifulSoup
root = BeautifulSoup(data_file, 'lxml')
records = []
for tr in root.select("table > tbody > tr"):
td_list = tr.select("td")
record = [
td_list[1].text, # 'Дата'
td_list[2].text, # 'Время'
td_list[3].text, # 'GMT'
td_list[4].text, # 'Номер'
# td_list[5].text, # 'Зона вызова'
# td_list[6].text, # 'Зона направления вызова/номер сессии'
td_list[7].text, # 'Услуга'
td_list[9].text, # 'Длительность/Объем (мин.:сек.)/(Kb)'
float(td_list[10].text.replace(',', '.')), # 'Стоимость руб. без НДС'
]
records.append(record)
columns = [
'Дата', 'Время', 'GMT', 'Номер',
# 'Зона вызова', 'Зона направления вызова/номер сессии',
'Услуга', 'Длительность/Объем (мин.:сек.)/(Kb)', 'Стоимость руб. без НДС'
]
import pandas as pd
df = | pd.DataFrame(data=records, columns=columns) | pandas.DataFrame |
# ----------------------------------------------------------------------------------
# # Presenting Word Frequency Results
# ----------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
print("TOTAL RESULTS")
tot_results = | pd.read_csv('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/resultsData/percent_totals_long_terms_Bitcoin.csv') | pandas.read_csv |
import pandas
import os
import config.config_reader as cr
class workload_info_connector(object):
"""description of class"""
# workload_data = None
# file_path = ""
def __init__(self, file_path):
temp_cr = cr.config_reader()
self.workload_mix = temp_cr.group_name_vec
self.workload_header = ["model_info",
"total_population",
"status"] + self.workload_mix
self.predict_header = ["pred_result"]
self.result_header = ["fitness",
"output_mean",
"result"]
self.workload_info_header = self.workload_header + self.predict_header + self.result_header
if os.path.exists(file_path):
self.file_path = file_path
self.workload_data = | pandas.read_csv(file_path) | pandas.read_csv |
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
    # Shared fixture and identity/pickle/copy tests for custom business-month
    # offsets; concrete subclasses set ``_offset`` to CBMonthEnd or CBMonthBegin.
    def setup_method(self, method):
        # Fresh reference date plus 1x and 2x offsets for every test.
        self.d = datetime(2008, 1, 1)
        self.offset = self._offset()
        self.offset1 = self.offset
        self.offset2 = self._offset(2)
    def test_eq(self):
        assert self.offset2 == self.offset2
    def test_mul(self):
        pass
    def test_hash(self):
        # equal offsets must hash equal (dict/set usability)
        assert hash(self.offset2) == hash(self.offset2)
    def test_roundtrip_pickle(self):
        # offsets must survive a pickle round-trip unchanged (1x, 2x, multiplied)
        def _check_roundtrip(obj):
            unpickled = tm.round_trip_pickle(obj)
            assert unpickled == obj
        _check_roundtrip(self._offset())
        _check_roundtrip(self._offset(2))
        _check_roundtrip(self._offset() * 2)
    def test_copy(self):
        # GH 17452
        off = self._offset(weekmask="Mon Wed Fri")
        assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
    # Behavioural tests for CBMonthEnd: rollback/rollforward, offset
    # arithmetic, holiday-aware calendars, and DatetimeIndex generation.
    _offset = CBMonthEnd
    def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` does not match
        offset = self._offset()
        offset2 = self._offset(normalize=True)
        assert offset != offset2
    def test_repr(self):
        assert repr(self.offset) == "<CustomBusinessMonthEnd>"
        assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
    def test_call(self):
        with tm.assert_produces_warning(FutureWarning):
            # GH#34171 DateOffset.__call__ is deprecated
            assert self.offset2(self.d) == datetime(2008, 2, 29)
    def testRollback1(self):
        assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
    def testRollback2(self):
        assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
    def testRollforward1(self):
        assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
    def test_roll_date_object(self):
        # roll from a plain datetime.date, not just Timestamp/datetime
        offset = CBMonthEnd()
        dt = date(2012, 9, 15)
        result = offset.rollback(dt)
        assert result == datetime(2012, 8, 31)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 28)
        # Day() is already on-offset for any date: rolling is a no-op
        offset = offsets.Day()
        result = offset.rollback(dt)
        assert result == datetime(2012, 9, 15)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 15)
    # (offset, date, expected is_on_offset) triples
    on_offset_cases = [
        (CBMonthEnd(), datetime(2008, 1, 31), True),
        (CBMonthEnd(), datetime(2008, 1, 1), False),
    ]
    @pytest.mark.parametrize("case", on_offset_cases)
    def test_is_on_offset(self, case):
        offset, d, expected = case
        assert_is_on_offset(offset, d, expected)
    # (offset, {input date: expected date}) pairs covering +/-1x, +/-2x and 0x
    apply_cases: _ApplyCases = [
        (
            CBMonthEnd(),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 31),
                datetime(2008, 2, 7): datetime(2008, 2, 29),
            },
        ),
        (
            2 * CBMonthEnd(),
            {
                datetime(2008, 1, 1): datetime(2008, 2, 29),
                datetime(2008, 2, 7): datetime(2008, 3, 31),
            },
        ),
        (
            -CBMonthEnd(),
            {
                datetime(2008, 1, 1): datetime(2007, 12, 31),
                datetime(2008, 2, 8): datetime(2008, 1, 31),
            },
        ),
        (
            -2 * CBMonthEnd(),
            {
                datetime(2008, 1, 1): datetime(2007, 11, 30),
                datetime(2008, 2, 9): datetime(2007, 12, 31),
            },
        ),
        (
            CBMonthEnd(0),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 31),
                datetime(2008, 2, 7): datetime(2008, 2, 29),
            },
        ),
    ]
    @pytest.mark.parametrize("case", apply_cases)
    def test_apply(self, case):
        offset, cases = case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)
    def test_apply_large_n(self):
        dt = datetime(2012, 10, 23)
        result = dt + CBMonthEnd(10)
        assert result == datetime(2013, 7, 31)
        # adding then subtracting the same CDay(100) must round-trip
        result = dt + CDay(100) - CDay(100)
        assert result == dt
        off = CBMonthEnd() * 6
        rs = datetime(2012, 1, 1) - off
        xp = datetime(2011, 7, 29)
        assert rs == xp
        st = datetime(2011, 12, 18)
        rs = st + off
        xp = datetime(2012, 5, 31)
        assert rs == xp
    def test_holidays(self):
        # Define a TradingDay offset
        # holidays may be given as str, datetime, or np.datetime64
        holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
        bm_offset = CBMonthEnd(holidays=holidays)
        dt = datetime(2012, 1, 1)
        assert dt + bm_offset == datetime(2012, 1, 30)
        assert dt + 2 * bm_offset == datetime(2012, 2, 27)
    @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
    def test_datetimeindex(self):
        # NOTE(review): USFederalHolidayCalendar is already imported at module
        # level; this local re-import looks redundant -- confirm before removing.
        from pandas.tseries.holiday import USFederalHolidayCalendar
        hcal = USFederalHolidayCalendar()
        freq = CBMonthEnd(calendar=hcal)
        assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
            0
        ] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
| assert_offset_equal(offset, base, expected) | pandas.tests.tseries.offsets.common.assert_offset_equal |
r"""
Baseline Calculation
"""
# Standard Library imports
import argparse
import cartopy.crs as ccrs
import datetime
import h5py
import json
import matplotlib.colors
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import os
import pandas as pd
import re
import scipy.optimize
import warnings
import sys
import xarray as xr
# Third party imports
from collections import OrderedDict
# Semi-local imports
import name_qch4_couple.io
import name_qch4_couple.name
import name_qch4_couple.plot
import name_qch4_couple.plot_h2
import name_qch4_couple.region_EU
import name_qch4_couple.routines
import name_qch4_couple.util
# Local imports
import routines
import chem_co
# =============================================================================
# Settings
# =============================================================================
# Argument Parser
parser = argparse.ArgumentParser()
parser.add_argument("-site", required=True)
parser.add_argument("-species", required=True)
parser.add_argument("-year", required=True, type=int)
parser.add_argument("-window1", required=True, type=int)
parser.add_argument("-window2", required=True, type=int)
parser.add_argument("-force", required=True, type=int)
parser.add_argument("-odir", required=True)
args = parser.parse_args()
site = args.site
year = args.year
species = args.species
window1 = args.window1
window2 = args.window2
force_compute = bool(args.force)
odir = args.odir
#mode = 'all'
site_ref = 'mhd' # HDF reference key
p = 101325 # Pa
T = 288.15 # K
ofile_con = f'condition-{site}-{species}-{year}.nc'
ofile_fil = f'filtered-{site}-{species}-{year}.nc'
ofile_fin = f'baseline-{site}-{species}-{year}.nc'
long_names = OrderedDict()
locations = OrderedDict()
with open(f'inputs/baseline/{site}.json', 'r') as f:
st_info = json.load(f)
long_names[site] = st_info['long_name']
locations[site] = st_info['location']
site1 = site.replace('_', '-')
date_nodash = 'REPLACE'
if species == 'ch4':
#from chem_co import read_Q, read_obs, read_baseline
var_name="chi_CH4"
Dfile = (
f'inputs/baseline/footprints_mhd/'
f'{site1}_UKV_EUROPE_{date_nodash}.nc'
)
Q2obs = 1.e9 / 16.043 # M_H2 (g mol-1) - IUPAC
ylabel = u'$\chi$ CH$_{4}$ (nmol mol$^{-1}$)'
ylim = [1800., 2400.]
yticks = np.arange(1800., 2400., 50.)
var_long_name = 'mole_fraction_of_hydrogen'
var_units = 'nmol mol-1'
# =============================================================================
# Pre-processing
# =============================================================================
print(f'Initialising')
print(f' site = {site}')
print(f' year = {year}')
# Dates
dt1 = pd.to_timedelta(window1//2, 'H')
dt2 = | pd.to_timedelta(window2//2, 'H') | pandas.to_timedelta |
import numpy as np
import pandas as pd
def generate_dataset(coeffs, n, std_dev, intercept=0., distribution='normal', binary=False, seed=None, **kwargs):
"""Generate an artificial dataset
:param coeffs: List of coefficients to use for computing the ouytput variable.
:type coeffs: :obj:`list`
:param n: Number of observations to generate.
:type n: :obj:`int`
:param std_dev: Standard deviation of the distribution.
:type std_dev: :obj:`list`
:param intercept: Value of the intercept to be set, defaults to 0.
:type intercept: :obj:`float`, optional
:param distribution: Type of distribution to use for generating the input variables, defaults to 'normal'. Can be:
* `normal`: :math:`X \\sim \\mathcal{N}(\\mu, \\sigma^{2})`
* `unirform`: :math:`X \\sim \\mathcal{U}_{[\\text{low}, \\text{high}]}`
:type distribution: :obj:`str`, optional
:param binary: Define if output is binary, defaults to False.
:type binary: :obj:`bool`, optional
:param seed: Random seed, defaults to None.
:type seed: :obj:`int`, optional
:param \*\*kwargs: Arguments to be passed in the distribution function. Can be:
* `normal`: :obj:`loc` = :math:`\\mu` and :obj:`scale` = :math:`\\sigma^{2}`
* `uniform`: :obj:`low` and :obj:`high`
:return: DataFrame with output variable named as :obj:`Y` and covariates as :obj:`X0`, :obj:`X1`, :obj:`X2`, ...
:rtype: :obj:`pandas.DataFrame`
"""
rdm = np.random.RandomState(seed) if seed else np.random
# We calculate the number of predictors, and create a coefficient matrix
# With `p` rows and 1 column, for matrix multiplication
p = len(coeffs)
params = | pd.DataFrame({'coeff': coeffs, 'std_dev': std_dev}) | pandas.DataFrame |
"""
STATUS:OK for 1sec timeframe. NOK for 1Min timeframe but the failure cases are skipped.
BUG:issue with leakage with timeframe=1Min for some conditions (cf. tests symbols= [S_TEST_F1, S_TEST_F2])
NOTE: to run tests with expected failure, add the --runxfail option to pytest
"""
import pytest
import random
import numpy as np
import pandas as pd
import os
import pymarketstore as pymkts
from . import utils
client = pymkts.Client(f"http://127.0.0.1:{os.getenv('MARKETSTORE_PORT',5993)}/rpc",
grpc=(os.getenv("USE_GRPC", "false") == "true"))
@pytest.mark.parametrize(
"symbol, timeframe, data, index, nanoseconds, start, end",
[
################################################################################
# 1Min timeframe
################################################################################
# without nanoseconds
################################################################################
(
"S_TEST_1",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:30", "2016-01-01 10:01:59"],
None,
"2016-01-01 10:01:00",
"2016-01-01 10:01:40",
),
pytest.param(
# BUG
# for 1Min timeframe, the query will return all the ticks from start_dt to
# the end of timeframe of end_dt
# input df
# Bid Ask
# Epoch
# 2016-01-01 10:01:30+00:00 0 0
# 2016-01-01 10:01:50+00:00 1 1
# 2016-01-01 10:02:10+00:00 2 2
#
# filtered input df
# Empty DataFrame
# Columns: [Bid, Ask]
# Index: []
#
# output df, postprocessed
# Bid Ask
# Epoch
# 2016-01-01 10:01:30+00:00 0 0
# 2016-01-01 10:01:50.999999995+00:00 1 1
#
# output df, raw
# Bid Ask Nanoseconds
# Epoch
# 2016-01-01 10:01:30+00:00 0 0 0
# 2016-01-01 10:01:50+00:00 1 1 999999995
# lengths do not match, inspect manually
# query before 1st timeframe and same year in same bucket
"S_TEST_F1",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:30", "2016-01-01 10:01:50", "2016-01-01 10:02:10"],
None,
"2016-01-01 10:01:00",
"2016-01-01 10:01:20",
marks=pytest.mark.xfail(reason="Known issue with 1Min timeframe.")
),
pytest.param(
# BUG same bug as above
# for 1Min timeframe, the query will return all the ticks from start_dt to
# the end of timeframe of end_dt
# input df
# Bid Ask
# Epoch
# 2016-01-01 10:00:00+00:00 0 0
# 2016-01-01 10:00:59+00:00 1 1
#
# filtered input df
# Empty DataFrame
# Columns: [Bid, Ask]
# Index: []
#
# output df, postprocessed
# Bid Ask
# Epoch
# 2016-01-01 10:00:59.999999990+00:00 1 1
#
# output df, raw
# Bid Ask Nanoseconds
# Epoch
# 2016-01-01 10:00:59+00:00 1 1 999999990
# lengths do not match, inspect manually
"S_TEST_F2",
"1Min",
dict(Bid=np.arange(2), Ask=np.arange(2)),
["2016-01-01 10:00:00", "2016-01-01 10:00:59"],
None,
"2016-01-01 10:00:10",
"2016-01-01 10:00:40",
marks=pytest.mark.xfail(reason="Known issue with 1Min timeframe.")
),
# tests cases close to the timeframe border
################################################################################
(
"S_TEST_4",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:00", "2016-01-01 10:02:00"],
None,
"2016-01-01 10:01:00",
"2016-01-01 10:01:00",
),
(
"S_TEST_5",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:00", "2016-01-01 10:02:00"],
None,
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
),
(
"S_TEST_6",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:00", "2016-01-01 10:02:01"],
None,
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
),
(
"S_TEST_7",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:02:01", "2016-01-01 10:02:01"],
None,
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
),
# with nanoseconds
################################################################################
(
"S_TEST_11",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:30", "2016-01-01 10:01:59"],
[0, 0, 0],
"2016-01-01 10:01:00",
"2016-01-01 10:01:40",
),
pytest.param(
"S_TEST_13",
"1Min",
dict(Bid=np.arange(4), Ask=np.arange(4)),
[
"2016-01-01 10:01:00",
"2016-01-01 10:01:59",
"2016-01-01 10:02:00",
"2016-01-01 10:02:01",
],
[0, 0, 0, 0],
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
marks=pytest.mark.xfail(reason="Known issue with 1Min timeframe.")
),
# tests cases close to the timeframe border
################################################################################
(
"S_TEST_14",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:00", "2016-01-01 10:02:00"],
[0, 0, 0],
"2016-01-01 10:01:00",
"2016-01-01 10:01:00",
),
(
"S_TEST_15",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:00", "2016-01-01 10:02:00"],
[0, 0, 0],
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
),
(
"S_TEST_16",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:01:00", "2016-01-01 10:02:01"],
[0, 0, 0],
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
),
(
"S_TEST_17",
"1Min",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:01:00", "2016-01-01 10:02:01", "2016-01-01 10:02:01"],
[0, 0, 0],
"2016-01-01 10:01:00",
"2016-01-01 10:02:00",
),
],
)
def test_overflow_query_with_simple_data_1Min(
symbol, timeframe, data, index, nanoseconds, start, end
):
client.destroy(tbk=f"{symbol}/{timeframe}/TICK")
start = pd.Timestamp(start, tz="utc")
end = pd.Timestamp(end, tz="utc")
in_df = utils.build_dataframe(
data,
pd.to_datetime(index, format="%Y-%m-%d %H:%M:%S").tz_localize("utc"),
nanoseconds=nanoseconds,
)
with_nanoseconds = nanoseconds is not None
ret = write_with_pymkts(
in_df, symbol, timeframe, extract_nanoseconds=with_nanoseconds
)
print(ret)
build_test(in_df, symbol, timeframe, start, end)
@pytest.mark.parametrize(
"symbol, timeframe, data, index, nanoseconds, start, end",
[
# without nanoseconds
################################################################################
(
"S_TEST_20",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:01", "2016-01-01 10:00:02"],
None,
"2016-01-01 10:00:01",
"2016-01-01 10:00:01",
),
(
"S_TEST_21",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:01", "2016-01-01 10:00:02"],
None,
"2016-01-01 10:00:01",
"2016-01-01 10:00:02",
),
(
"S_TEST_22",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:01", "2016-01-01 10:00:02"],
None,
"2016-01-01 10:00:01",
"2016-01-01 10:00:02",
),
(
"S_TEST_23",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:02", "2016-01-01 10:00:02"],
None,
"2016-01-01 10:00:01",
"2016-01-01 10:00:02",
),
# with nanoseconds
################################################################################
(
"S_TEST_30",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:01", "2016-01-01 10:00:02"],
[0, 0, 0],
"2016-01-01 10:00:01",
"2016-01-01 10:00:01",
),
(
"S_TEST_31",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:01", "2016-01-01 10:00:02"],
[0, 0, 0],
"2016-01-01 10:00:01",
"2016-01-01 10:00:02",
),
(
# SUCCESS of S_TEST_32: this was previously a BUG with 1Sec timeframe
# The old behaviour was not considering the nanosecond in the query parameter:
# input df
# Bid Ask
# Epoch
# 2016-01-01 10:00:01+00:00 0 0
# 2016-01-01 10:00:01+00:00 1 1
#
# output df, postprocessed
# Bid Ask
# Epoch
# 2016-01-01 10:00:01+00:00 0 0
# 2016-01-01 10:00:01+00:00 1 1
# 2016-01-01 10:00:02.000000001+00:00 2 2
#
# output df, raw
# Bid Ask Nanoseconds
# Epoch
# 2016-01-01 10:00:01+00:00 0 0 0
# 2016-01-01 10:00:01+00:00 1 1 0
# 2016-01-01 10:00:02+00:00 2 2 1
"S_TEST_32",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:01", "2016-01-01 10:00:02"],
[0, 0, 1],
"2016-01-01 10:00:01",
"2016-01-01 10:00:02",
),
(
# SUCCESS of S_TEST_33: this was previously a BUG with 1Sec timeframe
# The old behaviour was not considering the nanosecond in the query parameter:
# input df
# Bid Ask
# Epoch
# 2016-01-01 10:00:01+00:00 0 0
#
# output df, postprocessed
# Bid Ask
# Epoch
# 2016-01-01 10:00:01+00:00 0 0
# 2016-01-01 10:00:02.000000001+00:00 1 1
# 2016-01-01 10:00:02.000000001+00:00 2 2
#
# output df, raw
# Bid Ask Nanoseconds
# Epoch
# 2016-01-01 10:00:01+00:00 0 0 0
# 2016-01-01 10:00:02+00:00 1 1 1
# 2016-01-01 10:00:02+00:00 2 2 1
"S_TEST_33",
"1Sec",
dict(Bid=np.arange(3), Ask=np.arange(3)),
["2016-01-01 10:00:01", "2016-01-01 10:00:02", "2016-01-01 10:00:02"],
[0, 1, 1],
"2016-01-01 10:00:01",
"2016-01-01 10:00:02",
),
],
)
def test_overflow_query_with_simple_data_1Sec(
symbol, timeframe, data, index, nanoseconds, start, end
):
"""
NOTE
If nanoseconds==None, it will not be written. However, it might be implied by
AttributeGroup=TICK. The default nanoseconds value is to be investigated on the
marketsore side
"""
client.destroy(tbk=f"{symbol}/{timeframe}/TICK")
start = pd.Timestamp(start, tz="utc")
end = | pd.Timestamp(end, tz="utc") | pandas.Timestamp |
import sys
import random
import os
from lifelines import KaplanMeierFitter
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from lifelines import CoxPHFitter
from sklearn.metrics import average_precision_score, precision_recall_curve, roc_auc_score, roc_curve, auc, \
brier_score_loss, precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import roc_auc_score, make_scorer
from CI_Configs import runs
from UKBB_Functions import Filter_CZ,to_pickle,from_pickle
from sklearn.utils import resample
from LabData import config_global as config
from LabUtils.addloglevels import sethandlers
from LabQueue.qp import fakeqp
import os
USE_FAKE_QUE=True
CALC_CI_ONLY = False
DEBUG = True
run_name = "SA_Antro_neto_whr"
if USE_FAKE_QUE:
qp=fakeqp
else:
qp=config.qp
sethandlers(file_dir=config.log_dir)
os.chdir('/net/mraid08/export/genie/LabData/Analyses/Yochai/Jobs')
def calc_TTE(row):
"""
Returns either the time between the first visit to the first appearance of diabetes, or
if diabetes was not diagnosed, or the time of diagnosis is not given - return the time of last visit"""
if pd.isnull(row["TTE"]):
return row["21003-4.0"]
else:
return row["TTE"]
def plot_ROC_curve(y_test_val, y_pred_val, AUC):
fpr, tpr, _ = roc_curve(y_test_val, y_pred_val)
fig = plt.figure(figsize=(16, 9))
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating AUC={0:0.2f}'.format(AUC))
plt.legend(loc="lower right")
plt.show()
# pdf.savefig(fig, dpi=DPI)
# plt.close(fig)
def plot_precision_recall(y_test_val, y_pred_val, APS):
precision, recall, _ = precision_recall_curve(y_test_val, y_pred_val)
fig = plt.figure(figsize=(16, 9))
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(APS))
plt.show()
# Plotting ratio graph for precision recall
rel_prec = precision / precision[0]
# fig = plt.figure()
# plt.step(recall, rel_prec, color='b', alpha=0.2, where='post')
# plt.fill_between(recall, rel_prec, step='post', alpha=0.2, color='b')
# plt.xlabel('Recall')
# plt.ylabel('Relative Precision')
# # plt.ylim([0.0, 1.05 * np.percentile(rel_prec,99.97)])
# plt.ylim([0.0, 1.05 * max(rel_prec)])
# plt.xlim([0.0, 1.0])
# plt.title('2-class Relative-Precision-Recall curve: AP={0:0.2f}'.format(APS))
# plt.show()
# # Plotting ratio graph for precision recallwith removed maximum value
fig = plt.figure(figsize=(16, 9))
plt.step(recall, rel_prec, color='b', alpha=0.2, where='post')
plt.fill_between(recall, rel_prec, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Relative Precision')
plt.ylim([0.0, 1.05 * max(np.delete(rel_prec, np.argmax(rel_prec)))])
plt.xlim([0.0, 1.0])
plt.title('2-class Relative-Precision-Recall trimmed max: AP={0:0.2f}'.format(APS))
plt.show()
# Show graph of True positive Vs.quantiles of predicted probabilities.
def get_rel_score(row):
"""
A function that is used in apply on Dataframes
Returns the predicted Survival rate at the visit time
"""
return row[row.loc["21003-4.0"]]
def get_event_n_duration(path):
"""
Calculates the time passed from visit to event, or' if no event occurs - to the last known visit
return durations,event_observed,Diab_age_df.loc[:,['TTE',"2443-3.0"]],Diab_age_df
"""
diab_age_data_path="/net/mraid08/export/jafar/UKBioBank/Data/ukb29741.csv"
diab_data_col=pd.read_csv(diab_age_data_path, nrows=0).columns.values
data_col = pd.read_csv(path, nrows=0).columns.values
diab_age_col = [x for x in diab_data_col if x.startswith("2976-")] # Aged when diabetes first diagnosed
# diab_col = [x for x in data_col if x.startswith("2443-")] # 1 if diabetes diagnosed
Init_age_col = "21003-0.0"
all_ages_cols = [col for col in data_col if col.startswith("21003-")]
all_ages_df = pd.read_csv(path, usecols=["eid"] + all_ages_cols, index_col="eid")
Diab_age_df = pd.read_csv(diab_age_data_path, usecols=diab_age_col + ["eid"], index_col="eid")
Diab_age_df["Min_diab_age"] = Diab_age_df.min(axis=1)
Diab_age_df = Diab_age_df.join(all_ages_df[Init_age_col],how="right")
Diab_age_df["TTE"] = Diab_age_df["Min_diab_age"] - Diab_age_df[
"21003-0.0"] # Calculating time from first visit to diab onset
neg_diab_age_ind = Diab_age_df.loc[
Diab_age_df["TTE"] < 0, "TTE"].index # Getting indexes of events with negative values, to filter them out
diab_ind = [ind for ind in Diab_age_df.index if ind not in neg_diab_age_ind]
Diab_age_df = Diab_age_df.loc[diab_ind, :]
diab = | pd.read_csv(path, usecols=["eid", "2443-3.0"], index_col="eid") | pandas.read_csv |
from datetime import datetime
import pandas as pd
import subprocess
codes_file = "/home/gaza/Documents/sportsbook/sportsbook/codenames.csv"
games_file = "/home/gaza/Documents/sportsbook/sportsbook/dailybets.csv"
def load_codes():
data = pd.read_csv(codes_file)
df = pd.DataFrame(data, columns=['code','league','name'])
codes = df.set_index('code').T.to_dict('list')
return codes
def load_games():
data = pd.read_csv(games_file)
df = | pd.DataFrame(data) | pandas.DataFrame |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
assert "2017-09-01 00:00:01" in idx0
assert "2017-09" in idx0
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
input2 = np.array([1, 2, 3])
expected2 = np.array([1, 3])
result2 = idx2.get_value(input2, p1)
tm.assert_numpy_array_equal(result2, expected2)
def test_get_indexer(self):
    # GH 17717
    anchors = pd.PeriodIndex([pd.Period("2017-09-01"),
                              pd.Period("2017-09-04"),
                              pd.Period("2017-09-07")])
    target = pd.PeriodIndex([pd.Period("2017-08-31"),
                             pd.Period("2017-09-02"),
                             pd.Period("2017-09-05"),
                             pd.Period("2017-09-09")])

    # Indexing an index into itself is the identity.
    tm.assert_numpy_array_equal(anchors.get_indexer(anchors),
                                np.array([0, 1, 2], dtype=np.intp))

    # Each fill method resolves out-of-index targets differently.
    expectations = [
        ("pad", np.array([-1, 0, 1, 2], dtype=np.intp)),
        ("backfill", np.array([0, 1, 2, -1], dtype=np.intp)),
        ("nearest", np.array([0, 0, 1, 2], dtype=np.intp)),
    ]
    for method, expected in expectations:
        tm.assert_numpy_array_equal(anchors.get_indexer(target, method),
                                    expected)

    # A tolerance bound drops matches that are too far away.
    res = anchors.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 day"))
    tm.assert_numpy_array_equal(res, np.array([0, 0, 1, -1], dtype=np.intp))
def test_get_indexer_mismatched_dtype(self):
    # Mismatched dtypes must yield all -1s, never raise or cast incorrectly.
    dti = pd.date_range("2016-01-01", periods=3)
    pi = dti.to_period("D")
    pi2 = dti.to_period("W")
    expected = np.array([-1, -1, -1], dtype=np.intp)

    # Same expectation in every direction, for both lookup flavours.
    for left, right in [(pi, dti), (dti, pi), (pi, pi2)]:
        tm.assert_numpy_array_equal(left.get_indexer(right), expected)
        tm.assert_numpy_array_equal(left.get_indexer_non_unique(right)[0],
                                    expected)
def test_get_indexer_non_unique(self):
# GH 17717
p1 = | pd.Period("2017-09-02") | pandas.Period |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\nPanel:FutureWarning")
class PanelTests(object):
    """Base mixin holding shared Panel test state."""

    panel = None

    def not_hashable(self):
        # Panels are mutable containers and therefore must not be hashable.
        empty_panel = Panel()
        nested_panel = Panel(Panel([[[1]]]))
        pytest.raises(TypeError, hash, empty_panel)
        pytest.raises(TypeError, hash, nested_panel)
@pytest.mark.filterwarnings("ignore:\nPanel:FutureWarning")
class SafeForSparse(object):

    # issue 7692
    def test_raise_when_not_implemented(self):
        # Arithmetic ops between a Panel and a Series along axis 0 are
        # unsupported and must raise NotImplementedError.
        panel = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
                      items=['ItemA', 'ItemB', 'ItemC'],
                      major_axis=date_range('20130101', periods=4),
                      minor_axis=list('ABCDE'))
        row = panel.sum(axis=1).iloc[0]
        for op_name in ('add', 'sub', 'mul', 'truediv',
                        'floordiv', 'div', 'mod', 'pow'):
            with pytest.raises(NotImplementedError):
                getattr(panel, op_name)(row, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
    # Build a 3x3x3 panel where item i is a frame filled with the constant i.
    values = np.empty((3, 3, 3))
    for item in range(3):
        values[item] = item
    panel = Panel(values, lrange(3), lrange(3), lrange(3))

    # Deleting any one item must leave the other two untouched.
    for dropped in range(3):
        trimmed = panel.copy()
        del trimmed[dropped]
        for kept in range(3):
            if kept != dropped:
                tm.assert_frame_equal(trimmed[kept], panel[kept])
def test_setitem(self):
    # Assigning an item whose shape disagrees with (major, minor) must fail.
    panel = Panel(np.random.randn(4, 3, 2))
    msg = (r"shape of value must be \(3, 2\), "
           r"shape of given object was \(4, 2\)")
    with pytest.raises(ValueError, match=msg):
        panel[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
    # Month-end keyed panel over a coarse lon/lat grid.
    month_index = date_range(start=datetime(2009, 1, 1),
                             end=datetime(2009, 12, 31),
                             freq=MonthEnd())
    lons = np.linspace(-177.5, 177.5, 72)
    lats = np.linspace(-87.5, 87.5, 36)
    panel = Panel(items=month_index, major_axis=lons, minor_axis=lats)

    # A raw ndarray assigned to one item must round-trip unchanged.
    grid = np.random.randn(72 * 36).reshape((72, 36))
    stamp = datetime(2009, 2, 28)
    panel[stamp] = grid
    assert_almost_equal(panel[stamp].values, grid)
def test_set_minor_major(self):
    # GH 11014
    frame_a = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
    frame_b = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
    panel = Panel({'Item1': frame_a, 'Item2': frame_b})

    # Adding a new label along the minor axis...
    newminor = notna(panel.iloc[:, :, 0])
    panel.loc[:, :, 'NewMinor'] = newminor
    assert_frame_equal(panel.loc[:, :, 'NewMinor'], newminor.astype(object))

    # ...and along the major axis behaves symmetrically.
    newmajor = notna(panel.iloc[:, 0, :])
    panel.loc[:, 'NewMajor', :] = newmajor
    assert_frame_equal(panel.loc[:, 'NewMajor', :], newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
    # Assigning a DataFrame to a loc slice must align on the given labels.
    panel = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
                  minor_axis=[111, 222, 333])
    frame = DataFrame(np.random.randn(2, 3), index=[111, 333],
                      columns=[1, 2, 3])
    panel.loc[:, 22, [111, 333]] = frame
    assert_frame_equal(panel.loc[:, 22, [111, 333]], frame)
def test_ix_align(self):
    from pandas import Series
    series = Series(np.random.randn(10), name=0)
    # NOTE: the sort_values() result is discarded on purpose, mirroring the
    # historical test — the series itself stays unsorted.
    series.sort_values()
    base = Panel(np.random.randn(3, 10, 2))

    # Assignment through .loc must align the series on each axis layout.
    aligned = base.copy()
    aligned.loc[0, :, 0] = series
    assert_series_equal(aligned.loc[0, :, 0].reindex(series.index), series)

    aligned = base.swapaxes(0, 1)
    aligned.loc[:, 0, 0] = series
    assert_series_equal(aligned.loc[:, 0, 0].reindex(series.index), series)

    aligned = base.swapaxes(1, 2)
    aligned.loc[0, 0, :] = series
    assert_series_equal(aligned.loc[0, 0, :].reindex(series.index), series)
def test_ix_frame_align(self):
# GH3830, panel assignent by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
| tm.assert_frame_equal(panel.loc['a2'], df2) | pandas.util.testing.assert_frame_equal |
"""
Test cases for the wiutils.transformers.compute_detection_by_deployment function.
"""
import pandas as pd
import pytest
from wiutils.transformers import compute_detection_by_deployment
@pytest.fixture(scope="function")
def images():
    """Minimal images table: two deployments, three species."""
    records = [
        ("001", "Zentrygon linearis"),
        ("001", "Zentrygon linearis"),
        ("001", "Galictis vittata"),
        ("001", "Galictis vittata"),
        ("002", "Zentrygon linearis"),
        ("002", "Eira barbara"),
    ]
    return pd.DataFrame(records, columns=["deployment_id", "scientific_name"])
def test_compute_abundance(images):
    # Default mode counts every record per (species, deployment) pair.
    result = compute_detection_by_deployment(images, species_col="scientific_name")
    expected = pd.DataFrame(
        {
            "scientific_name": ["Eira barbara"] * 2
            + ["Galictis vittata"] * 2
            + ["Zentrygon linearis"] * 2,
            "deployment_id": ["001", "002"] * 3,
            "value": [0, 1, 2, 0, 2, 1],
        }
    )
    pd.testing.assert_frame_equal(result, expected)
def test_compute_presence(images):
    # With compute_abundance=False counts collapse to presence/absence (0/1).
    result = compute_detection_by_deployment(
        images, compute_abundance=False, species_col="scientific_name"
    )
    expected = pd.DataFrame(
        {
            "scientific_name": ["Eira barbara"] * 2
            + ["Galictis vittata"] * 2
            + ["Zentrygon linearis"] * 2,
            "deployment_id": ["001", "002"] * 3,
            "value": [0, 1, 1, 0, 1, 1],
        }
    )
    pd.testing.assert_frame_equal(result, expected)
def test_pivot(images):
    """Pivoting yields one column per deployment, species sorted by name.

    Fix: the expected species names had been replaced by "<NAME>"
    placeholders (anonymization artefact), which made the comparison
    meaningless. Restored from the fixture data and the abundance values
    asserted by the sibling tests (001: Eira=0, Galictis=2, Zentrygon=2;
    002: Eira=1, Galictis=0, Zentrygon=1).
    """
    result = compute_detection_by_deployment(
        images, pivot=True, species_col="scientific_name"
    )
    expected = pd.DataFrame(
        {
            "scientific_name": [
                "Eira barbara",
                "Galictis vittata",
                "Zentrygon linearis",
            ],
            "001": [0, 2, 2],
            "002": [1, 0, 1],
        }
    )
    pd.testing.assert_frame_equal(result, expected)
def test_intact_input(images):
    # The transformer must not mutate its input frame.
    snapshot = images.copy()
    compute_detection_by_deployment(images, species_col="scientific_name")
    pd.testing.assert_frame_equal(snapshot, images)
PATH_ROOT='C:/Users/<NAME>/Desktop/ICoDSA 2020/SENN/'
print('==================== Importing Packages ====================')
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import pandas as pd
import re
import json
import math
import string
import numpy as np
from bs4 import BeautifulSoup
import gensim
import contractions
import inflect
import string
import stanfordnlp
stanfordnlp.download('en')
import nltk
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
from nltk import pos_tag
from nltk.corpus import wordnet as wn
from nltk.tokenize import wordpunct_tokenize,TweetTokenizer
from nltk import word_tokenize, pos_tag, ne_chunk
from nltk.stem.porter import *
from nltk.stem import WordNetLemmatizer
from scipy import sparse as sp
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.impute import KNNImputer
from textblob import TextBlob
from sklearn.preprocessing import StandardScaler,MinMaxScaler
#------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Load the offline-built lexicons and dictionaries used by the feature
# engineering functions below. All paths are relative to PATH_ROOT.
# ---------------------------------------------------------------------------
print("==================== Importing Supporting Files ====================")
#RF N-Grams Dictionary (relevance-frequency term statistics per n-gram order)
rf_1_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_1_gram_df.csv', engine='python',index_col='Unnamed: 0')
rf_2_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_2_gram_df.csv', engine='python',index_col='Unnamed: 0')
rf_3_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_3_gram_df.csv', engine='python',index_col='Unnamed: 0')
rf_4_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_4_gram_df.csv', engine='python',index_col='Unnamed: 0')
#PMI Dictionary (pointwise mutual information word statistics)
PMI_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'PMI_df.csv', engine='python',index_col='Unnamed: 0')
#Abbreviation & Slang Dict: maps noisy tokens to their normalized form
# NOTE(review): sep='\s+' is a non-raw string; works, but a raw string
# r'\s+' would be cleaner — confirm before changing.
abb_df = pd.read_table(PATH_ROOT+'Supporting_Files/'+'emnlp_dict.txt', sep='\s+', names=('Abbreviation', 'Normal'))
abb=pd.Series(abb_df['Normal'])
abb.index=abb_df['Abbreviation']
abb_dict=dict(abb)
#AFINN Sentiment Lexicon: word -> sentiment score
AFINN_df = pd.read_table(PATH_ROOT+'Supporting_Files/'+'AFINN/'+'AFINN-111.txt', names=('Word', 'Sentiment'))
AFINN=pd.Series(AFINN_df['Sentiment'])
AFINN=((AFINN-AFINN.min())/(AFINN.max()-AFINN.min()))*(1-(-1))+(-1) #Rescaling in [-1,1]
AFINN.index=AFINN_df['Word']
AFINN_dict=dict(AFINN)
#Bing-Liu Sentiment Lexicon: two plain word lists (positive / negative)
# names='P' / names='N' create a single column called 'P' / 'N'.
pos = pd.read_table(PATH_ROOT+'Supporting_Files/'+'Bing-Liu-opinion-lexicon-English/'+'positive-words.txt',names='P')
neg=pd.read_table(PATH_ROOT+'Supporting_Files/'+'Bing-Liu-opinion-lexicon-English/'+'negative-words.txt',names='N',encoding='latin-1')
BingLiu_dict={'pos':pos['P'].tolist(),'neg':neg['N'].tolist()}
#General Enquirer Sentiment Lexicon: keep only the Positiv/Negativ flags;
# a word is positive/negative when the corresponding flag is non-null.
General_Inquirer_df=pd.read_csv(PATH_ROOT+'Supporting_Files/'+'General Inquirer Lexicon/'+'inquirerbasic.csv',index_col='Entry')
General_Inquirer_df=General_Inquirer_df[['Positiv','Negativ']]
General_Inquirer_dict={'pos':General_Inquirer_df[pd.isnull(General_Inquirer_df['Positiv'])==False]['Positiv'].index.tolist(),
 'neg':General_Inquirer_df[pd.isnull(General_Inquirer_df['Negativ'])==False]['Positiv'].index.tolist()}
#NRC Hashtag Sentiment Lexicon: hashtag -> PMI-based score, rescaled to [-1,1]
hs=pd.read_table(PATH_ROOT+'Supporting_Files/'+'NRC-Sentiment-Emotion-Lexicons/AutomaticallyGeneratedLexicons/NRC-Hashtag-Sentiment-Lexicon-v1.0/'+'HS-unigrams.txt',names=('Hashtag','PMI(w, pos) -PMI(w, neg)','n_pos','n_neg'),encoding='latin-1')
hs=hs[pd.isnull(hs.Hashtag)==False]
hs['PMI(w, pos) -PMI(w, neg)']=((hs['PMI(w, pos) -PMI(w, neg)']-hs['PMI(w, pos) -PMI(w, neg)'].min())/(hs['PMI(w, pos) -PMI(w, neg)'].max()-hs['PMI(w, pos) -PMI(w, neg)'].min()))*(1-(-1))+(-1) #Rescaling in [-1,1]
nrc=hs['PMI(w, pos) -PMI(w, neg)']
nrc.index=hs['Hashtag']
NRC_hashtag_dict=dict(nrc)
#Sentiwordnet Sentiment Lexicon: per-synset score = PosScore - NegScore
sentiwordnet=pd.read_table(PATH_ROOT+'Supporting_Files/'+'SentiWordNet/'+'SentiWordNet_3.0.0.txt',names=('POS','ID','PosScore','NegScore','SynsetTerms','Gloss'),encoding='latin-1')
sentiwordnet=sentiwordnet[pd.isnull(sentiwordnet.POS)==False]
sentiwordnet['score']=sentiwordnet['PosScore']-sentiwordnet['NegScore']
#------------------------------------------------------------------------------------------
print("==================== Importing Data ====================")
# StockTwits messages for ticker $BA; rows without an id are dropped.
df_stocktwits_full_BA = pd.read_csv(PATH_ROOT+'Dataset/'+'df_stocktwits_full_BA.csv', engine='python')
df_stocktwits_full_BA=df_stocktwits_full_BA[~pd.isnull(df_stocktwits_full_BA.id)]
#------------------------------------------------------------------------------------------
def tokenize(sentence):
    """Split ``sentence`` on whitespace or on '.,;' followed by a non-digit.

    Thin wrapper around nltk.regexp_tokenize with gaps=True, i.e. the
    pattern describes the separators, not the tokens.
    """
    return nltk.regexp_tokenize(sentence, pattern=r"\s|[\.,;]\D", gaps=True)
def clean_data(concat_df):
    """Normalize the raw ``text`` column of ``concat_df`` into ``clean_text``.

    Per-message pipeline (in order): URL masking, ticker removal, HTML/BOM
    decoding, emoji removal, mention/retweet stripping, slang & contraction
    expansion, stock-symbol re-joining, elongated-word reduction, ordinal
    word -> '1st'-style conversion, apostrophe cleanup and final
    trailing-punctuation splitting. Rows whose cleaned text is 3 characters
    or shorter are dropped afterwards.

    Parameters
    ----------
    concat_df : pandas.DataFrame
        Must contain a ``text`` column of raw messages. Mutated in place
        (columns added) and also returned.

    Returns
    -------
    pandas.DataFrame
        Frame with the new ``clean_text`` column, short rows removed and
        the index reset.
    """
    print("==================== Cleaning Data ====================")
    #Inspired by:
    # https://www.dotnetperls.com/punctuation-python
    # https://github.com/tthustla/twitter_sentiment_analysis_part1/blob/master/Capstone_part2.ipynb
    # https://github.com/Deffro/text-preprocessing-techniques/blob/master/techniques.py
    #Word ordinal encoding: build 'first' -> '1st', ..., up to 1999.
    p = inflect.engine()
    word_to_number_mapping = {}
    for i in range(1, 2000):
        word_form = p.number_to_words(i)  # 1 -> 'one'
        ordinal_word = p.ordinal(word_form)  # 'one' -> 'first'
        ordinal_number = p.ordinal(i)  # 1 -> '1st'
        word_to_number_mapping[ordinal_word] = ordinal_number  # 'first': '1st'

    def elongated_word(word):
        """
        Replaces an elongated word with its basic form, unless the word exists in the lexicon
        """
        repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
        repl = r'\1\2\3'
        if (len(word)>2 and word[0] != '$'):  #if not Stock Market symbol
            # Known WordNet words are kept verbatim.
            if wn.synsets(word):
                return word
            # Otherwise collapse one repeated letter and recurse.
            repl_word = repeat_regexp.sub(repl, word)
            if repl_word != word:
                return elongated_word(repl_word)
            else:
                return repl_word
        else:
            return word

    def isfloat(value):
        '''
        Check if value is float or not
        '''
        try:
            float(value)
            return True
        except ValueError:
            return False

    def deEmojify(inputString):
        '''
        Remove Emoji (and any other non-ASCII characters)
        '''
        return inputString.encode('ascii', 'ignore').decode('ascii')

    def sentences_cleaner(sentence):
        '''
        Clean one input sentence; on any error the raw sentence is returned.
        '''
        try:
            mention_pat= r'@[A-Za-z0-9_]+'
            mention_2_pat=r'@[A-Za-z0-9_]+:\s'
            retweet_pat=r'^RT +'
            dollars_pat=r'\$ +'
            http_pat = r'https?://[^ ]+'
            www_pat = r'www.[^ ]+'
            apos_pat=r'"+|"$|"+"$'
            ticker_pat=r'\$[A-Za]+ '
            #Transform any url into '_url'
            sentence = re.sub(http_pat, '_url', sentence)
            sentence = re.sub(www_pat, '_url', sentence)
            #Delete Ticker (e.g. '$BA ')
            sentence = re.sub(ticker_pat,"", sentence)
            #HTML decoding remove BOM
            soup = BeautifulSoup(sentence, 'lxml')
            souped = soup.get_text()
            try:
                # Only applies to bytes input; str has no .decode -> except path.
                bom_removed = souped.decode("utf-8-sig").replace(u"\ufffd", "?")
            except:
                bom_removed = souped
            #Delete Emoji
            stripped=deEmojify(bom_removed)
            #Delete mention ('@user: ' first, then bare '@user')
            stripped = re.sub(mention_2_pat,"", stripped)
            stripped = re.sub(mention_pat,"", stripped)
            #Delete retweet marker
            stripped=re.sub(retweet_pat,"",stripped)
            #Transfrom abbreviation & slang word into normal words based on abb_dict corpus
            abbreviation_handled=' '.join(pd.Series(stripped.split()).apply(lambda x: abb_dict[x] if x in abb_dict.keys() else x).to_list())
            #Transform contracted words into normal words
            contraction_handled =contractions.fix(abbreviation_handled)
            #Join the stock symbol ('$ AAPL' -> '$AAPL')
            dollars_handled=re.sub(dollars_pat,'$',contraction_handled)
            #Transform elongated words into normal words (trailing punctuation preserved)
            elongated_handled=' '.join(pd.Series(dollars_handled.split()).apply(lambda x: elongated_word(x[:-1])+x[-1] if (x[-1] in string.punctuation and not isfloat(x)) else elongated_word(x) if not isfloat(x) else x))
            #Transform ordinal number words ('first' -> '1st')
            ordinal_handled=' '.join(pd.Series(elongated_handled.split()).apply(lambda x: word_to_number_mapping[x.lower()] if x.lower() in word_to_number_mapping.keys() else x))
            #Remove unnecesary apostrophes
            apos_handled=re.sub(apos_pat,'',ordinal_handled)
            #Split Last Word Punctuation: detach trailing punctuation of the last token
            wordpunct=wordpunct_tokenize(apos_handled)
            if (len(wordpunct[-1])>1 and wordpunct[-1][-1] in string.punctuation and wordpunct[-2] not in string.punctuation) or (wordpunct[-1] in string.punctuation and wordpunct[-2] not in string.punctuation):
                words =tokenize(apos_handled)
                words[-1]=wordpunct[-2]
                words.append(wordpunct[-1])
            else:
                words =tokenize(apos_handled)
            return (" ".join(words)).strip()
        except:
            return sentence

    concat_df['clean_text']=concat_df['text'].apply(lambda x: sentences_cleaner(x))
    #Remove rows with len(clean_text) <= 3
    concat_df['text_length']=concat_df['clean_text'].apply(lambda x: len(x))
    concat_df=concat_df[concat_df.text_length>3]
    concat_df=concat_df.reset_index(drop=True)
    concat_df=concat_df.drop(columns=['text_length'])
    return(concat_df)
#dictionary that contains pos tags and their explanations
# 'CC': 'coordinating conjunction','CD': 'cardinal digit','DT': 'determiner',
# 'EX': 'existential there (like: \"there is\" ... think of it like \"there exists\")',
# 'FW': 'foreign word','IN': 'preposition/subordinating conjunction','
# JJ': 'adjective \'big\'','JJR': 'adjective, comparative \'bigger\'',
# 'JJS': 'adjective, superlative \'biggest\'', 'LS': 'list marker 1)', 'MD': 'modal could, will',
# 'NN': 'noun, singular \'desk\'', 'NNS': 'noun plural \'desks\'',
#'NNP': 'proper noun, singular \'Harrison\'','NNPS': 'proper noun, plural \'Americans\'',
# 'PDT': 'predeterminer \'all the kids\'','POS': 'possessive ending parent\'s',
# 'PRP': 'personal pronoun I, he, she','PRP$': 'possessive pronoun my, his, hers',
# 'RB': 'adverb very, silently,', 'RBR': 'adverb, comparative better',
# 'RBS': 'adverb, superlative best','RP': 'particle give up', 'TO': 'to go \'to\' the store.',
# 'UH': 'interjection errrrrrrrm','VB': 'verb, base form take','VBD': 'verb, past tense took',
# 'VBG': 'verb, gerund/present participle taking','VBN': 'verb, past participle taken',
# 'VBP': 'verb, sing. present, non-3d take','VBZ': 'verb, 3rd person sing. present takes',
# 'WDT': 'wh-determiner which','WP': 'wh-pronoun who, what','WP$': 'possessive wh-pronoun whose',
# 'WRB': 'wh-abverb where, when','QF' : 'quantifier, bahut, thoda, kam (Hindi)',
# 'VM' : 'main verb','PSP' : 'postposition, common in indian langs','DEM' : 'demonstrative, common in indian langs'
#Extract Parts of Speech as BOW
def extract_pos(doc):
    """Count verb POS tags in a parsed document.

    ``doc`` is expected to expose ``doc.sentences[*].words[*].pos``
    (stanfordnlp pipeline output). Returns a dict mapping each tracked
    verb tag to its occurrence count; on any error the counts gathered
    so far (possibly all zeros) are returned.
    """
    counts = dict.fromkeys(('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'VM'), 0)
    try:
        for sentence in doc.sentences:
            for word in sentence.words:
                if word.pos in counts:
                    counts[word.pos] += 1
    except:
        pass
    return counts
def n_grams_handled(sentence):
    """Filter a sentence before n-gram generation.

    Drops tokens shorter than 2 chars, hashtags, cashtags, digit-bearing
    words and bare punctuation tokens; returns the cleaned, re-joined
    sentence. On any error the input is returned unchanged.
    """
    try:
        # Keep only tokens of length >= 2.
        kept = ' '.join(w for w in sentence.split() if len(w) >= 2)
        # Remove hashtags, then cashtags, then words containing digits.
        kept = re.sub(r'#([^\s]+)', "", kept)
        kept = re.sub(r'\$[^\s]+', "", kept)
        kept = re.sub(r'\w*\d\w*', "", kept)
        # Tokenize and drop bare punctuation tokens, then normalize spacing.
        tokens = [t for t in TweetTokenizer().tokenize(kept)
                  if t not in string.punctuation]
        return (' '.join(tokens)).strip()
    except:
        return sentence
def rf_ngram(dict_source,df,gram):
    '''
    Average relevance-frequency (RF) n-gram score per message.

    dict_source : DataFrame indexed by n-gram with 'pos'/'neg' count columns.
    df          : DataFrame with a 'clean_text' column.
    gram        : n-gram order, 1..4.

    Returns a Series (aligned with df) holding the mean RF weight of the
    message's known n-grams, or NaN when none are found / input is not str.

    NOTE(review): pd.concat([...],1) uses a positional axis argument, which
    was removed in pandas 2.0 — confirm the target pandas version.
    '''
    def sentence_sparse(sentence,gram,rf_ngram,sparse_rf_ngram):
        # Build a 1 x |vocab| sparse row accumulating the RF weight of each
        # n-gram of the sentence that exists in the dictionary.
        #Initiate Linke List Sparse Matrix (LIL: efficient incremental writes)
        zero_sparse=sp.lil_matrix( (1,len(rf_ngram)), dtype=float)
        #Assign Value of rf_ngram to each word in sentence
        splitted_text=tokenize(n_grams_handled(sentence))
        #Unigram
        if gram==1:
            for word in splitted_text:
                if word in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(word)]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(word)]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        #Bigram
        elif gram==2:
            bigram=lambda x: splitted_text[x]+' '+splitted_text[x+1]
            it_2_gram=range(len(splitted_text)-1)
            for i in it_2_gram:
                if bigram(i) in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(bigram(i))]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(bigram(i))]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        #Trigram
        elif gram==3:
            trigram=lambda x: splitted_text[x]+' '+splitted_text[x+1]+' '+splitted_text[x+2]
            it_3_gram=range(len(splitted_text)-2)
            for i in it_3_gram:
                if trigram(i) in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(trigram(i))]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(trigram(i))]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        #4grams
        elif gram==4:
            fourgram=lambda x: splitted_text[x]+' '+splitted_text[x+1]+' '+splitted_text[x+2]+' '+splitted_text[x+3]
            it_4_gram=range(len(splitted_text)-3)
            for i in it_4_gram:
                if fourgram(i) in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(fourgram(i))]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(fourgram(i))]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        return(sparse)
    BOW_df= dict_source
    #Calculate rf_ngram for each word:
    # rf = max( log(2 + pos/max(1, neg)), log(2 + neg/max(1, pos)) )
    series_1=pd.Series([1 for x in range(len(BOW_df))])
    series_1.index=BOW_df.index
    series_2=pd.Series([2 for x in range(len(BOW_df))])
    series_2.index=BOW_df.index
    frac_1=np.log(series_2+(BOW_df['pos']/pd.concat([series_1,BOW_df['neg']],1).max(axis=1)))
    frac_2=np.log(series_2+(BOW_df['neg']/pd.concat([series_1,BOW_df['pos']],1).max(axis=1)))
    rf_ngram_series= pd.concat([frac_1,frac_2],1).max(axis=1)
    sparse_rf_ngram=sp.csr_matrix(rf_ngram_series)
    def rf_ngram_calculate(x):
        # Mean of the non-zero RF weights of the message; NaN otherwise.
        lst=[i for i in sentence_sparse(x,gram,rf_ngram_series,sparse_rf_ngram).toarray()[0].tolist() if i!=0]
        if type(x)!=str:
            return(np.nan)
        else:
            if len(lst)>0:
                return(np.mean(lst))
            else:
                return(np.nan)
    rf_ngram_avg_list=df['clean_text'].apply(lambda x: rf_ngram_calculate(x))
    return(rf_ngram_avg_list)
def PMI(dict_source,df):
    '''
    Average pointwise-mutual-information sentiment score per message.

    dict_source : DataFrame indexed by token with 'pos'/'neutral'/'neg'
                  count columns.
    df          : DataFrame with a 'clean_text' column.

    Returns a Series (aligned with df): mean of PMI(w,pos)-PMI(w,neg) over
    the message's known tokens, NaN when none are found / input is not str.
    '''
    BOW_df= dict_source
    N=len(BOW_df) #Number of unique tokens in the corpus
    pos_N=len(BOW_df[BOW_df.pos!=0]) #Number of unique positive tokens in the corpus
    neg_N=len(BOW_df[BOW_df.neg!=0]) #Number of unique negative tokens in the corpus
    total=BOW_df.sum().sum() #Number of tokens in the corpus
    pos_total=BOW_df.sum()['pos'] #Number of tokens in the positive corpus
    neg_total=BOW_df.sum()['neg'] #Number of tokens in the negative corpus
    PMI_df=pd.DataFrame(columns=['freq_word','freq_word_pos','freq_word_neg'])
    PMI_df['freq_word']=pd.Series(BOW_df.index).apply(lambda x: (BOW_df.loc[x,'pos']+BOW_df.loc[x,'neutral']+BOW_df.loc[x,'neg'])/total)
    PMI_df['freq_word_pos']=pd.Series(BOW_df.index).apply(lambda x: BOW_df.loc[x,'pos']/pos_total) #Freq of word w in positive text
    PMI_df['freq_word_neg']=pd.Series(BOW_df.index).apply(lambda x: BOW_df.loc[x,'neg']/neg_total) #Freq of word w in negative text
    PMI_df.index=BOW_df.index
    #Calculate PMI for each word; 1+ inside log2 avoids log of zero.
    PMI_df['PMI_pos']=np.log2(1+((PMI_df['freq_word_pos']*N)/(PMI_df['freq_word']*pos_N)))
    PMI_df['PMI_neg']=np.log2(1+((PMI_df['freq_word_neg']*N)/(PMI_df['freq_word']*neg_N)))
    PMI_df['PMI']=PMI_df['PMI_pos']-PMI_df['PMI_neg']
    def PMI_calculate(x):
        # Mean PMI over the message's tokens that exist in the dictionary.
        lst=[PMI_df.loc[i,'PMI'] for i in tokenize(n_grams_handled(x)) if i in PMI_df.index]
        if type(x)!=str:
            return(np.nan)
        else:
            if len(lst)>0:
                return(np.mean(lst))
            else:
                return(np.nan)
    PMI_avg_list=df['clean_text'].apply(lambda x: PMI_calculate(x))
    return(PMI_avg_list)
def countAllCaps(text):
    """Return how many runs of two or more consecutive capital letters occur."""
    return sum(1 for _ in re.finditer("[A-Z]{2,}", text))
def countHashtag(text):
    """Return the number of '#'-prefixed tokens in *text*."""
    return sum(1 for _ in re.finditer(r'#([^\s]+)', text))
def is_ordinal_numbers(sentences):
    """Return 1 if the sentence contains an ordinal number token like
    '1st'/'22nd'/'3rd'/'4th', else 0.

    Fix: the original called ``isfloat``, which is defined only as a nested
    helper inside ``clean_data`` and therefore does not exist at module
    scope — calling this function raised NameError. A local copy of the
    helper restores the intended behavior.
    """
    def _isfloat(value):
        # Local equivalent of clean_data's nested isfloat helper.
        try:
            float(value)
            return True
        except ValueError:
            return False

    for word in tokenize(sentences):
        if word[-2:] in ('st', 'nd', 'rd', 'th') and _isfloat(word[:-2]):
            return 1
    return 0
def countMultiExclamationMarks(sentences):
    """Count runs of two or more consecutive exclamation marks."""
    return sum(1 for _ in re.finditer(r"(\!)\1+", sentences))
def countMultiQuestionMarks(sentences):
    """Count runs of two or more consecutive question marks."""
    return sum(1 for _ in re.finditer(r"(\?)\1+", sentences))
def sentence_synset(sentence):
    """Return the first WordNet synset of every taggable token in the sentence.

    Tokens whose Penn tag does not map to a WordNet POS (adj/noun/adv/verb)
    are skipped, as are lemmas WordNet does not know.
    """
    def penn_to_wn(tag):
        # Map a Penn Treebank tag prefix to the matching WordNet POS constant.
        mapping = {'J': wn.ADJ, 'N': wn.NOUN, 'R': wn.ADV, 'V': wn.VERB}
        return mapping.get(tag[:1])

    lemmatizer = WordNetLemmatizer()
    synsets = []
    for word, tag in pos_tag(tokenize(sentence)):
        wn_tag = penn_to_wn(tag)
        if wn_tag is None:
            continue
        lemma = lemmatizer.lemmatize(word, pos=wn_tag)
        try:
            synsets.append(wn.synsets(lemma, pos=wn_tag)[0])
        except:
            pass
    return synsets
def min_multiple_list(S):
    """Element-wise minimum pooling across every entry of ``S``.

    Parameters
    ----------
    S : non-empty sequence of scalars or same-shaped array-likes.

    Returns
    -------
    The element-wise minimum over all entries of ``S``.

    Fix: the original iterated ``range(len(S)-1)`` comparing against
    ``S[i]`` while starting from ``S[0]``, so the LAST entry was never
    considered (and ``S[0]`` was compared with itself). The fold now
    covers every entry.
    """
    minim = S[0]
    for arr in S[1:]:
        minim = np.minimum(minim, arr)
    return minim
def max_multiple_list(S):
    """Element-wise maximum pooling across every entry of ``S``.

    Parameters
    ----------
    S : non-empty sequence of scalars or same-shaped array-likes.

    Returns
    -------
    The element-wise maximum over all entries of ``S``.

    Fix: the original iterated ``range(len(S)-1)`` comparing against
    ``S[i]`` while starting from ``S[0]``, so the LAST entry was never
    considered. The fold now covers every entry.
    """
    maxim = S[0]
    for arr in S[1:]:
        maxim = np.maximum(maxim, arr)
    return maxim
def rescaling(df, columns, scale_type='Standard'):
    """Return a copy of ``df`` with ``columns`` feature-scaled.

    Parameters
    ----------
    df : pandas.DataFrame
        Input frame (not modified).
    columns : list of str
        Names of the numeric columns to rescale.
    scale_type : str, default 'Standard'
        Case-insensitive: 'standard' -> zero mean / unit variance,
        'minmax' -> range [0, 1].

    Returns
    -------
    pandas.DataFrame
        The untouched columns followed by the rescaled ones (appended in
        ``columns`` order, matching the original column layout).

    Raises
    ------
    ValueError
        For an unknown ``scale_type`` (the original fell through to a
        confusing NameError on the undefined ``scaler``).
    """
    scale_type = scale_type.lower()
    # Keyword form instead of positional axis: `df.drop(columns, 1)` relied
    # on the positional `axis` argument removed in pandas 2.0.
    scaled_X = df.drop(columns=columns)
    X = df[columns]
    if scale_type == 'minmax':
        scaler = MinMaxScaler(feature_range=(0, 1))
    elif scale_type == 'standard':
        scaler = StandardScaler()
    else:
        raise ValueError("scale_type must be 'standard' or 'minmax'")
    scaled_column = scaler.fit_transform(X)
    scaled_column = pd.DataFrame(scaled_column, columns=columns)
    for column in columns:
        # .tolist() sidesteps index misalignment between the scaled frame
        # (RangeIndex) and the original frame's index.
        scaled_X[column] = scaled_column[column].tolist()
    return scaled_X
def feature_engineering_split(df):
print("==================== Feature Engineering by Splitting ====================")
#List of POS-Tag
#pos_key = ['CC', 'CD','DT','EX','FW','JJ','JJR','JJS','LS','MD', 'NN','NNS','NNP','NNPS','PDT'
# ,'POS','PRP','PRP$','RB', 'RBR','RBS','RP','TO','UH','VB','VBD','VBG','VBN','VBP',
# 'VBZ','VM','WDT','WP','WP$','WRB','QF','PSP','DEM']
pos_key = ['<KEY>']
#Initiate pipeline for POS-Tagging
nlp = stanfordnlp.Pipeline(processors = "tokenize,pos")
#Inititate class for Lemmatization
lemmatizer = WordNetLemmatizer()
#Initiate class for Stemming
stemmer = PorterStemmer()
#Lemmatization+Stemming
df['base_text']=df['clean_text'].apply(lambda x: ' '.join(pd.Series(tokenize(x)).apply(lambda wrd: stemmer.stem(lemmatizer.lemmatize(wrd)) if wrd[0]!='$' else wrd).to_list()) if type(x)==str else np.nan)
print('Done Base Text')
#Create POS-Tag features
for tag in pos_key:
df['POS_'+tag]=df['clean_text'].apply(lambda x: extract_pos(nlp(x))[tag] if type(x)==str else np.nan)
print('Done POS Tag')
#Binary Feature '+num'
df["'+num"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\+\d+\s|\+\d+[!,.;:?/]|\+\d+$',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature '-num'
df["'-num"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\-\d+\s|\-\d+[!,.;:?/]|\-\d+$',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature 'num%'
df["num%"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r' \d.\d*%+|^\d.\d*%+|[!,.;:?/]\d.\d*%+| \d*%+|^\d*%+|[!,.;:?/]\d*%+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature '+num%'
df["'+num%"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\+\d.\d*%+|\+\d*%+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature '-num%'
df["'-num%'"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\-\d.\d*%+|\-\d*%+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Features '$num'
df['$num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\$\d*%+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature mixed number and word
df['word_num']=df['clean_text'].apply(lambda x: 1 if (type(x)==str and len(re.findall(r'\w*\d\w*',x))>0) else 0 if (type(x)==str) else np.nan)
#Binary Feature ordinal number
df['ordinal_num']=df['clean_text'].apply(lambda x: is_ordinal_numbers(x) if type(x)==str else np.nan)
#Binary Feature 'num-num'
df['num-num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*-\d+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature 'num-num%'
df['num-num%']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*-\d%+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature 'num-num-num'
df['num-num-num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*-\d-\d+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature 'num/num'
df['num/num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*/\d+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature 'num/num/num'
df['num/num/num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*/\d/\d+',x))>0) else 0 if type(x)==str else np.nan)
#Binary Feature only numbers(no symbol and characters)
df['only_number']=df['clean_text'].apply(lambda x:1 if (type(x)==str and any(isfloat(wrd) for wrd in tokenize(x))) else 0 if type(x)==str else np.nan)
print('Done Keyword+num')
f_plus=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if len(re.findall(r'\+\d.\d*%+|\+\d*%+',wrd))>0 else 0)
g_plus=lambda y: f_plus(y)[f_plus(y)==1].index.tolist()
f_min=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if len(re.findall(r'\-\d.\d*%+|\-\d*%+',wrd))>0 else 0)
g_min=lambda y: f_min(y)[f_min(y)==1].index.tolist()
#Binary Feature 'call'(or 'calls' or 'called') before '+num%'
df['call_+num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='call' or tokenize(z)[i-1]=='calls' or tokenize(z)[i-1]=='called') for i in g_plus(z))) else 0 if type(z)==str else np.nan)
#Binary Feature 'call'(or 'calls' or 'called') before '-num%'
df['call_-num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='call' or tokenize(z)[i-1]=='calls' or tokenize(z)[i-1]=='called') for i in g_min(z))) else 0 if type(z)==str else np.nan)
#Binary Feature 'put'(or 'puts') before '+num%'
df['put_+num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='put' or tokenize(z)[i-1]=='puts') for i in g_plus(z))) else 0 if type(z)==str else np.nan)
#Binary Feature 'put'(or 'puts') before '-num%'
df['put_-num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='put' or tokenize(z)[i-1]=='puts') for i in g_min(z))) else 0 if type(z)==str else np.nan)
#Binary Feature 'Bull' or 'Bullish'
df['bull']=df['clean_text'].apply(lambda x: 1 if (type(x)==str and ('bull' or 'bullish') in x.split()) else 0 if type(x)==str else np.nan)
#Binary Feature 'Bear' or 'Bearish'
df['bear']=df['clean_text'].apply(lambda x: 1 if (type(x)==str and ('bear' or 'bearish') in x.split()) else 0 if type(x)==str else np.nan)
print('Done Specific Keyword')
tk=TweetTokenizer()
#Calculate the number of '!'
df['number_of_!']=df['clean_text'].apply(lambda x: tk.tokenize(x).count('!') if type(x)==str else np.nan)
#Calculate the number of '?'
df['number_of_?']=df['clean_text'].apply(lambda x: tk.tokenize(x).count('?') if type(x)==str else np.nan)
#Calculate the number of '$'
df['number_of_$']=df['clean_text'].apply(lambda x: tk.tokenize(x).count('$') if type(x)==str else np.nan)
#Calculate the number of continuous '!'
df['continous_!']=df['clean_text'].apply(lambda x: countMultiExclamationMarks(x) if type(x)==str else np.nan)
#Calculate the number of continuous '?'
df['continous_?']=df['clean_text'].apply(lambda x: countMultiQuestionMarks(x) if type(x)==str else np.nan)
print('Done Punctation Count')
#Calculate the number of Caps word
df['caps_word']=df['clean_text'].apply(lambda x: countAllCaps(' '.join([i for i in x.split() if i[0]!='$'])) if type(x)==str else np.nan)
print('Done Caps words')
#Calculate the number of Hashtags
df['hashtags']=df['clean_text'].apply(lambda x: countHashtag(x) if type(x)==str else np.nan)
print('Done Hashtags')
#AFINN Sentiment Lexicon
affin_sent_score=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: AFINN_dict[wrd.lower()] if wrd.lower() in AFINN_dict.keys() else 0)
affin_sent_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if (wrd.lower() in AFINN_dict.keys() and AFINN_dict[wrd.lower()]>0) else
-1 if (wrd.lower() in AFINN_dict.keys() and AFINN_dict[wrd.lower()]<0) else 0)
#Sum Score
df['AFINN_sum_score']=df['clean_text'].apply(lambda x: affin_sent_score(x).sum() if type(x)==str else np.nan)
#Max Score
df['AFINN_max_score']=df['clean_text'].apply(lambda x: affin_sent_score(x).max() if type(x)==str else np.nan)
#Min Score
df['AFINN_min_score']=df['clean_text'].apply(lambda x: affin_sent_score(x).min() if type(x)==str else np.nan)
#Ratio of Positive Words
df['AFINN_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in affin_sent_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
#Ratio of Negatiive Words
df['AFINN_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in affin_sent_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
print('Done AFIIN Lexicon')
#BingLiu Sentiment Lexicon
bingliu_sent_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if wrd.lower() in BingLiu_dict['pos'] else -1 if wrd.lower() in BingLiu_dict['neg'] else 0)
#Ratio of Positive Words
df['BingLiu_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in bingliu_sent_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
#Ratio of Negative Words
df['BingLiu_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in bingliu_sent_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
print('Done BingLiu Lexicon')
#General Inquirer Sentiment Lexicon
general_inquirer_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if wrd.lower() in General_Inquirer_dict['pos'] else -1 if wrd.lower() in General_Inquirer_dict['neg'] else 0)
#Ratio of Positive Words
df['General_Inquirer_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in general_inquirer_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
#Ratio of Negative Words
df['General_Inquirer_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in general_inquirer_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
print('Done General Inquirer Lexicon')
#NRC Hashtag Sentiment Lexicon
nrc_hashtag_sent_score=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: NRC_hashtag_dict[wrd.lower()] if wrd.lower() in NRC_hashtag_dict.keys() else 0)
nrc_hashtag_sent_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if (wrd.lower() in NRC_hashtag_dict.keys() and NRC_hashtag_dict[wrd.lower()]>0) else
-1 if (wrd.lower() in NRC_hashtag_dict.keys() and NRC_hashtag_dict[wrd.lower()]<0) else 0)
#Sum Score
df['NRC_Hashtag_sum_score']=df['clean_text'].apply(lambda x: nrc_hashtag_sent_score(x).sum() if type(x)==str else np.nan)
#Max Score
df['NRC_Hashtag_max_score']=df['clean_text'].apply(lambda x: nrc_hashtag_sent_score(x).max() if type(x)==str else np.nan)
#Min Score
df['NRC_Hashtag_min_score']=df['clean_text'].apply(lambda x: nrc_hashtag_sent_score(x).min() if type(x)==str else np.nan)
#Ratio of Positive Words
df['NRC_Hashtag_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in nrc_hashtag_sent_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
#Ratio of Negatiive Words
df['NRC_Hashtag_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in nrc_hashtag_sent_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
print('Done NRC Hashtag Sentiment Lexicon')
#SentiWordNet Sentiment Lexicon
sentiwordnet_list=sentiwordnet.ID.tolist()
sent_to_synset=lambda x: pd.Series(sentence_synset(x))
synset_to_offset=lambda x: int(str(x.offset()).zfill(8))
get_value=lambda x: sentiwordnet[sentiwordnet.ID==synset_to_offset(x)]['score'].values[0]
score_offset_check=lambda x: get_value(x) if (synset_to_offset(x) in sentiwordnet_list) else 0
binary_offset_check=lambda x: 1 if (synset_to_offset(x) in sentiwordnet_list and get_value(x)>0) else -1 if (synset_to_offset(x) in sentiwordnet_list and get_value(x)<0) else 0
sentiwordnet_score=lambda sent: sent_to_synset(sent).apply(lambda z: score_offset_check(z))
sentiwordnet_binary=lambda sent: sent_to_synset(sent).apply(lambda z: binary_offset_check(z))
#Sum Score
df['SentiWordNet_sum_score']=df['clean_text'].apply(lambda x: sentiwordnet_score(x).sum() if type(x)==str else np.nan)
#Max Score
df['SentiWordNet_max_score']=df['clean_text'].apply(lambda x: sentiwordnet_score(x).max() if type(x)==str else np.nan)
#Min Score
df['SentiWordNet_min_score']=df['clean_text'].apply(lambda x: sentiwordnet_score(x).min() if type(x)==str else np.nan)
#Ratio of Positive Words
df['SentiWordNet_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in sentiwordnet_binary(x) if i==1)/len(sent_to_synset(x)) if (type(x)==str and len(sent_to_synset(x))>0) else 0 if type(x)==str else np.nan)
#Ratio of Negatiive Words
df['SentiWordNet_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in sentiwordnet_binary(x) if i==-1)/len(sent_to_synset(x)) if (type(x)==str and len(sent_to_synset(x))>0) else 0 if type(x)==str else np.nan)
print('Done SentiWordNet Lexicon')
return(df)
def feature_engineering(df):
print("==================== Feature Engineering ====================")
#n-grams
for grams in [1,2,3,4]:
nan_checker=lambda x: x if type(x)==str else ''
#Initiate class for BOW
bow_vectorizer= CountVectorizer(ngram_range=(grams,grams))
#Initiate class for TF-IDF
tfidf_vectorizer = TfidfVectorizer(norm=None, ngram_range=(grams,grams))
#Create docs
docs=df['clean_text'].apply(lambda x: n_grams_handled(x))
#Create TF-IDF matrix
tfidf_matrix = tfidf_vectorizer.fit_transform(docs.apply(lambda x: nan_checker(x)).to_list())
#Create TF-IDF n-grams
df['Avg_TFIDF_'+str(grams)+'-grams']=[np.mean([x for x in tfidf_matrix[i].toarray()[0].tolist() if x!=0]) for i in range(len(df))]
#Calculate sum of RF n-gram
if grams==1:
df['Avg_rf_'+str(grams)+'-grams']=rf_ngram(rf_1_gram_df,df,grams)
elif grams==2:
df['Avg_rf_'+str(grams)+'-grams']=rf_ngram(rf_2_gram_df,df,grams)
elif grams==3:
df['Avg_rf_'+str(grams)+'-grams']=rf_ngram(rf_3_gram_df,df,grams)
elif grams==4:
df['Avg_rf_'+str(grams)+'-grams']=rf_ngram(rf_4_gram_df,df,grams)
print('Done n-grams')
#Calculate PMI
df['PMI_score']=PMI(PMI_df,df)
#Impute missing value
imputer = KNNImputer(n_neighbors=3)
df_imputed=pd.DataFrame(imputer.fit_transform(df.drop(columns=['id','created_at','body','clean_text','base_text'])))
df_imputed.columns=df.drop(columns=['id','created_at','body','clean_text','base_text']).columns
df['Avg_rf_1-grams']=df_imputed['Avg_rf_1-grams']
df['Avg_rf_2-grams']=df_imputed['Avg_rf_2-grams']
df['Avg_rf_3-grams']=df_imputed['Avg_rf_3-grams']
df['Avg_rf_4-grams']=df_imputed['Avg_rf_4-grams']
df['PMI_score']=df_imputed['PMI_score']
return(df)
def parallelize_dataframe(df, func, n_split):
'''
Function to parallelize a dataframe
'''
df_split = np.array_split(df, n_split)
df_pool=func(df_split[0])
for i in range(n_split-1):
x=df_split[i+1]
x=func(x.copy())
df_pool = | pd.concat([df_pool,x],ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
df: pd.DataFrame = | pd.read_csv(filename, parse_dates=["Date"]) | pandas.read_csv |
import wandb
from wandb import data_types
import numpy as np
import pytest
import os
import sys
import datetime
from wandb.sdk.data_types._dtypes import *
class_labels = {1: "tree", 2: "car", 3: "road"}
test_folder = os.path.dirname(os.path.realpath(__file__))
im_path = os.path.join(test_folder, "..", "assets", "test.png")
def test_none_type():
assert TypeRegistry.type_of(None) == NoneType()
assert TypeRegistry.type_of(None).assign(None) == NoneType()
assert TypeRegistry.type_of(None).assign(1) == InvalidType()
def test_string_type():
assert TypeRegistry.type_of("Hello") == StringType()
assert TypeRegistry.type_of("Hello").assign("World") == StringType()
assert TypeRegistry.type_of("Hello").assign(None) == InvalidType()
assert TypeRegistry.type_of("Hello").assign(1) == InvalidType()
def test_number_type():
assert TypeRegistry.type_of(1.2) == NumberType()
assert TypeRegistry.type_of(1.2).assign(1) == NumberType()
assert TypeRegistry.type_of(1.2).assign(None) == InvalidType()
assert TypeRegistry.type_of(1.2).assign("hi") == InvalidType()
def make_datetime():
return datetime.datetime(2000, 12, 1)
def make_date():
return datetime.date(2000, 12, 1)
def make_datetime64():
return np.datetime64("2000-12-01")
def test_timestamp_type():
assert TypeRegistry.type_of(make_datetime()) == TimestampType()
assert (
TypeRegistry.type_of(make_datetime())
.assign(make_date())
.assign(make_datetime64())
== TimestampType()
)
assert TypeRegistry.type_of(make_datetime()).assign(None) == InvalidType()
assert TypeRegistry.type_of(make_datetime()).assign(1) == InvalidType()
def test_boolean_type():
assert TypeRegistry.type_of(True) == BooleanType()
assert TypeRegistry.type_of(True).assign(False) == BooleanType()
assert TypeRegistry.type_of(True).assign(None) == InvalidType()
assert TypeRegistry.type_of(True).assign(1) == InvalidType()
def test_any_type():
assert AnyType() == AnyType().assign(1)
assert AnyType().assign(None) == InvalidType()
def test_never_type():
assert InvalidType().assign(1) == InvalidType()
assert InvalidType().assign("a") == InvalidType()
assert InvalidType().assign(True) == InvalidType()
assert InvalidType().assign(None) == InvalidType()
def test_unknown_type():
assert UnknownType().assign(1) == NumberType()
assert UnknownType().assign(None) == InvalidType()
def test_union_type():
wb_type = UnionType([float, str])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == InvalidType()
wb_type = UnionType([float, AnyType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == wb_type
wb_type = UnionType([float, UnknownType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == UnionType([float, StringType()])
assert wb_type.assign(None) == InvalidType()
wb_type = UnionType([float, OptionalType(UnknownType())])
assert wb_type.assign(None).assign(True) == UnionType(
[float, OptionalType(BooleanType())]
)
wb_type = UnionType([float, UnionType([str, UnknownType()])])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == UnionType([float, str, bool])
assert wb_type.assign(None) == InvalidType()
def test_const_type():
wb_type = ConstType(1)
assert wb_type.assign(1) == wb_type
assert wb_type.assign("a") == InvalidType()
assert wb_type.assign(2) == InvalidType()
def test_set_const_type():
wb_type = ConstType(set())
assert wb_type.assign(set()) == wb_type
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1}) == InvalidType()
assert wb_type.assign([]) == InvalidType()
wb_type = ConstType({1, 2, 3})
assert wb_type.assign(set()) == InvalidType()
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1, 2, 3}) == wb_type
assert wb_type.assign([1, 2, 3]) == InvalidType()
def test_object_type():
wb_type = TypeRegistry.type_of(np.random.rand(30))
assert wb_type.assign(np.random.rand(30)) == wb_type
assert wb_type.assign(4) == InvalidType()
def test_list_type():
assert ListType(int).assign([]) == ListType(int, 0)
assert ListType(int).assign([1, 2, 3]) == ListType(int, 3)
assert ListType(int).assign([1, "a", 3]) == InvalidType()
def test_dict_type():
spec = {
"number": float,
"nested": {
"list_str": [str],
},
}
exact = {
"number": 1,
"nested": {
"list_str": ["hello", "world"],
},
}
subset = {"nested": {"list_str": ["hi"]}}
narrow = {"number": 1, "string": "hi"}
wb_type = TypeRegistry.type_of(exact)
assert wb_type.assign(exact) == wb_type
assert wb_type.assign(subset) == InvalidType()
assert wb_type.assign(narrow) == InvalidType()
spec = {
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
wb_type = TypedDictType(spec)
assert wb_type.assign({}) == wb_type
assert wb_type.assign({"optional_number": 1}) == wb_type
assert wb_type.assign({"optional_number": "1"}) == InvalidType()
assert wb_type.assign({"optional_unknown": "hi"}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(str),
}
)
assert wb_type.assign({"optional_unknown": None}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
)
wb_type = TypedDictType({"unknown": UnknownType()})
assert wb_type.assign({}) == InvalidType()
assert wb_type.assign({"unknown": None}) == InvalidType()
assert wb_type.assign({"unknown": 1}) == TypedDictType(
{"unknown": float},
)
def test_nested_dict():
notation_type = TypedDictType(
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [
[
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [[]],
}
]
],
}
)
expanded_type = TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(
ListType(
TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(ListType()),
}
)
)
),
}
)
example = {
"a": 1,
"b": True,
"c": "StringType()",
"d": "hi",
"e": {},
"f": [1],
"g": [
[
{
"a": 2,
"b": False,
"c": "StringType()",
"d": 3,
"e": {},
"f": [],
"g": [[5]],
}
]
],
}
real_type = TypedDictType.from_obj(example)
assert notation_type == expanded_type
assert notation_type.assign(example) == real_type
def test_image_type():
wb_type = data_types._ImageFileType()
image_simple = data_types.Image(np.random.rand(10, 10))
wb_type_simple = data_types._ImageFileType.from_obj(image_simple)
image_annotated = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
},
)
wb_type_annotated = data_types._ImageFileType.from_obj(image_annotated)
image_annotated_differently = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth_2": {"path": im_path, "class_labels": class_labels},
},
)
assert wb_type.assign(image_simple) == wb_type_simple
assert wb_type.assign(image_annotated) == wb_type_annotated
# OK to assign Images with disjoint class set
assert wb_type_annotated.assign(image_simple) == wb_type_annotated
# Merge when disjoint
assert wb_type_annotated.assign(
image_annotated_differently
) == data_types._ImageFileType(
box_layers={"box_predictions": {1, 2, 3}, "box_ground_truth": {1, 2, 3}},
box_score_keys={"loss", "acc"},
mask_layers={
"mask_ground_truth_2": set(),
"mask_ground_truth": set(),
"mask_predictions": {1, 2, 3},
},
class_map={"1": "tree", "2": "car", "3": "road"},
)
def test_classes_type():
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
wb_class_type = (
wandb.wandb_sdk.data_types.helper_types.classes._ClassesIdType.from_obj(
wb_classes
)
)
assert wb_class_type.assign(1) == wb_class_type
assert wb_class_type.assign(0) == InvalidType()
def test_table_type():
table_1 = wandb.Table(columns=["col"], data=[[1]])
t1 = data_types._TableType.from_obj(table_1)
table_2 = wandb.Table(columns=["col"], data=[[1.3]])
table_3 = wandb.Table(columns=["col"], data=[["a"]])
assert t1.assign(table_2) == t1
assert t1.assign(table_3) == InvalidType()
def test_table_implicit_types():
table = wandb.Table(columns=["col"])
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
table = wandb.Table(columns=["col"], optional=False)
with pytest.raises(TypeError):
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
def test_table_allow_mixed_types():
table = wandb.Table(columns=["col"], allow_mixed_types=True)
table.add_data(None)
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
table = wandb.Table(columns=["col"], optional=False, allow_mixed_types=True)
with pytest.raises(TypeError):
table.add_data(None) # Still errors since optional is false
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
def test_tables_with_dicts():
good_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
]
bad_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
}
]
],
}
]
}
],
]
table = wandb.Table(columns=["A"], data=good_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=bad_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=good_data)
with pytest.raises(TypeError):
table = wandb.Table(columns=["A"], data=bad_data)
def test_table_explicit_types():
table = wandb.Table(columns=["a", "b"], dtype=int)
table.add_data(None, None)
table.add_data(1, 2)
with pytest.raises(TypeError):
table.add_data(1, "a")
table = wandb.Table(columns=["a", "b"], optional=False, dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
table = wandb.Table(columns=["a", "b"], optional=[False, True], dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
with pytest.raises(TypeError):
table.add_data(None, "a")
table.add_data(1, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
def test_table_type_cast():
table = wandb.Table(columns=["type_col"])
table.add_data(1)
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
table.cast("type_col", wb_classes.get_type())
table.add_data(2)
with pytest.raises(TypeError):
table.add_data(4)
box_annotation = {
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
}
mask_annotation = {
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
}
def test_table_specials():
table = wandb.Table(
columns=["image", "table"],
optional=False,
dtype=[data_types.Image, data_types.Table],
)
with pytest.raises(TypeError):
table.add_data(None, None)
# Infers specific types from first valid row
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
"hello",
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, "True", None]]),
)
# allows further refinement
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
# allows addition
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_nan_non_float():
import pandas as pd
wandb.Table(dataframe=pd.DataFrame(data=[["A"], [np.nan]], columns=["a"]))
def test_table_typing_numpy():
# Pulled from https://numpy.org/devdocs/user/basics.types.html
# Numerics
table = wandb.Table(columns=["A"], dtype=[NumberType])
table.add_data(None)
table.add_data(42)
table.add_data(np.byte(1))
table.add_data(np.short(42))
table.add_data(np.ushort(42))
table.add_data(np.intc(42))
table.add_data(np.uintc(42))
table.add_data(np.int_(42))
table.add_data(np.uint(42))
table.add_data(np.longlong(42))
table.add_data(np.ulonglong(42))
table.add_data(np.half(42))
table.add_data(np.float16(42))
table.add_data(np.single(42))
table.add_data(np.double(42))
table.add_data(np.longdouble(42))
table.add_data(np.csingle(42))
table.add_data(np.cdouble(42))
table.add_data(np.clongdouble(42))
table.add_data(np.int8(42))
table.add_data(np.int16(42))
table.add_data(np.int32(42))
table.add_data(np.int64(42))
table.add_data(np.uint8(42))
table.add_data(np.uint16(42))
table.add_data(np.uint32(42))
table.add_data(np.uint64(42))
table.add_data(np.intp(42))
table.add_data(np.uintp(42))
table.add_data(np.float32(42))
table.add_data(np.float64(42))
table.add_data(np.float_(42))
table.add_data(np.complex64(42))
table.add_data(np.complex128(42))
table.add_data(np.complex_(42))
# Booleans
table = wandb.Table(columns=["A"], dtype=[BooleanType])
table.add_data(None)
table.add_data(True)
table.add_data(False)
table.add_data(np.bool_(True))
# Array of Numerics
table = wandb.Table(columns=["A"], dtype=[[NumberType]])
table.add_data(None)
table.add_data([42])
table.add_data(np.array([1, 0], dtype=np.byte))
table.add_data(np.array([42, 42], dtype=np.short))
table.add_data(np.array([42, 42], dtype=np.ushort))
table.add_data(np.array([42, 42], dtype=np.intc))
table.add_data(np.array([42, 42], dtype=np.uintc))
table.add_data(np.array([42, 42], dtype=np.int_))
table.add_data(np.array([42, 42], dtype=np.uint))
table.add_data(np.array([42, 42], dtype=np.longlong))
table.add_data(np.array([42, 42], dtype=np.ulonglong))
table.add_data(np.array([42, 42], dtype=np.half))
table.add_data(np.array([42, 42], dtype=np.float16))
table.add_data(np.array([42, 42], dtype=np.single))
table.add_data(np.array([42, 42], dtype=np.double))
table.add_data(np.array([42, 42], dtype=np.longdouble))
table.add_data(np.array([42, 42], dtype=np.csingle))
table.add_data(np.array([42, 42], dtype=np.cdouble))
table.add_data(np.array([42, 42], dtype=np.clongdouble))
table.add_data(np.array([42, 42], dtype=np.int8))
table.add_data(np.array([42, 42], dtype=np.int16))
table.add_data(np.array([42, 42], dtype=np.int32))
table.add_data(np.array([42, 42], dtype=np.int64))
table.add_data(np.array([42, 42], dtype=np.uint8))
table.add_data(np.array([42, 42], dtype=np.uint16))
table.add_data(np.array([42, 42], dtype=np.uint32))
table.add_data(np.array([42, 42], dtype=np.uint64))
table.add_data(np.array([42, 42], dtype=np.intp))
table.add_data(np.array([42, 42], dtype=np.uintp))
table.add_data(np.array([42, 42], dtype=np.float32))
table.add_data(np.array([42, 42], dtype=np.float64))
table.add_data(np.array([42, 42], dtype=np.float_))
table.add_data(np.array([42, 42], dtype=np.complex64))
table.add_data(np.array([42, 42], dtype=np.complex128))
table.add_data(np.array([42, 42], dtype=np.complex_))
# Array of Booleans
table = wandb.Table(columns=["A"], dtype=[[BooleanType]])
table.add_data(None)
table.add_data([True])
table.add_data([False])
table.add_data(np.array([True, False], dtype=np.bool_))
# Nested arrays
table = wandb.Table(columns=["A"])
table.add_data([[[[1, 2, 3]]]])
table.add_data(np.array([[[[1, 2, 3]]]]))
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_typing_pandas():
import pandas as pd
# TODO: Pandas https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes
# Numerics
table = wandb.Table(dataframe=pd.DataFrame([[1], [0]]).astype(np.byte))
table.add_data(1)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.short))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ushort))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uintc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int_))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longlong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ulonglong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.half))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.float16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.single))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.double))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.csingle))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.cdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.clongdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int8))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int32))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint8))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint32))
table.add_data(42)
table = wandb.Table(dataframe= | pd.DataFrame([[42], [42]]) | pandas.DataFrame |
# http://github.com/timestocome
# take a look at the differences in daily returns for recent bull and bear markets
# http://afoysal.blogspot.com/2016/08/arma-and-arima-timeseries-prediction.html
# predictions appear to increase and decrease with actual returns but scale is much smaller
# of course if it was this easy there'd be a lot of rich statisticians in the world.
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_model import ARMA, ARIMA
from statsmodels.tsa.stattools import adfuller, arma_order_select_ic
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
# create target --- let's try Nasdaq value 1 day change
data['returns'] = (data['NASDAQ'] - data['NASDAQ'].shift(1)) / data['NASDAQ']
# remove nan row from target creation
data = data.dropna()
#########################################################################
# split into bear and bull markets
##########################################################################
bull1_start = pd.to_datetime('01-01-1990') # beginning of this dataset
bull1_end = pd.to_datetime('07-16-1990')
iraq_bear_start = pd.to_datetime('07-17-1990')
iraq_bear_end = pd.to_datetime('10-11-1990')
bull2_start = pd.to_datetime('10-12-1990')
bull2_end = pd.to_datetime('01-13-2000')
dotcom_bear_start = pd.to_datetime('01-14-2000')
dotcom_bear_end = pd.to_datetime('10-09-2002')
bull3_start = pd.to_datetime('10-10-2002')
bull3_end = pd.to_datetime('10-08-2007')
housing_bear_start = | pd.to_datetime('10-09-2007') | pandas.to_datetime |
from datetime import datetime
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats import zscore
import matplotlib.pyplot as pyplot
def drawHist(x):
#创建散点图
#第一个参数为点的横坐标
#第二个参数为点的纵坐标
pyplot.hist(x, 100)
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.title('gaosi')
pyplot.show()
def read():
return pd.read_csv('./google-play-store-apps/googleplaystore.csv')
def readComment():
return | pd.read_csv('./google-play-store-apps/googleplaystore_user_reviews.csv') | pandas.read_csv |
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
import shap
from sklearn.model_selection import ParameterGrid
from sklearn.preprocessing import MinMaxScaler
'''
Feature selection is done using XGBoost and SHAP.
XGBoost is an optimized distributed gradient boosting library designed to be highly efficient, flexible and portable.
It implements machine learning algorithms under the Gradient Boosting framework.
XGBoost provides a parallel tree boosting (also known as GBDT) that solve many data science problems in a fast and accurate way.
SHAP (SHapley Additive exPlanations) is a game theoretic approach to explain the output of any machine learning model.
It connects optimal credit allocation with local explanations using the classic Shapley values from game theory and their related extensions.
'''
class FeatureSelector:
def __init__(self, data_handler, max_num_features, show_plot):
self.data_handler = data_handler
self.select_features_using_xgboost(max_num_features, show_plot)
def select_features_using_xgboost(self, max_num_features, show_plot):
data_handler = self.data_handler
scaler = MinMaxScaler()
X_train, y_train = data_handler.X_train, data_handler.y_train
X_val, y_val = data_handler.X_val, data_handler.y_val
y_train = scaler.fit_transform(y_train.values.reshape(-1, 1)).reshape(-1)
y_val = scaler.transform(y_val.values.reshape(-1, 1)).reshape(-1)
gsearch_params={
# colsample_bytree: the fraction of features (randomly selected) that will be used to train each tree.
# gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree.
'max_depth': [1,2,3,4], 'learning_rate': [0.001, 0.005, 0.01], 'colsample_bytree': [0.5, 0.75],
'n_estimators': [250, 500, 750], 'objective': ['reg:squarederror'], 'gamma':[0, 0.1, 0.2]
}
param_grid = ParameterGrid(gsearch_params)
best_score, best_score_train = (-10000, -10000)
best_params, best_params_train = (0, 0)
print('Start: gridsearch using xgboost')
for params in param_grid: # iterate params until best score is found
# print(f"Testing {params}...")
# init xgboost regressor on each param set
xgb_regressor = xgb.XGBRegressor(**params)
trained_model = xgb_regressor.fit(X_train, y_train)
val_score = trained_model.score(X_val, y_val)
train_score = trained_model.score(X_train, y_train)
#print(f"Test Score: {test_score}")
#print(f"Train Score: {train_score}")
if val_score > best_score:
best_score = val_score
best_params = params
best_train = train_score
best_model = trained_model
print(f"Best VALIDATION R^2 is {best_score} with params: {best_params}")
print(f"TRAIN R^2 for best test params is {best_train}")
xgb_best = best_model
#plot_res(xgb_best.predict(X_val), y_val)
feature_importance_df = get_shap_importances(xgb_best, X_val, data_handler.features, show_plot)
feature_importance_dict = feature_importance_df.abs().sum().sort_values(ascending=False).to_dict() # sum over columns
self.important_features = [] # choose the most important features based on the sum of absolute SHAP values
max_feature_length = max_num_features
for key, val in feature_importance_dict.items():
if val <= 0 or len(self.important_features) > (max_feature_length-1):
break
self.important_features.append(key)
return
# Tree SHAP feature importance generation
def get_shap_importances(model, X_val, features, show_plot=False):
"""Generates feature importances based on tree shap"""
# intialize xgb regressor with best params
# initialize treeshap explainer with fitted model
explainer = shap.TreeExplainer(model)
# predict test data with the model's explainer
shap_values = explainer.shap_values(X_val)
# create summary feature importance chart
shap.summary_plot(shap_values, X_val, plot_type="bar", max_display=20, show=show_plot)
feature_importance_df = pd.DataFrame(shap_values, columns=features)
return feature_importance_df
def plot_res(y_hat, y_val):
# Helper function used for plotting residuals during training and testing
rolling_window = 7
y_hat_rolling = pd.DataFrame(y_hat).rolling(rolling_window).mean()
y_val_rolling = | pd.DataFrame(y_val) | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on dateimelike looking when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even thought the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be help as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
    @pytest.mark.parametrize(
        "data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
    )
    def test_constructor_infer_period(self, data_constructor):
        # Periods of a single frequency (plus None) are inferred as Period
        # dtype rather than object.
        data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
        result = pd.Series(data_constructor(data))
        expected = pd.Series(period_array(data))
        tm.assert_series_equal(result, expected)
        assert result.dtype == "Period[D]"
    def test_constructor_period_incompatible_frequency(self):
        # Mixed-frequency Periods cannot share a Period dtype, so the Series
        # falls back to object while preserving the values.
        data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
        result = pd.Series(data)
        assert result.dtype == object
        assert result.tolist() == data
    def test_constructor_periodindex(self):
        # GH7932
        # converting a PeriodIndex when put in a Series
        # The result keeps Period dtype and matches the object-cast index
        # element-wise.
        pi = period_range("20130101", periods=5, freq="D")
        s = Series(pi)
        assert s.dtype == "Period[D]"
        expected = Series(pi.astype(object))
        tm.assert_series_equal(s, expected)
    def test_constructor_dict(self):
        # dict keys absent from the explicit index become NaN; index order wins.
        d = {"a": 0.0, "b": 1.0, "c": 2.0}
        result = Series(d, index=["b", "c", "d", "a"])
        expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
        tm.assert_series_equal(result, expected)
        # NOTE(review): test continues beyond this excerpt.
        pidx = tm.makePeriodIndex(100)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 22:37:36 2020
@author: arti
"""
import pandas as pd
# Load the Titanic data set from the working directory and preview it.
df = pd.read_csv('./titanic.csv')
print(df.head())
print('--')
# Widen pandas' column display (set after the first head() print above, so it
# only affects subsequent output).
pd.set_option('display.max_columns', 15)
import numpy as np
import pandas as pd
import os
import time
import shutil
from numpy import array
from numpy import argmax
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import indoor_location.getWordVector as getWordVector
import indoor_location.globalConfig as globalConfig
from sklearn.utils import shuffle
from indoor_location import utils
# Target path of the sample data set; may point at the training or test set.
# sampleDataSetFilePath = '.\\sample_train.csv'  # previous hard-coded path
sampleDataSetFilePath = globalConfig.sample_dataset_file_path
sample_count = 0  # running count of generated samples
initVal = -128  # initial RSSI value for every feature dimension (i.e. "not seen")
timeInterval = globalConfig.timeInterval  # sampling window length
valid_ibeacon_end_index = 0  # index into the ibeacon stats of the last MAC used as a feature
WholeTimeInterval = 1000 * 360  # currently unused
# offset = 0 # TODO meaning unclear
# stableCount = globalConfig.stableCount # feature-count setting; replaced by a fixed MAC count
ibeacon_column_tags = ["scan_index", "count", "mac", "uuid", ]  # ibeacon stats table columns
ibeacon_dataset = pd.DataFrame(columns=ibeacon_column_tags)  # global MAC frequency table
out_file_created = os.path.exists(sampleDataSetFilePath)  # whether the output file already exists
valid_ibeacon_file = globalConfig.valid_ibeacon_file
train_dataset_file_path = globalConfig.train_dataset_file_path
valid_dataset_file_path = globalConfig.valid_dataset_file_path
test_dataset_file_path = globalConfig.test_dataset_file_path
def load_all_txt2csv(txt_rootdir, csv_rootdir):
    """
    Convert every raw .txt capture under ``txt_rootdir`` to CSV.

    :param txt_rootdir: string
        root of the txt files, one sub-directory per class label
    :param csv_rootdir: string
        root where the generated CSVs are written (missing class directories
        are created by txt2csv)
    :return: nothing
        Converted file names get a date stamp appended to avoid collisions.
    """
    paths = os.listdir(txt_rootdir)  # every entry under the txt root
    for classDir in paths:
        classPath = os.path.join(txt_rootdir, classDir)  # source dir of one class
        dist_class_path = os.path.join(csv_rootdir, classDir)  # target dir of the same class
        if os.path.isdir(classPath):  # each sub-directory is treated as one class
            files = os.listdir(classPath)
            for txt in files:
                # NOTE(review): every file in the directory is converted,
                # not only *.txt — verify that is intended.
                rawPoints_file_path = os.path.join(classPath, txt)
                txt2csv(rawPoints_file_path, dist_class_path)
def merge_dataset(set_file_path1, set_file_path2, merged_file_path):
    """
    Concatenate two CSV data sets, shuffle the rows and write the result.

    :param set_file_path1: path of the first CSV data set
    :param set_file_path2: path of the second CSV data set
    :param merged_file_path: path the shuffled, merged CSV is written to
    :return: nothing
    """
    df1 = pd.read_csv(set_file_path1)
    df2 = pd.read_csv(set_file_path2)
    df = pd.concat([df1, df2])
    # BUG FIX: sklearn.utils.shuffle returns a shuffled copy -- the original
    # code discarded the return value, so the merged file was never shuffled.
    df = shuffle(df)
    # index=False keeps the output format consistent with divide_sample_dataset
    # and avoids persisting the duplicated indices produced by pd.concat.
    df.to_csv(merged_file_path, index=False)
def merge_all_csv(original_csv_rootdir, plus_csv_rootdir):
    """
    Merge newly collected CSV files into the master data directory.

    :param original_csv_rootdir: string
        target root; must already contain a sub-directory for every class,
        otherwise shutil.copy raises
    :param plus_csv_rootdir: string
        source root holding the new data (same per-class directory layout)
    :return: nothing
    """
    dirs = os.listdir(plus_csv_rootdir)  # every class directory of the new data
    for class_dir in dirs:
        class_base_dir = os.path.join(plus_csv_rootdir, class_dir)
        dist_class_dir = os.path.join(original_csv_rootdir, class_dir)
        if os.path.isdir(class_base_dir):
            csvs = os.listdir(class_base_dir)  # copy every file of this class
            for csv in csvs:
                csv_path = os.path.join(class_base_dir, csv)
                # BUG FIX: the original re-assigned the destination directory
                # variable here, so from the second file on the target path
                # kept accumulating previous file names and the copy failed.
                dist_file_path = os.path.join(dist_class_dir, csv)
                shutil.copy(csv_path, dist_file_path)  # raises if target class dir is missing
def update_all_ibeacon_dataset(csv_rootdir, ibeacon_file_path):
    """
    Rebuild the ibeacon (MAC address) statistics table from all capture CSVs.

    :param csv_rootdir: string
        CSV source root; MAC counts are accumulated over every class directory
    :param ibeacon_file_path:
        output path of the statistics table (file name included); an existing
        file is overwritten
    :return: nothing
        Mutates and persists the module-level ``ibeacon_dataset``.
    """
    dirs = os.listdir(csv_rootdir)  # every class directory under the CSV root
    for csv_dir in dirs:
        class_dir = os.path.join(csv_rootdir, csv_dir)
        files = os.listdir(class_dir)
        for file in files:
            file_path = os.path.join(class_dir, file)
            data = pd.read_csv(file_path, names=['time', 'uuid', 'mac', 'rssi'])
            update_ibeacon_dataset(data)  # accumulate counts into the global table
    # Most frequently seen MACs first, then persist without the index column.
    ibeacon_dataset.sort_values("count", inplace=True, ascending=False)
    ibeacon_dataset.to_csv(ibeacon_file_path, index=0)  # 保存ibeacon_dataset到csv文件 -> persist the table
    # Debug output: re-read the file to show what was written.
    ibeacon_csv = pd.read_csv(ibeacon_file_path)
    print('ibeacon_csv:')
    print(ibeacon_csv)
    print(ibeacon_csv.dtypes)
def divide_sample_dataset(sample_dataset):
    """Split the full sample set 80/10/10 into train/valid/test CSV files.

    The split is deterministic (random_state=0) and the three frames are
    written to the module-level configured paths without an index column.
    """
    train_part = sample_dataset.sample(frac=0.8, random_state=0)
    remainder = sample_dataset.drop(train_part.index)
    valid_part = remainder.sample(frac=0.5, random_state=0)
    test_part = remainder.drop(valid_part.index)
    splits = (
        (train_part, train_dataset_file_path),
        (valid_part, valid_dataset_file_path),
        (test_part, test_dataset_file_path),
    )
    for frame, path in splits:
        frame.to_csv(path, index=False, encoding='utf-8')
def load_dataset(dataset):
    """Split a sample DataFrame into model input, coordinates and tags.

    Column layout: 0 = reference tag, 1-2 = (x, y) coordinates,
    5 = encoded input column.

    :return: (input column, (x, y) coordinate array, reference tag column)
    """
    matrix = dataset.values
    tags = matrix[:, 0]
    coords = matrix[:, 1:3]  # columns 1 and 2 (end index exclusive)
    inputs = matrix[:, 5]  # encoded feature column
    return inputs, coords, tags
def load_data(data_file, divide_dataset_flag):
    """
    Load the training and validation splits for model training.

    :param data_file: path of the full sample data set CSV
    :param divide_dataset_flag: when True, re-shuffle and re-split the full
        set into the train/valid/test files before loading them
    :return: train input, train coordinates, train tags,
             valid input, valid coordinates, valid tags
    """
    dataset = pd.read_csv(data_file)
    shuffled_dataset = shuffle(dataset)
    if divide_dataset_flag:
        divide_sample_dataset(shuffled_dataset)
    # Always read the splits back from disk (previously written splits are
    # reused when divide_dataset_flag is False).
    train_dataset = pd.read_csv(train_dataset_file_path)
    valid_dataset = pd.read_csv(valid_dataset_file_path)
    train_input, train_coordinates, train_reference_tag = load_dataset(train_dataset)
    valid_input, valid_coordinates, valid_reference_tag = load_dataset(valid_dataset)
    # Coordinates are regression targets, so force a float dtype.
    train_coordinates = train_coordinates.astype(float)
    valid_coordinates = valid_coordinates.astype(float)
    return train_input, train_coordinates, train_reference_tag, valid_input, valid_coordinates, valid_reference_tag
# generate sample dataset
def create_sample_dataset(pointCsvRootDir,
                          mac_rssi_word_flag = True,
                          onehot_mac_rssi_flag = False,
                          slice_and_average_flag = False):
    """
    Build the sample data set from the per-point CSVs, filtering features by
    the valid-ibeacon table, then split it into train/valid/test sets.

    :param pointCsvRootDir: string
        root directory of the per-class CSV data
    :param mac_rssi_word_flag: encode samples as mac_rssi word-id sequences
    :param onehot_mac_rssi_flag: encode samples as one-hot mac_rssi sequences
    :param slice_and_average_flag: encode samples as averaged RSSI vectors
    :return: nothing (writes the sample-set and split CSV files)
    """
    ibeacon_csv = pd.read_csv(globalConfig.valid_ibeacon_file)  # NOTE(review): read but unused here
    if mac_rssi_word_flag:
        ## build the word <-> id maps from the valid-AP table
        word2id_dict, id2word_dict = utils.gen_word_id_map_from_valid_ap(globalConfig.valid_ibeacon_file,
                                                                       globalConfig.word_id_map_file_path)
    ## column names of the sample-set CSV
    column_tags = config_column_tags()
    ## generate the sample set
    # NOTE(review): word2id_dict/id2word_dict are only bound when
    # mac_rssi_word_flag is True -- verify the other flag combinations.
    load_all_csv_file2sample_set(pointCsvRootDir, column_tags,
                                 word2id_dict=word2id_dict, id2word_dict=id2word_dict,
                                 mac_rssi_word_flag=mac_rssi_word_flag,
                                 onehot_mac_rssi_flag=onehot_mac_rssi_flag,
                                 slice_and_average_flag=slice_and_average_flag)
    ## split the generated sample set into train / valid / test sets
    dataset = pd.read_csv(globalConfig.sample_dataset_file_path)
    shuffled_dataset = shuffle(dataset)
    divide_sample_dataset(shuffled_dataset)
def update_ibeacon_dataset(point_data):
    """
    Fold one capture file's per-MAC packet counts into the global ibeacon table.

    :param point_data: DataFrame or TextParser
        raw capture with columns time/uuid/mac/rssi
    :return: nothing
        Mutates the module-level ``ibeacon_dataset``.
    """
    global ibeacon_dataset
    groupby_data = point_data.groupby(['mac'])  # group readings per MAC address
    for mac_value, group_data in groupby_data:
        # Debug output while tuning the pipeline.
        print("group_data: \n{}".format(group_data))
        print("group_data.count(): \n{}".format(group_data.count()))
        print("group_data['mac'].count()\n{}".format(group_data['mac'].count()))
        macDF = ibeacon_dataset[ibeacon_dataset['mac'].isin([mac_value])]
        mac_count = group_data['mac'].count()
        if macDF.empty == True:
            # First time this MAC is seen: append a new row (uuid unknown -> -1).
            data = ({'count': mac_count, 'mac': mac_value, 'uuid': -1, },)
            mac_dataDF = pd.DataFrame(data)
            # NOTE(review): DataFrame.append is deprecated in recent pandas;
            # consider pd.concat when upgrading.
            ibeacon_dataset = ibeacon_dataset.append(mac_dataDF, ignore_index=True)
        else:
            # MAC already present: add this file's count to the stored total.
            index = macDF.index.values[0]
            ibeacon_dataset.loc[index:index, 'count'] = macDF['count'] + mac_count
def config_column_tags(valid_ibeacon_csv=None, onehotmac_rssi_flag=False, mac_rssi_word_flag=True):
    """
    Decide the column names of the sample-set CSV.

    :param valid_ibeacon_csv: DataFrame or None
        valid-AP table; when given (and both flags are False) one column per
        MAC address is appended after the five label columns
    :param onehotmac_rssi_flag: use a single one-hot mac_rssi sentence column
    :param mac_rssi_word_flag: use a single mac_rssi word-id sentence column
    :return: list<string> of column names
    :raises ValueError: when no layout is selected (both flags False and no
        valid_ibeacon_csv given); the original code raised UnboundLocalError
        in that case
    """
    base_tags = ["reference_tag", "coordinate_x", "coordinate_y", "cluster_tag", "direction_tag"]
    if mac_rssi_word_flag:
        return base_tags + ["mac_rssi_sentence"]
    if onehotmac_rssi_flag:
        return base_tags + ["onehotmac_rssi_sentence"]
    # BUG FIX: the original used ``elif valid_ibeacon_csv:`` which raises
    # "The truth value of a DataFrame is ambiguous" for any DataFrame input.
    if valid_ibeacon_csv is not None:
        column_tags = list(base_tags)
        for index, row in valid_ibeacon_csv.iterrows():
            # One feature column per MAC address.
            column_tags.append(row["mac"])
        return column_tags
    raise ValueError("no column layout selected: pass a valid_ibeacon_csv "
                     "or enable one of the encoding flags")
def txt2csv(referenceRawPointFile, dist_dir):
    """
    Convert one raw capture .txt file (time,uuid,mac,rssi rows) to CSV.

    :param referenceRawPointFile: string
        path of the txt file
    :param dist_dir: string
        target directory (created if missing)
    :return: nothing
        The output name is the source file's stem plus a conversion date
        stamp, e.g. ``point1_2020_01_31.csv``.
    """
    # BUG FIX: the original built paths with hard-coded "\\" separators and
    # str.split, which only works on Windows; os.path keeps it portable.
    txt_name = os.path.basename(referenceRawPointFile)  # file name without directories
    rawPoints_file_name = os.path.splitext(txt_name)[0]  # stem without the extension
    newPoints_file_name = rawPoints_file_name + time.strftime("_%Y_%m_%d", time.localtime()) + ".csv"  # add a date stamp
    dist_file_path = os.path.join(dist_dir, newPoints_file_name)
    txt = pd.read_table(referenceRawPointFile, header=None, sep=',', names=['time', 'uuid', 'mac', 'rssi'])
    if not os.path.exists(dist_dir):  # create the target directory on demand
        os.makedirs(dist_dir)
    txt.to_csv(dist_file_path, index=False, encoding='utf-8')
def deprecated_slice_and_average(referencePoint_csv, reference_tag, cluster_tag, direction_tag,
                                 coordinate_x, coordinate_y, column_tags, samples_dataset):
    """
    DEPRECATED: superseded by slice_and_average (same algorithm, but collects
    rows in a list instead of appending DataFrames one by one).

    Slice one reference point's capture into fixed time windows and average
    the RSSI per MAC inside each window, appending one sample row per window.

    :param referencePoint_csv: DataFrame
        raw capture to process (columns time/mac/rssi/...)
    :param reference_tag: string
        reference-point label of the capture
    :param direction_tag: string
        sampling direction label
    :param column_tags: list<string>
        sample-set column names (first five are label columns)
    :return: DataFrame
        ``samples_dataset`` with the new sample rows appended
    """
    i = 0
    j = 0
    # Walk the rows of the capture; each iteration of the outer loop handles
    # one time window. After each window, i is advanced to the next window's
    # first row.
    rownum = referencePoint_csv.shape[0]  # number of data rows
    tag_map = {}
    for idx in range(len(column_tags)):  # dict lookup is faster than list.index
        tag_map[column_tags[idx]] = idx
    while i < rownum:
        # if referencePoint_csv['time'][i] > referencePoint_csv['time'][0] + WholeTimeInterval / 2:
        #     break
        macs_rssi = [initVal for i in range(len(column_tags))]  # initialise every dimension to -128
        time_col = referencePoint_csv['time']
        while j < rownum and time_col[j] < time_col[i] + timeInterval:
            j += 1
        if j >= rownum:  # drop the trailing rows that do not fill a whole window
            break
        # A MAC seen several times in one window contributes its mean RSSI.
        groupby_data = referencePoint_csv.iloc[i: j].groupby(['mac'])  # group by MAC
        for mac_value, group_data in groupby_data:
            tag_idx = tag_map.get(mac_value, -1)
            if tag_idx > -1:
                rrsi_mean = group_data['rssi'].mean()
                macs_rssi[tag_idx] = rrsi_mean
        #### fill in the label columns of this sample ####
        # column_tags starts with: "reference_tag", "coordinate_x",
        # "coordinate_y", "cluster_tag", "direction_tag"
        macs_rssi[0] = reference_tag
        macs_rssi[1] , macs_rssi[2] = coordinate_x,coordinate_y
        macs_rssi[3] = cluster_tag
        macs_rssi[4] = direction_tag
        macs_rssi = np.array(macs_rssi).reshape(1, len(macs_rssi))
        macs_rssiDF = pd.DataFrame(data=macs_rssi, columns=column_tags)
        #### append this window's sample to the sample set ####
        samples_dataset = samples_dataset.append(macs_rssiDF, ignore_index=True)
        i = j  # advance to the first row of the next window
    return samples_dataset
def slice_and_average(referencePoint_csv, column_tags, reference_tag,
                      cluster_tag, direction_tag, coordinate_x, coordinate_y):
    """
    Slice one reference point's capture into fixed time windows and average
    the RSSI per MAC inside each window; each window becomes one sample row.

    :param referencePoint_csv: DataFrame
        raw capture to process (columns time/mac/rssi/...)
    :param column_tags: list<string>
        sample-set column names; the first five are label columns
    :param reference_tag: string
        reference-point label of the capture
    :param direction_tag: string
        sampling direction label
    :return: DataFrame
        one row per complete time window
    """
    i = 0
    j = 0
    # Walk the rows of the capture; each outer iteration handles one time
    # window and then advances i to the next window's first row.
    rownum = referencePoint_csv.shape[0]  # number of data rows
    all_samples = []
    tag_map = {}
    for idx in range(len(column_tags)):  # dict lookup is faster than list.index
        tag_map[column_tags[idx]] = idx
    while i < rownum:
        one_sample = [initVal for i in range(len(column_tags))]  # initialise every dimension to -128
        time_col = referencePoint_csv['time']
        while j < rownum and time_col[j] < time_col[i] + timeInterval:
            j += 1
        if j >= rownum:  # drop the trailing rows that do not fill a whole window
            break
        # A MAC seen several times in one window contributes its mean RSSI.
        groupby_data = referencePoint_csv.iloc[i: j].groupby(['mac'])  # group by MAC
        for mac_value, group_data in groupby_data:
            tag_idx = tag_map.get(mac_value, -1)
            if tag_idx > -1:
                rrsi_mean = group_data['rssi'].mean()
                one_sample[tag_idx] = rrsi_mean
        #### fill in the label columns of this sample ####
        # column_tags starts with: "reference_tag", "coordinate_x",
        # "coordinate_y", "cluster_tag", "direction_tag"
        one_sample[0] = reference_tag
        one_sample[1] , one_sample[2] = coordinate_x,coordinate_y
        one_sample[3] = cluster_tag
        one_sample[4] = direction_tag
        #### collect this window's sample ####
        all_samples.append(one_sample)
        i = j  # advance to the first row of the next window
    samples_dataset = pd.DataFrame(all_samples, columns=column_tags)
    return samples_dataset
def config_coordinate(reference_tag):
    """Look up the (x, y) coordinates of a reference-point tag in the
    configured coordinates CSV and return them as two scalars."""
    table = pd.read_csv(globalConfig.reference_points_coordinates_file)
    match = table[table['reference_tag'].isin([reference_tag, ])]
    xs = match['coordinate_x'].values  # per-match arrays; a single hit is expected
    ys = match['coordinate_y'].values
    return xs[0], ys[0]
def csv2sample_data_mac_rssi_word(referencePoint_csv, reference_tag, coordinate_x, coordinate_y,
                                  cluster_tag, direction_tag, column_tags, timeInterval,
                                  word2id_dict, id2word_dict):
    """Encode one capture as samples of mac_rssi word-id sequences.

    Each time window of ``timeInterval`` containing at least 10 valid AP
    packets becomes one sample: the five label columns plus the list of
    word ids (one per "<mac>_<rssi>" token seen in the window).
    """
    all_samples = []
    one_sample = []  # label columns + word-id list of the current window
    one_sample_mac_rssi = []  # word ids collected in the current window
    valid_mac_list = pd.read_csv(globalConfig.valid_ibeacon_file)['mac'].values.tolist()
    # Walk the rows of the capture; each window of timeInterval becomes one
    # sample, then the window start is reset to the current row.
    begin_time = referencePoint_csv.iloc[0][0]  # start time of the current window
    for row in referencePoint_csv.itertuples():
        ######### window exceeded: flush the current sample #########
        if row.time > begin_time + timeInterval and len(one_sample_mac_rssi) >= 10: # require >= 10 valid AP packets per sample
            one_sample.append(reference_tag)
            one_sample.append(coordinate_x)
            one_sample.append(coordinate_y)
            one_sample.append(cluster_tag)
            one_sample.append(direction_tag)
            one_sample.append(one_sample_mac_rssi)
            all_samples.append(one_sample)  # add the finished sample to the set
            one_sample = []
            one_sample_mac_rssi = []
            begin_time = row.time  # restart the window at the current row
        ###### inside the window: collect this row's word id #######
        if row.mac in valid_mac_list:
            word = row.mac + '_' + str(row.rssi) # a word is the MAC combined with its RSSI value
            # id = word2id_dict[word]
            id = word2id_dict.get(word, word2id_dict['[UNK]'])
            one_sample_mac_rssi.append(id)
    samples_dataset = pd.DataFrame(all_samples, columns=column_tags)
    return samples_dataset
def csv2sample_data_onehot_mac_rssi(referencePoint_csv, reference_tag, coordinate_x, coordinate_y,
                                    cluster_tag, direction_tag, column_tags, timeInterval
                                    ):
    """Encode one capture as samples of one-hot mac_rssi id sequences.

    Same windowing as csv2sample_data_mac_rssi_word, but MAC validity is
    checked via getWordVector.get_onehot and ids come from its word2id_map.
    """
    all_samples = []
    one_sample = []  # label columns + id list of the current window
    one_sample_mac_rssi = []  # ids collected in the current window
    # Walk the rows of the capture; each window of timeInterval becomes one
    # sample, then the window start is reset to the current row.
    begin_time = referencePoint_csv.iloc[0][0]  # start time of the current window
    for row in referencePoint_csv.itertuples():
        ######### window exceeded: flush the current sample #########
        if row.time > begin_time + timeInterval:
            one_sample.append(reference_tag)
            one_sample.append(coordinate_x)
            one_sample.append(coordinate_y)
            one_sample.append(cluster_tag)
            one_sample.append(direction_tag)
            one_sample.append(one_sample_mac_rssi)
            all_samples.append(one_sample)  # add the finished sample to the set
            one_sample = []
            one_sample_mac_rssi = []
            begin_time = row.time  # restart the window at the current row
        ###### inside the window: collect this row's id #######
        onehot_mac = getWordVector.get_onehot(row.mac)
        if onehot_mac is None:
            continue
        else:
            word = row.mac + '_' + str(row.rssi) # a word is the MAC combined with its RSSI value
            id = getWordVector.word2id_map[word]
            one_sample_mac_rssi.append(id)
    samples_dataset = pd.DataFrame(all_samples, columns=column_tags)
    # NOTE(review): there is no explicit return here, so the built DataFrame
    # is discarded -- this looks truncated; verify samples_dataset should be
    # returned like in csv2sample_data_mac_rssi_word.
#%%
import os
from typing import Dict
from pandas.core.frame import DataFrame
import pandas
import seaborn as sns
import matplotlib.pyplot as plt
#%%
outDir = "results"  # processed per-block output tables
chunkDir = "chunk_data"  # raw per-chunk block counts
miningDir = "mining_data"  # raw mining-technique logs
def chunk_data():
    """Aggregate all chunk CSVs into per-block average counts per y level
    and write the results under ``results/``."""
    files = []
    container: Dict[str, DataFrame] = {}
    # Load every per-chunk CSV and combine them into one frame.
    for filename in os.listdir(chunkDir):
        with open('chunk_data/' + filename) as file:
            data = pandas.read_csv(file)
            files.append(data)
    print("Files added")
    combined = pandas.concat(files)
    print("Files combined")
    # One accumulator frame per block type (block columns start at index 3).
    for col in combined.columns[3:]:
        container[col] = DataFrame(columns=['y', 'avgBlocksPerChunk'])
    print("Columns added")
    # Average each block's count per y level over the whole world height.
    for y in range(-64, 320):
        y_df = combined[combined['y'] == y]
        for col in y_df.columns[3:]:
            # NOTE(review): DataFrame.append is deprecated in recent pandas;
            # consider collecting rows and building the frame once.
            container[col] = container[col].append({'y': y, 'avgBlocksPerChunk': y_df.mean()[col]}, ignore_index=True)
    print("Range looped")
    # Air gets an extra full-height table; all blocks get a <=65 table.
    air = container["air"];
    air.to_csv("results/chunks_air_full_range.csv", index=False)
    for subset in container:
        blockData = container[subset]
        blockData[blockData['y'] <= 65].to_csv("results/" + subset + "_chunks.csv", index=False)
def chunk_graphs():
    """Render a line plot (blocks per chunk vs. y) for every chunk table in
    ``results/`` and save the images under ``graphical_results/``."""
    sns.set_theme()
    for filename in os.listdir(outDir):
        if filename.count("chunks") > 0: # Only want the chunk data for this
            plt.figure(figsize=(35, 10))
            dataset: DataFrame = pandas.read_csv('results/' + filename)
            figure = sns.lineplot(data=dataset, x="y", y="avgBlocksPerChunk")
            if filename.count("full") > 0: # The full range needs slight changes
                # Full-height air plot: fixed axes over the whole world height.
                figure.set_xlim([-64, 320])
                figure.set_ylim([0, 260])
                figure.set_title("Air Blocks Full Range")
                plt.savefig("graphical_results/chunks_air_full.png")
            else:
                # Regular block plot: underground range, titled by block name.
                figure.set_xlim([-64, 65])
                figure.set_title(filename.split("_")[0])
                plt.savefig("graphical_results/chunks_" + filename.split("_")[0] + ".png")
def techniqueData():
    """Load the mining logs, splitting them into the two techniques
    ("branch" files vs. the rest) and combining each group."""
    t1 = []  # frames of "branch" technique files
    t2 = []  # frames of the other technique's files
    container1: Dict[str, DataFrame] = {}
    container2: Dict[str, DataFrame] = {}
    for filename in os.listdir(miningDir):
        with open('mining_data/' + filename) as file:
            data = pandas.read_csv(file)
            if filename.count("branch") > 0:
                t1.append(data)
            else:
                t2.append(data)
    print("Files added")
    combined1 = pandas.concat(t1)
    # NOTE(review): the function body appears truncated here -- the combined
    # frames are built but not yet used or returned in this excerpt.
    combined2 = pandas.concat(t2)
#!/usr/bin/env python
# coding: utf-8
# # Predicting Student Academic Performance
# ## an exploration in data visualiation and machine learning efffectiveness
# #### The goal of this project was to examine a number of ML algorithms that were capable of adjusting to categorical data and attempt to predict student performance. Some parts about our problem that make it unique are: There are 3 classes and most of our data is categorical data and not purely quantitative. Our goal with this was to perform some initial data visualzation and to determine which classifier handles this data the best.
# ##### Our project used the Kaggle.com dataset found [here](https://www.kaggle.com/aljarah/xAPI-Edu-Data).
# ## Reading in the data
# In[1]:
import pandas as pd # a wonderful dataframe to work with
import numpy as np # adding a number of mathematical and science functions
import seaborn as sns # a very easy to use statistical data visualization package
import matplotlib.pyplot as plt # a required plotting tool
import warnings
# sklearn is a big source of pre-written and mostly optimized ML algorithms.
# Here we use their Decision trees, Support Vector Machines, and the classic Perceptron.
from sklearn import preprocessing, svm
from sklearn.linear_model import Perceptron
from sklearn.tree import DecisionTreeClassifier
#ignore warnings
warnings.filterwarnings('ignore')
# Load the student-performance data set and preview the first rows.
data = pd.read_csv("../../../input/aljarah_xAPI-Edu-Data/xAPI-Edu-Data.csv")
data.head()
# In[2]:
data.tail()
# ## Data Fields
# <table>
# <tr>
# <th>Data Field</th>
# <th>Description</th>
# </tr>
# <tr>
# <th>gender</th>
# <td>The student's gender.</td>
# </tr>
# <tr>
# <th>NationalITy</th>
# <td>The student's nationality.</td>
# </tr>
# <tr>
# <th>PlaceofBirth</th>
# <td>The student's country of birth.</td>
# </tr>
# <tr>
# <th>StageID</th>
# <td>Educational level student belongs to (Elementary, Middle, or High School).</td>
# </tr>
# <tr>
# <th>GradeID</th>
# <td>The grade year of the student.</td>
# </tr>
# <tr>
# <th>SectionID</th>
# <td>The classroom the student is in.</td>
# </tr>
# <tr>
# <th>Topic</th>
# <td>The topic of the course.</td>
# </tr>
# <tr>
# <th>Semester</th>
# <td>The semester of the school year. (F for Fall, S for Spring)</td>
# </tr>
# <tr>
# <th>Relation</th>
# <td>The parent responsible for student.</td>
# </tr>
# <tr>
# <th>raisedhands</th>
# <td>How many times the student raises his/her hand on classroom</td>
# </tr>
# <tr>
# <th>VisITedResources</th>
# <td>How many times the student visits a course content</td>
# </tr>
# <tr>
# <th>AnnouncementsView</th>
# <td>How many times the student checks the new announcements</td>
# </tr>
# <tr>
# <th>Discussion</th>
# <td>How many times the student participate on discussion groups</td>
# </tr>
# <tr>
# <th>ParentAnsweringSurvey</th>
# <td>Parent answered the surveys which are provided from school or not</td>
# </tr>
# <tr>
# <th>ParentschoolSatisfaction</th>
# <td>Whether or not the parents were satisfied. "Good" or "Bad". Oddly this was not null for parents who did not answer the survey. It is unclear how this value was filled in.</td>
# </tr>
# <tr>
# <th>StudentAbsenceDays</th>
# <td>Whether or not a student was absent for more than 7 days</td>
# </tr>
# <tr>
# <th>Class</th>
# <th>Our classification field. 'L' is for students who got a failing percentage (Less than 69%), 'M' for students who got a low passing grade (Between 70% and 89%), and 'H' for students who achieved high marks in their course (90% to 100%)</th>
# </tr>
# </table>
#
# ## Preliminary Data Visualization
# #### Our goal with our data visualization is to get an idea of the shape of our data and to see if we can easily identify any possible outliers. Because this is primarily categorical data we look mostly at countplots of the datafields and our classes. We also look to see if any of our data is unclear or redundant.
# In[3]:
# Bar chart of the class distribution, annotated with each class's share
# of the data set as a percentage.
ax = sns.countplot(x='Class', data=data, order=['L', 'M', 'H'])
for p in ax.patches:
    ax.annotate('{:.2f}%'.format((p.get_height() * 100) / len(data)), (p.get_x() + 0.24, p.get_height() + 2))
print()
# In[4]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='gender', data=data, order=['M','F'], ax=axarr[0])
sns.countplot(x='gender', hue='Class', data=data, order=['M', 'F'],hue_order = ['L', 'M', 'H'], ax=axarr[1])
print()
# In[5]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='NationalITy', data=data, ax=axarr[0])
sns.countplot(x='NationalITy', hue='Class', data=data,hue_order = ['L', 'M', 'H'], ax=axarr[1])
print()
# In[6]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='PlaceofBirth', data=data, ax=axarr[0])
sns.countplot(x='PlaceofBirth', hue='Class', data=data,hue_order = ['L', 'M', 'H'], ax=axarr[1])
print()
# In[7]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='StageID', data=data, ax=axarr[0])
sns.countplot(x='StageID', hue='Class', data=data, hue_order = ['L', 'M', 'H'], ax=axarr[1])
print()
# In[8]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='GradeID', data=data, order=['G-02', 'G-04', 'G-05', 'G-06', 'G-07', 'G-08', 'G-09', 'G-10', 'G-11', 'G-12'], ax=axarr[0])
sns.countplot(x='GradeID', hue='Class', data=data, order=['G-02', 'G-04', 'G-05', 'G-06', 'G-07', 'G-08', 'G-09', 'G-10', 'G-11', 'G-12'], hue_order = ['L', 'M', 'H'], ax=axarr[1])
print()
# #### Looking at these results, Grades 5, 9, and 10 have very few counts. In addition to that, no 5th grade students pass and no 9th grade students achieve high marks. Perhaps these are outliers?
# In[9]:
# Inspect the raw rows of the sparsely-populated grade years flagged above.
#Students in Grade 5
data.loc[data['GradeID'] == 'G-05']
# In[10]:
#Students in Grade 9
data.loc[data['GradeID'] == 'G-09']
# #### After looking at the rows themselves, The grade 5 students appear to have similar data to all other students who did not pass (missed more than 7 days, low numerical values, no school survey, etc.)
# #### And again, after examining the data for the grade 9 students it also looks like what we would likely come to expect for each category.
# #### We will look a bit further at these later.
# In[11]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='SectionID', data=data, order=['A', 'B', 'C'], ax = axarr[0])
sns.countplot(x='SectionID', hue='Class', data=data, order=['A', 'B', 'C'],hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# In[12]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='Topic', data=data, ax = axarr[0])
sns.countplot(x='Topic', hue='Class', data=data,hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# #### An interesting thing to note is that no Geology students fail. We will look into this in a second.
# In[13]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='Semester', data=data, ax = axarr[0])
sns.countplot(x='Semester', hue='Class', data=data,hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# In[14]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='Relation', data=data, ax = axarr[0])
sns.countplot(x='Relation', hue='Class', data=data,hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# #### Just looking at this there seems to be a correlation between students who have mothers as their primary caregiver and students who are less likely to fail.
#
# ### Next, we take a look at our measurable data. The recorded number of times a student: Raised their hand, Visited the course's resources, Viewed the online course's Announcements page, and Visited the Discussion pages. For easier visual comparison, we plot these together:
# In[15]:
print()
print()
# In[16]:
data.groupby('Topic').median()
# #### Here we can see part of the likely reason why the all of the geology students pass. They have far higher median numerical values than most other courses.
# In[17]:
data.groupby('GradeID').median()
# #### Here, looking at the median data again we can see part of the likely reason why the 5th and 9th grade students performed as they did as well.
# In[18]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='ParentAnsweringSurvey', data=data, order=['Yes', 'No'], ax = axarr[0])
sns.countplot(x='ParentAnsweringSurvey', hue='Class', data=data, order=['Yes', 'No'], hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# #### Looking at this graph brings a number of questions regarding the causation of this to mind. Were the parents more likely to answer the survey because their student did well, or did the student's performance influence the responses? Unfortunately, like many times, this is one of the questions that arises while looking at data visualizations that we just don't have access to the answer with the data.
# In[19]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='ParentschoolSatisfaction', data=data, order=['Good', 'Bad'], ax = axarr[0])
sns.countplot(x='ParentschoolSatisfaction', hue='Class', data=data, order=['Good', 'Bad'],hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# #### The same kind of causation questions arise when looking at the result of the parent's satisfaction with the school.
# In[20]:
fig, axarr = plt.subplots(2,figsize=(10,10))
sns.countplot(x='StudentAbsenceDays', data=data, order=['Under-7', 'Above-7'], ax = axarr[0])
sns.countplot(x='StudentAbsenceDays', hue='Class', data=data, order=['Under-7', 'Above-7'],hue_order = ['L', 'M', 'H'], ax = axarr[1])
print()
# #### StudentAbsenceDays seems to have a strong correlation with our Class variable. Very few students who missed more than 7 days managed to achieve high marks and very few students who missed less than 7 days failed their course.
#
# ## Preprocessing the Data
# #### Our goal with preprocessing is to change our numerical fields that have a value like GradeID to a numerical only value in a way that we preserve that distance in a meaningful way. Additionally, we want to assign our three classes to numerical outcomes with a preserved distance. There are a couple of ways to do this. We went with setting L = -1, M = 0, and H = 1. Additionally, you could set each to the middle value of their category on the 100% scale (L = 34.5, M = 79.5, and H = 95). We chose to preserve the distance between the categorical values. Additionally, we decided to scale our numerical fields so that they would be more meaningful when compared together. For this we used scikit learn's built in pre-processing scaling ability.
# In[21]:
# Translate GradeID from categorical to numerical
# (ordinal encoding preserves the distance between grade years).
gradeID_dict = {"G-01" : 1, "G-02" : 2, "G-03" : 3, "G-04" : 4, "G-05" : 5, "G-06" : 6, "G-07" : 7, "G-08" : 8, "G-09" : 9, "G-10" : 10, "G-11" : 11, "G-12" : 12}
data = data.replace({"GradeID" : gradeID_dict})
# Map the target classes to ordered integers: L=-1, M=0, H=1.
class_dict = {"L" : -1, "M" : 0, "H" : 1}
data = data.replace({"Class" : class_dict})
# Scale numerical fields
data["GradeID"] = preprocessing.scale(data["GradeID"])
data["raisedhands"] = preprocessing.scale(data["raisedhands"])
data["VisITedResources"] = preprocessing.scale(data["VisITedResources"])
data["AnnouncementsView"] = preprocessing.scale(data["AnnouncementsView"])
data["Discussion"] = preprocessing.scale(data["Discussion"])
# Use dummy variables for categorical fields
data = pd.get_dummies(data, columns=["gender", "NationalITy", "PlaceofBirth", "SectionID", "StageID", "Topic", "Semester", "Relation", "ParentAnsweringSurvey", "ParentschoolSatisfaction", "StudentAbsenceDays"])
import numpy as np
from matplotlib import pyplot as plt
import time
import emcee
import corner
import seaborn as sns
import pandas as pd
from IPython.display import display, Math
import arviz as az
from scipy.stats import scoreatpercentile
#b_w=0.25 #CC+H0 Mejor sin smoothear!
#b_w=0.005 #Nuisance Mejor sin smoothear!
#b_w=0.12 #CC+SN
#b_w=0.15 #CC+SN+BAO
class Graficador:
'''
Esta clase genera un objeto "Graficador" que toma el objeto sampler
de las cadenas generadas por el método MonteCarlo, las etiquetas
de cada cadena y el título del análisis.
Falta: poder agregar $$ al principio y al final
de cada item de la lista de labels (son necesarios
para los graficos pero no para reportar_intervalos)
'''
    def __init__(self,sampler,labels,title):
        """Store the chain source, per-parameter labels and figure title."""
        self.sampler=sampler  # emcee sampler, or an already-flattened ndarray of samples
        self.labels=labels  # one label per parameter, used on every plot
        self.title=title  # optional overall figure title (may be None)
    def graficar_cadenas(self, num_chains = None):
        '''Plot each parameter's chains against the number of steps.
        When num_chains is given, only the first num_chains walkers are
        drawn; otherwise every walker is plotted.'''
        samples = self.sampler.get_chain()
        len_chain,nwalkers,ndim=self.sampler.get_chain().shape
        sns.set(style='darkgrid', palette="muted", color_codes=True)
        sns.set_context("paper", font_scale=1.5, rc={"font.size":10,"axes.labelsize":17})
        # One stacked subplot per parameter, sharing the step axis.
        fig, axes = plt.subplots(ndim, figsize=(10, 7), sharex=True)
        for i in range(ndim):
            ax = axes[i]
            if num_chains != None:
                ax.plot(samples[:, 0:num_chains, i], alpha=0.3)
            else: # plot every chain
                ax.plot(samples[:, :, i], alpha=0.3)
            ax.set_xlim(0, len(samples))
            ax.set_ylabel(self.labels[i])
            ax.yaxis.set_label_coords(-0.1, 0.5)
        axes[-1].set_xlabel("Número de pasos N");
        if not self.title==None:
            fig.suptitle(self.title);
    def graficar_cadenas_derivs(self):
        '''Plot each parameter's (already flattened/processed) chain against
        the number of steps. Expects ``self.sampler`` to be a 2-D ndarray of
        shape (steps, ndim).'''
        if isinstance(self.sampler, np.ndarray)==True: # a processed chain array
            samples = self.sampler
            len_chain,ndim=samples.shape
        sns.set(style='darkgrid', palette="muted", color_codes=True)
        sns.set_context("paper", font_scale=1.5, rc={"font.size":10,"axes.labelsize":17})
        # One stacked subplot per parameter, sharing the step axis.
        fig, axes = plt.subplots(ndim, figsize=(10, 7), sharex=True)
        for i in range(ndim):
            ax = axes[i]
            ax.plot(samples[:, i], alpha=0.3)
            ax.set_xlim(0, len(samples))
            ax.set_ylabel(self.labels[i])
            ax.yaxis.set_label_coords(-0.1, 0.5)
        axes[-1].set_xlabel("Número de pasos N");
        if not self.title==None:
            fig.suptitle(self.title);
def graficar_contornos(self, discard,
thin, poster=False,
color='b', nuisance_only=False):
'''
Grafica los cornerplots para los parámetros a partir de las cadenas
de Markov. En la diagonal aparecen las distribuciones de probabilidad
proyectadas para cada parámetro, y fuera de la diagonal los contornos
de confianza 2D.
Poster: If True, hace los graficos sin relleno y solo plotea los
contornos a 98% CL.
If False, Realiza los contornos de confianza utilizando la
libreria corner, que es mas rapido pero es más feo.
'''
if isinstance(self.sampler, np.ndarray)==True: #Es una cadenas procesada
flat_samples = self.sampler
else:
flat_samples = self.sampler.get_chain(discard=discard, flat=True, thin=thin)
params_truths = np.zeros(len(flat_samples[0,:]))
for i in range(len(params_truths)):
params_truths[i] = np.mean(flat_samples[:,i])
if nuisance_only==True:
flat_samples=flat_samples[:,3:] #Solo para el grafico de nuisance only!
if poster==True:
viz_dict = {
#'axes.titlesize':5,
#'font.size':36,
'axes.labelsize':26,
'xtick.labelsize':15,
'ytick.labelsize':15,
}
df = | pd.DataFrame(flat_samples,columns=self.labels) | pandas.DataFrame |
import pandas as pd
from collections import deque, namedtuple
class PositionSummary(object):
    """
    Takes the trade history for a user's watchlist from the database and its
    ticker. Then applies the FIFO accounting methodology to calculate the
    overall position's status i.e. final open lots, average cost and a breakdown
    of the open lots.
    This is a queue data structure.
    """
    def __init__(self, trade_history):
        # trade_history: ordered rows that support both index access (row[0] is
        # the ticker, see set_ticker) and attribute access (.quantity, .price,
        # .date, see _apply_fifo) -- presumably namedtuple-like DB rows.
        self.trade_history = trade_history
        self.average_cost = None
        self.open_lots = None  # NOTE(review): never reassigned after init -- confirm still needed
        self.ticker = self.set_ticker()
        # FIFO queues: the oldest open lot sits at the left end of each deque.
        self.buy_quantities = deque([])
        self.buy_prices = deque([])
        self.buy_dates = deque([])
        self.sell_quantities = deque([])
        self.sell_prices = deque([])
        self.sell_dates = deque([])
        self.open_direction = None  # "long", "short" or None while flat
        self.breakdown = []  # one [date, net_position, average_cost] entry per trade
        self.net_position = 0
        self._apply_fifo()
    def __repr__(self):
        return (f"<Ticker: {self.ticker}, Quantity: {self.net_position}>")
    def set_ticker(self):
        # All rows must refer to the same security; row[0] is the ticker field.
        tickers = set([i[0] for i in self.trade_history])
        if len(tickers) == 1:
            return self.trade_history[0][0]
        else:
            raise ValueError("The Trade History for this security contains multiple tickers")
    def total_open_lots(self):
        """ returns the sum of the positions open lots"""
        if self.open_direction == "long":
            return sum(self.buy_quantities)
        elif self.open_direction == "short":
            return sum(self.sell_quantities)
        else:
            return None
    def total_market_value(self):
        """Returns the position's market value"""
        total = None
        if self.buy_quantities and self.open_direction == "long":
            zipped = zip(self.buy_quantities, self.buy_prices)
            total = (quantity*price for quantity, price in zipped)
        elif self.sell_quantities and self.open_direction == "short":
            zipped = zip(self.sell_quantities, self.sell_prices)
            total = (quantity*price for quantity, price in zipped)
        return sum(total) if total is not None else None
    def get_average_cost(self):
        """Returns the weighted average cost of the positions open lots."""
        open_lots = self.total_open_lots()
        # Guard against both an empty/flat position (0) and no direction (None).
        if open_lots == 0 or not open_lots:
            return 0
        return abs(self.total_market_value()/self.total_open_lots())
    def remove_trade(self, direction):
        # Pop the oldest lot (left end) from the given side's queues and
        # return its quantity so the caller can offset it against the other side.
        if direction == "buy":
            popped_quantity = self.buy_quantities.popleft()
            self.buy_prices.popleft()
            self.buy_dates.popleft()
        elif direction == "sell":
            popped_quantity = self.sell_quantities.popleft()
            self.sell_prices.popleft()
            self.sell_dates.popleft()
        else:
            # NOTE(review): internal invariant violation; consider ValueError here.
            raise NameError("why did this happen")
        return popped_quantity
    def _collapse_trade(self):
        # Drop fully-consumed head lots: a sell lot that has been offset to >= 0
        # or a buy lot offset to <= 0 no longer represents an open position.
        if self.sell_quantities:
            if self.sell_quantities[0] >= 0:
                self.remove_trade("sell")
        if self.buy_quantities:
            if self.buy_quantities[0] <= 0:
                self.remove_trade("buy")
    def get_summary(self):
        """
        Returns a named tuple of the ticker, net position and the average
        price of the opens lots
        """
        Summary = namedtuple("Summary",
                             ["ticker", "quantity", "average_price"])
        ticker = self.ticker
        quantity = self.net_position
        average_price = round(self.average_cost, 4)
        return Summary(ticker, quantity, average_price)
    def add(self, side, units, price, date):
        # Append a new lot to the tail of the chosen side's FIFO queues.
        if side == "buy":
            self.buy_quantities.append(units)
            self.buy_prices.append(price)
            self.buy_dates.append(date)
        elif side == "sell":
            self.sell_quantities.append(units)
            self.sell_prices.append(price)
            self.sell_dates.append(date)
    def _set_direction(self):
        """
        Checks if there has been a reversal in the users overall
        trade direction and sets that direction accordingly.
        """
        if self.open_direction == "short" and self.net_position > 0:
            self.open_direction = "long"
        elif self.open_direction == "long" and self.net_position < 0:
            self.open_direction = "short"
    def set_initial_trade(self):
        # Seed the queues from the very first trade; its sign fixes the
        # initial direction (>= 0 means long).
        units = self.trade_history[0].quantity
        price = self.trade_history[0].price
        date = self.trade_history[0].date
        if units >= 0:
            self.open_direction = "long"
            self.add("buy", units, price, date)
        else:
            self.open_direction = "short"
            self.add("sell", units, price, date)
        self.average_cost = self.get_average_cost()
        self.net_position = self.total_open_lots()
        self.breakdown.append([date, self.net_position, self.average_cost])
    def _apply_fifo(self):
        """
        This algorithm iterate over the trade history. It sets the
        initial trade direction to get the initial open lots and then increases
        or closes lots based on each trade.
        In the event that a position was initally long then becomes short or
        vice versa the open lots will be increased or closed accordingly.
        """
        if self.trade_history:
            self.set_initial_trade()
        else:
            return []
        trades = len(self.trade_history)
        c1 = 1 # counter
        while c1 < trades:
            units = self.trade_history[c1].quantity
            price = self.trade_history[c1].price
            date = self.trade_history[c1].date
            if units*self.net_position > 0: # if true both trades have the same sign
                # Same direction as the open position: just add a new lot.
                if self.open_direction == "long":
                    self.add("buy", units, price, date)
                else:
                    self.add("sell", units, price, date)
            elif units*self.net_position == 0: # position is flat
                # Re-open the position; the trade's sign fixes the new direction.
                if units >= 0:
                    self.open_direction = "long"
                    self.add("buy", units, price, date)
                else:
                    self.open_direction = "short"
                    self.add("sell", units, price, date)
            else: # both trades are in different directions
                if self.open_direction == "long":
                    self.add("sell", units, price, date)
                    # while the lots are not empty
                    # Offset the new sell against buy lots oldest-first (FIFO).
                    while self.sell_quantities and self.buy_quantities:
                        if abs(self.sell_quantities[0]) >= self.buy_quantities[0]:
                            self.sell_quantities[0] += self.buy_quantities[0]
                            self.remove_trade("buy")
                        else:
                            temp = self.remove_trade("sell")
                            self.buy_quantities[0] += temp
                    self.net_position += units # subtract units from net position
                else: # self.open_direction == "short"
                    # Mirror image: offset the new buy against sell lots oldest-first.
                    self.add("buy", units, price, date)
                    while self.sell_quantities and self.buy_quantities:
                        if self.buy_quantities[0] >= abs(self.sell_quantities[0]):
                            self.buy_quantities[0] += self.sell_quantities[0]
                            self.remove_trade("sell")
                        else:
                            temp = self.remove_trade("buy")
                            self.sell_quantities[0] += temp
                    self.net_position += units
            # After every trade: drop consumed lots, detect direction reversals
            # and record the running [date, position, avg cost] breakdown.
            self._collapse_trade()
            self._set_direction()
            self.average_cost = round(self.get_average_cost(), 4)
            self.net_position = self.total_open_lots()
            self.breakdown.append([date, self.net_position, self.average_cost])
            c1 += 1
class PositionAccounting(PositionSummary):
    """
    Inherits from the Position Summary and applies accounting methods
    to a Position
    """
    def __init__(self, close_prices, trade_history):
        super().__init__(trade_history)
        self.close_prices = close_prices # Daily market prices as (date, price) pairs
    def performance_table(self):
        """
        Combines the position breakdown with the daily prices to calculate
        daily unrealised P&L. The Daily unrealised P&L is the difference
        between the postion's weighted average cost and the market
        price.
        """
        df = pd.DataFrame(self.close_prices, columns=["date", "price"])
        df = df.set_index("date")
        df["quantity"] = float("nan")
        df["avg_cost"] = float("nan")
        # Only price the position from the first date it was held.
        start_date = str(self.breakdown[0][0])
        df2 = df.loc[start_date:]
        df2 = df2.copy() # copied to prevent chained assignment
        # Stamp the known quantity/avg-cost on each trade date, then
        # forward-fill them across the in-between dates.
        # NOTE(review): fillna(method="ffill") is deprecated in newer pandas;
        # prefer .ffill() when the project's pandas floor allows it.
        for row in self.breakdown:
            df2.at[str(row[0]), "quantity"] = row[1]
            df2.at[str(row[0]), "avg_cost"] = row[2]
        df2["quantity"] = df2["quantity"].fillna(method="ffill")
        df2["price"] = df2["price"].fillna(method="ffill")
        df2["avg_cost"] = df2["avg_cost"].fillna(method="ffill")
        df2["price"] = pd.to_numeric(df2["price"])
        # Sign flips the P&L for short positions (quantity <= 0).
        df2.loc[df2['quantity'] <= 0, 'Long/Short'] = -1
        df2.loc[df2['quantity'] > 0, 'Long/Short'] = 1
        df2["pct_change"] = (((df2["price"] - df2["avg_cost"])/df2["avg_cost"])*df2["Long/Short"])*100
        df2["pct_change"] = round(df2["pct_change"], 3)
        df2 = df2.reset_index()
        df2 = df2[["date", "quantity", "avg_cost", "price", "pct_change"]]
        df2 = list(df2.itertuples(index=False))
        return df2
    def daily_valuations(self):
        """
        Combines the position breakdown with the daily prices to calculate
        daily market value. The Daily market value is the positions quantity
        multiplied by the market price.
        """
        df = pd.DataFrame(self.close_prices, columns=["date", "price"])
        df = df.set_index("date")
        df["quantity"] = float("nan")
        df["market_val"] = float("nan")
        # the prices starting from the first date the security was held
        start_date = str(self.breakdown[0][0])
        df2 = df.loc[start_date:]
        df2 = df2.copy() # copied to prevent chained assignment
        # update the quantity at each date
        for row in self.breakdown:
            df2.at[str(row[0]), "quantity"] = row[1]
        df2["price"] = df2["price"].fillna(method="ffill")
        df2["quantity"] = df2["quantity"].fillna(method="ffill")
        df2["price"] = pd.to_numeric(df2["price"])
        df2["market_val"] = round((df2["price"] * df2["quantity"]), 3)
        df2 = df2[["market_val"]]
        # One uniquely named column per ticker so frames can be joined later.
        new_name = f"market_val_{self.ticker}"
        new_header = {"market_val": new_name}
        df2 = df2.rename(columns=new_header)
        return df2
class Portfolio_Summary(object):
"""
This is a collection of the Positions for the user accounts, priced as of
the latest market prices
"""
    def __init__(self):
        # One market-value column per position; starts empty and is grown
        # column-by-column by add_position().
        self.portfolio_breakdown = pd.DataFrame()
def add_position(self, close_prices, trade_history):
"""
Adds each positions daily market value to the portfolio breakdown.
"""
Position = PositionAccounting(close_prices, trade_history)
Position_valuation = Position.daily_valuations()
if self.portfolio_breakdown.empty:
self.portfolio_breakdown = Position_valuation
else:
self.portfolio_breakdown = self.portfolio_breakdown.join(Position_valuation)
self.portfolio_breakdown = self.portfolio_breakdown.fillna(method="ffill")
def net_valuations(self):
"""
returns the portfolios daily market value
"""
valuation = self.portfolio_breakdown.copy()
valuation["portfolio_val"] = valuation.sum(axis=1)
valuation = valuation[["portfolio_val"]]
return valuation
def convert_flows(self, flows):
"""
Using the Holding Period Return (HPR) methodology. Purchases of
securities are accounted as fund inflows and the sale of securities are
accounted as increases in cash.
By creating the cumulative sum of these values we can maintain an
accurate calculation of the HPR which can be distorted as purchases and
sells are added to the trades.
"""
df_flows = | pd.DataFrame(flows, columns=["date", "flows"]) | pandas.DataFrame |
"""
Prisma Inc.
database.py
Status: UNDER DEVELOPMENT for Major Update Ryzen
Made by <NAME>.
"""
import pandas as pd
import os
import requests
import progressbar
import gc
import pymongo
import gridfs
from pprint import pprint
import json
import certifi
from sneakers.api.low import builder as bd
from sneakers.api.low import threading as thr
import base64
import bson
from bson.binary import Binary
from bson.json_util import dumps, loads
ca = certifi.where()
#--------- MONGO DB IMPLEMENTATION -----------------
# SECURITY NOTE(review): the connection string embeds plaintext credentials in
# source control; move them to an environment variable / secrets store and
# rotate the password.
snkclient = pymongo.MongoClient("mongodb+srv://Prismadevops:tetas1@sneakercluster.iuykn.mongodb.net/stockdotshopdb?retryWrites=true&w=majority", tlsCAFile=ca)
snkdb=snkclient['stockdotshopdb']
snkcoll = snkdb['sneakers']
# Issue the serverStatus command and print the results
def core_connection_snk():
    """Print the MongoDB serverStatus and the db handle as a connectivity check."""
    status = snkdb.command("serverStatus")
    pprint(status)
    print(snkdb)
    # NOTE(review): placeholder return value kept for behavioural parity.
    return '8=======D'
def load_database_ryzen():
    """Fetch every sneaker document and return them as a DataFrame."""
    documents = list(snkcoll.find({}))
    return pd.DataFrame(documents)
def search_query_database(query):
    """Run a MongoDB full-text search for `query`, capped at 100 documents."""
    text_filter = {"$text" : {"$search": query}}
    matches = list(snkcoll.find(text_filter).limit(100))
    return pd.DataFrame(matches)
def get_page_database_ryzen(x):
    """Return one 100-document page of sneakers, skipping the first `x` documents."""
    page = list(snkcoll.find().skip(x).limit(100))
    return pd.DataFrame(page)
def get_sneaker_by_sku(sku):
myquery2 = {"sku": sku}
q3 = {"sku": {"$in": sku}}
resultcursor = list(snkcoll.find(q3))
result = | pd.DataFrame(resultcursor) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
conditional_df,
conditional_right,
conditional_series,
)
@pytest.mark.xfail(reason="empty object will pass thru")
@given(s=conditional_series())
def test_df_empty(s):
    """Raise ValueError if `df` is empty."""
    # xfail: the implementation currently lets an empty frame pass through.
    df = pd.DataFrame([], dtype="int", columns=["A"])
    with pytest.raises(ValueError):
        df.conditional_join(s, ("A", "non", "=="))
@pytest.mark.xfail(reason="empty object will pass thru")
@given(df=conditional_df())
def test_right_empty(df):
    """Raise ValueError if `right` is empty."""
    # xfail: the implementation currently lets an empty Series pass through.
    s = pd.Series([], dtype="int", name="A")
    with pytest.raises(ValueError):
        df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_right_df(df):
    """Raise TypeError if `right` is not a Series/DataFrame."""
    with pytest.raises(TypeError):
        df.conditional_join({"non": [2, 3, 4]}, ("A", "non", "=="))  # plain dict is rejected
@given(df=conditional_df(), s=conditional_series())
def test_right_series(df, s):
    """Raise ValueError if `right` is not a named Series."""
    with pytest.raises(ValueError):
        df.conditional_join(s, ("A", "non", "=="))  # fixture series has no name set
@given(df=conditional_df())
def test_df_MultiIndex(df):
    """Raise ValueError if `df` columns is a MultiIndex."""
    with pytest.raises(ValueError):
        # A two-level column index on the left frame is not supported.
        df.columns = [list("ABCDE"), list("FGHIJ")]
        df.conditional_join(
            pd.Series([2, 3, 4], name="A"), (("A", "F"), "non", "==")
        )
@given(df=conditional_df())
def test_right_MultiIndex(df):
    """Raise ValueError if `right` columns is a MultiIndex."""
    with pytest.raises(ValueError):
        # A two-level column index on the right frame is not supported.
        right = df.copy()
        right.columns = [list("ABCDE"), list("FGHIJ")]
        df.conditional_join(right, (("A", "F"), "non", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_conditions_exist(df, s):
    """Raise ValueError if no condition is provided."""
    with pytest.raises(ValueError):
        s.name = "B"
        df.conditional_join(s)  # no condition tuples passed at all
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_type(df, s):
    """Raise TypeError if any condition in conditions is not a tuple."""
    with pytest.raises(TypeError):
        s.name = "B"
        df.conditional_join(s, ("A", "B", ""), ["A", "B"])  # second condition is a list
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_length(df, s):
    """Raise ValueError if any condition is not length 3."""
    s.name = "B"
    # Each bad condition needs its own `raises` context: inside a single
    # block the first call raises and the second was never executed.
    with pytest.raises(ValueError):
        df.conditional_join(s, ("A", "B", "C", "<"))
    with pytest.raises(ValueError):
        df.conditional_join(s, ("A", "B", ""), ("A", "B"))
@given(df=conditional_df(), s=conditional_series())
def test_check_left_on_type(df, s):
    """Raise TypeError if left_on is not a string."""
    with pytest.raises(TypeError):
        s.name = "B"
        df.conditional_join(s, (1, "B", "<"))  # left_on given as an int
@given(df=conditional_df(), s=conditional_series())
def test_check_right_on_type(df, s):
    """Raise TypeError if right_on is not a string."""
    with pytest.raises(TypeError):
        s.name = "B"
        df.conditional_join(s, ("B", 1, "<"))  # right_on given as an int
@given(df=conditional_df(), s=conditional_series())
def test_check_op_type(df, s):
    """Raise TypeError if the operator is not a string."""
    with pytest.raises(TypeError):
        s.name = "B"
        df.conditional_join(s, ("B", "B", 1))  # operator given as an int
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_df(df, s):
    """Raise ValueError if `left_on` cannot be found in `df`."""
    with pytest.raises(ValueError):
        s.name = "B"
        df.conditional_join(s, ("C", "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_right(df, s):
    """Raise ValueError if `right_on` cannot be found in `right`."""
    with pytest.raises(ValueError):
        s.name = "B"
        df.conditional_join(s, ("B", "A", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_correct(df, s):
    """
    Raise ValueError if `op` is not any of
    `!=`, `<`, `>`, `>=`, `<=`.
    """
    with pytest.raises(ValueError):
        s.name = "B"
        df.conditional_join(s, ("B", "B", "=!"))  # typo-style invalid operator
@given(df=conditional_df(), s=conditional_series())
def test_check_how_type(df, s):
    """Raise TypeError if `how` is not a string."""
    with pytest.raises(TypeError):
        s.name = "B"
        df.conditional_join(s, ("B", "B", "<"), how=1)
@given(df=conditional_df(), s=conditional_series())
def test_check_how_value(df, s):
    """
    Raise ValueError if `how` is not one of
    `inner`, `left`, or `right`.
    """
    with pytest.raises(ValueError):
        s.name = "B"
        df.conditional_join(s, ("B", "B", "<"), how="INNER")  # value is case-sensitive
@given(df=conditional_df(), right=conditional_right())
def test_dtype_strings_non_equi(df, right):
    """
    Raise ValueError if the dtypes are both strings
    on a non-equi operator.
    """
    with pytest.raises(ValueError):
        df.conditional_join(right, ("C", "Strings", "<"))  # ordering comparison on strings
@given(df=conditional_df(), s=conditional_series())
def test_dtype_not_permitted(df, s):
    """
    Raise ValueError if dtype of column in `df`
    is not an acceptable type.
    """
    df["F"] = pd.Timedelta("1 days")  # timedelta columns are not joinable
    with pytest.raises(ValueError):
        s.name = "A"
        df.conditional_join(s, ("F", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_str(df, s):
    """
    Raise ValueError if dtype of column in `df`
    does not match the dtype of column from `right`.
    """
    with pytest.raises(ValueError):
        s.name = "A"
        df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_category_non_equi(df, s):
    """
    Raise ValueError if dtype is category,
    and op is non-equi.
    """
    with pytest.raises(ValueError):
        s.name = "A"
        s = s.astype("category")
        df["C"] = df["C"].astype("category")
        df.conditional_join(s, ("C", "A", "<"))  # non-equi comparison on categoricals
@given(df=conditional_df(), s=conditional_series())
def test_check_sort_by_appearance_type(df, s):
    """
    Raise TypeError if `sort_by_appearance` is not a boolean.
    """
    with pytest.raises(TypeError):
        s.name = "B"
        df.conditional_join(s, ("B", "B", "<"), sort_by_appearance="True")  # string, not bool
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_floats(df, right):
    """Test output for a single condition. "<"."""
    left_on, right_on = ["B", "Numeric"]
    # Build the expected frame naively: cross join via a constant key `t`,
    # then keep only the rows that satisfy the predicate.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query(f"{left_on} < {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints(df, right):
    """Test output for a single condition. "<"."""
    left_on, right_on = ["A", "Integers"]
    # Naive cross join + query builds the expected result. The extra C="2"
    # column on `right` is dropped by the filter below -- presumably there to
    # exercise overlapping/mixed-dtype columns; TODO confirm intent.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1, C="2"), on="t")
        .query(f"{left_on} < {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints_extension_array(df, right):
    """Test output for a single condition. "<"."""
    # Cast both join columns to the nullable Int64 extension dtype.
    df = df.assign(A=df["A"].astype("Int64"))
    right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
    left_on, right_on = ["A", "Integers"]
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query(f"{left_on} < {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_equal(df, right):
    """Test output for a single condition. "<=". DateTimes"""
    left_on, right_on = ["E", "Dates"]
    # Expected result via a naive cross join filtered by the predicate.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query(f"{left_on} <= {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, "<="), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_date(df, right):
    """Test output for a single condition. "<". Dates"""
    left_on, right_on = ["E", "Dates"]
    # Expected result via a naive cross join filtered by the predicate.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query(f"{left_on} < {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_datetime(df, right):
    """Test output for a single condition. ">". Datetimes"""
    left_on, right_on = ["E", "Dates"]
    # Expected result via a naive cross join filtered by the predicate.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query(f"{left_on} > {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints(df, right):
    """Test output for a single condition. ">="."""
    left_on, right_on = ["A", "Integers"]
    # Expected result via a naive cross join filtered by the predicate.
    expected = (
        df.assign(t=1)
        .merge(right.assign(t=1), on="t")
        .query(f"{left_on} >= {right_on}")
        .reset_index(drop=True)
    )
    expected = expected.filter([left_on, right_on])
    actual = df.conditional_join(
        right, (left_on, right_on, ">="), how="inner", sort_by_appearance=True
    )
    actual = actual.filter([left_on, right_on])
    # Repaired: this assertion line was corrupted (dataset-artifact pipes) in source.
    assert_frame_equal(expected, actual)
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 06:44:09 2016
@author: subhajit
"""
import pandas as pd
import datetime
from sklearn import cross_validation
import xgboost as xgb
import numpy as np
import h5py
import os
os.chdir('D:\Data Science Competitions\Kaggle\Expedia Hotel Recommendations\codes')
def map5eval(preds, dtrain):
    """MAP@5 metric for xgboost, returned negated (xgboost minimizes).

    preds: (n_samples, n_classes) score matrix.
    dtrain: DMatrix-like object whose get_label() returns the true class ids.
    """
    actual = dtrain.get_label()
    # Top-5 predicted classes, best first. BUGFIX: the original
    # `argsort(axis=1)[:, -np.arange(5)]` indexed columns [0, -1, -2, -3, -4],
    # putting the *lowest*-scored class in first place.
    predicted = preds.argsort(axis=1)[:, ::-1][:, :5]
    metric = 0.
    for i in range(5):
        # Precision contribution 1/(rank) for hits at rank i+1.
        metric += np.sum(actual == predicted[:, i]) / (i + 1)
    metric /= actual.shape[0]
    return 'MAP@5', -metric
def pre_process(data):
    """Feature-engineer a raw Expedia frame in place.

    Parses the string date columns, derives stay-duration / booking-lead-time /
    calendar features, and drops the raw identifier columns.
    Assumes `srch_co` is already datetime-like -- TODO confirm with callers.
    """
    try:
        # Strings ending in '00' are treated as malformed dates and coerced.
        data.loc[data.srch_ci.str.endswith('00'),'srch_ci'] = '2015-12-31'
        data['srch_ci'] = data.srch_ci.astype(np.datetime64)
        data.loc[data.date_time.str.endswith('00'),'date_time'] = '2015-12-31'
        data['date_time'] = data.date_time.astype(np.datetime64)
    except Exception:
        # Best-effort parsing: the columns may already be datetimes (no .str
        # accessor). Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
    data.fillna(0, inplace=True)
    data['srch_duration'] = data.srch_co-data.srch_ci
    data['srch_duration'] = data['srch_duration'].apply(lambda td: td/np.timedelta64(1, 'D'))
    # Lead time between booking and check-in, in fractional days.
    data['time_to_ci'] = data.srch_ci-data.date_time
    data['time_to_ci'] = data['time_to_ci'].apply(lambda td: td/np.timedelta64(1, 'D'))
    data['ci_month'] = data['srch_ci'].apply(lambda dt: dt.month)
    data['ci_day'] = data['srch_ci'].apply(lambda dt: dt.day)
    data['bk_month'] = data['date_time'].apply(lambda dt: dt.month)
    data['bk_day'] = data['date_time'].apply(lambda dt: dt.day)
    data['bk_hour'] = data['date_time'].apply(lambda dt: dt.hour)
    data.drop(['date_time', 'user_id', 'srch_ci', 'srch_co'], axis=1, inplace=True)
# Build (or load from cache) the destination/country/market -> hotel_cluster
# popularity table used as features downstream.
if os.path.exists('../output/srch_dest_hc_hm_agg.csv'):
    agg1 = pd.read_csv('../output/srch_dest_hc_hm_agg.csv')
else:
    # Stream train.csv in chunks so the aggregation fits in memory.
    reader = pd.read_csv('../input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], chunksize=200000)
    pieces = [chunk.groupby(['srch_destination_id','hotel_country','hotel_market','hotel_cluster'])['is_booking'].agg(['sum','count']) for chunk in reader]
    agg = pd.concat(pieces).groupby(level=[0,1,2,3]).sum()
    del pieces
    agg.dropna(inplace=True)
    # Weighted score: bookings (sum) count far more than raw appearances (count).
    agg['sum_and_cnt'] = 0.85*agg['sum'] + 0.15*agg['count']
    # Normalise to relative weights within each (destination, country, market).
    agg = agg.groupby(level=[0,1,2]).apply(lambda x: x.astype(float)/x.sum())
    agg.reset_index(inplace=True)
    agg1 = agg.pivot_table(index=['srch_destination_id','hotel_country','hotel_market'], columns='hotel_cluster', values='sum_and_cnt').reset_index()
    agg1.to_csv('../output/srch_dest_hc_hm_agg.csv', index=False)
    del agg
destinations = | pd.read_csv('../input/destinations.csv') | pandas.read_csv |
"""Mock data for bwaw.insights tests."""
import pandas as pd
ACTIVE_BUSES = pd.DataFrame([
['213', 21.0921481, '1001', '2021-02-09 15:45:27', 52.224536, '2'],
['213', 21.0911025, '1001', '2021-02-09 15:46:22', 52.2223788, '2'],
['138', 21.0921481, '1001', '2021-02-09 15:45:27', 52.224536, '05'],
['138', 21.0911025, '1001', '2021-02-09 15:46:22', 52.2223788, '05']
], columns=['Lines', 'Lon', 'VehicleNumber', 'Time', 'Lat', 'Brigade'])
ACTIVE_BUSES['Time'] = pd.to_datetime(ACTIVE_BUSES['Time'])
# Single bus-stop coordinate record (stop 1001/01, al.Zieleniecka).
# Repaired: the assignment line was corrupted with dataset-artifact pipes.
COORDINATES = pd.DataFrame([
    ['1001', '01', 52.224536, 21.0921481, 'al.Zieleniecka', '2020-10-12 00:00:00.0']
], columns=['ID', 'Number', 'Latitude', 'Longitude', 'Destination', 'Validity'])
"""
-------------------------------------------
Author: <NAME> (<EMAIL>)
Date: 10/13/17
-------------------------------------------
"""
# our modules
import visJS2jupyter.visJS_module as visJS_module # "pip install visJS2jupyter"
import create_graph # from URA package
# common packages, most likely already installed
import scipy
import math
import pandas as pd
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import sys
from scipy.special import ndtr
# uncommon packages required for this analysis
import seaborn as sns # pip install seaborn
# -------------------- LOCALIZATION ---------------------------------#
def localization(Gint, focal_genes, num_reps = 10, sample_frac = 0.8, method = 'numedges', plot = True, print_counter = False):
    """
    Function to calculate localization of an input set of genes (focal_genes)
    on a background network (Gint). Option to compute number of edges
    (method = 'numedges') or largest connected component (method = 'LCC')
    localization analysis. Calculates by sampling sub-sections of the focal
    genes/random set; the fraction sampled is set by sample_frac. Option to
    plot the distributions of random and focal gene localization.

    Args:
        Gint: Networkx Graph, background network to randomly sample from
        focal_genes: List, set of genes to calculate localization of
        num_reps: Int, number of times to randomly sample
        sample_frac: Float, percent of sampled genes
        method: String, which analysis to run: 'numedges', 'LCC', or 'both'.
            (The code tests 'LCC'; earlier docs said 'LLC', which never matched.)
        plot: Bool, whether to plot the distributions in the output cell
        print_counter: Bool, whether to print a progress counter every 25
            iterations. Useful when num_reps is very high.

    Returns:
        numedges_list: List, number of edges per rep over sampled focal genes.
            Empty if method = 'LCC'.
        numedges_rand: List, number of edges per rep over degree-matched random
            genes. Empty if method = 'LCC'.
        LCC_list: List, largest connected component size per rep over sampled
            focal genes. Empty if method = 'numedges'.
        LCC_rand: List, largest connected component size per rep over
            degree-matched random genes. Empty if method = 'numedges'.
    """
    # Create degree bins to sample from
    bins = get_degree_binning(Gint, 10)
    min_degree, max_degree, genes_binned = zip(*bins)
    bin_df = pd.DataFrame({'min_degree':min_degree, 'max_degree':max_degree, 'genes_binned':genes_binned})
    # create a lookup table for degree and index
    actual_degree_to_bin_df_idx = {}
    for i in range(0, bin_df['max_degree'].max() + 1):
        idx_temp = bin_df[ (bin_df['min_degree'].lt(i + 1)) & (bin_df['max_degree'].gt(i - 1)) ].index.tolist()
        if len(idx_temp) > 0: # there are some degrees which aren't represented in the graph
            actual_degree_to_bin_df_idx[i] = idx_temp[0]
    focal_genes = list(np.intersect1d(focal_genes, Gint.nodes())) # only use focal_genes which are in Gint
    numedges_list = []
    numedges_rand = []
    LCC_list = []
    LCC_rand = []
    for r in range(num_reps):
        if print_counter == True:
            # so user knows how far along the process is
            if (r % 25) == 0:
                print(r)
        # NOTE(review): `focal_80` aliases `focal_genes`, so the shuffle below
        # permutes the full list in place between reps. Sampling remains
        # uniform, but consider `focal_80 = list(focal_genes)` for clarity.
        focal_80 = focal_genes
        np.random.shuffle(focal_80)
        focal_80 = focal_80[:int(len(focal_80)*sample_frac)]
        # find genes with similar degrees to focal gene degree
        seed_random = []
        for g in focal_80:
            degree_temp = nx.degree(Gint,g)
            genes_temp = bin_df.loc[actual_degree_to_bin_df_idx[degree_temp]]['genes_binned'] # use the lookup table for speed
            # NOTE(review): this shuffles the binned list stored inside bin_df
            # in place -- intentional-looking, but worth confirming.
            np.random.shuffle(genes_temp) # shuffle them
            seed_random.append(genes_temp[0]) # build the seed_D1_random list
        if (method == 'numedges') or (method == 'both'):
            # number edges calc on focal set
            numedges_temp = len(nx.subgraph(Gint,focal_80).edges())
            numedges_list.append(numedges_temp)
            # number edges calc on random sample
            numedges_temp_rand = len(nx.subgraph(Gint,seed_random).edges())
            numedges_rand.append(numedges_temp_rand)
        if (method == 'LCC') or (method == 'both'):
            # LCC calc on focal set
            # NOTE(review): nx.connected_component_subgraphs was removed in
            # networkx >= 2.4; this code requires an older networkx.
            G_sub_temp = nx.Graph(nx.subgraph(Gint, focal_80))
            G_sub_temp = max(nx.connected_component_subgraphs(G_sub_temp), key = len)
            LCC_list.append(len(G_sub_temp.nodes()))
            # LCC calc on random sample
            G_sub_temp = nx.Graph(nx.subgraph(Gint, seed_random))
            G_sub_temp = max(nx.connected_component_subgraphs(G_sub_temp), key=len)
            LCC_rand.append(len(G_sub_temp.nodes()))
    if plot == True:
        if (method == 'numedges') or (method == 'both'):
            fig, ax = plt.subplots(figsize = (12, 7))
            sns.distplot(numedges_list, ax = ax, hist = True, label = 'focal genes')
            sns.distplot(numedges_rand, ax = ax, hist = True, label = 'random set')
            plt.ylabel('frequency', fontsize = 16)
            plt.xlabel('number of edges', fontsize = 16)
            plt.title('Number of Edges Localization', fontsize = 18)
            plt.legend(loc = 'upper right', fontsize = 14)
        if (method == 'LCC') or (method == 'both'):
            fig, ax = plt.subplots(figsize = (12, 7))
            sns.distplot(LCC_list, ax = ax, hist = True, label = 'focal genes')
            sns.distplot(LCC_rand, ax = ax, hist = True, label = 'random set')
            plt.ylabel('frequency', fontsize = 16)
            plt.xlabel('largest connected component size', fontsize = 16)
            plt.title('Largest Connected Component Localization', fontsize = 18)
            plt.legend(loc = 'upper right', fontsize = 14)
    return numedges_list, numedges_rand, LCC_list, LCC_rand
def localization_full(Gint, focal_genes,
                      num_reps = 200,
                      method = 'LCC',
                      print_counter = False,
                      label = 'focal genes',
                      line_height = 0.1,
                      legend_loc = 'upper left'):
    """
    Plot the localization of an input gene set (focal_genes) on a background
    network (Gint), with no sub-sampling.

    Runs localization() once over the full focal set (sample_frac = 1), draws
    the distribution of the chosen statistic over the random samples, marks
    the focal-set mean as a vertical red line, and reports the permutation
    p-value (upper tail of a normal approximation) in the figure title.

    Args:
        Gint: Networkx Graph, background network to randomly sample from
        focal_genes: List, set of genes to calculate localization of
        num_reps: Int, number of times to randomly sample
        method: String, 'numedges' plots the number-of-edges statistic;
            any other value (e.g. 'LCC' or 'both') plots the largest
            connected component statistic
        print_counter: Bool, whether localization() prints a progress counter
            every 25 iterations (useful when num_reps is very high)
        label: String, label for focal genes in the graph legend
        line_height: Float, height of the red line marking the focal statistic
        legend_loc: String, relative position of the legend in the graph
    Returns:
        numedges_list, numedges_rand, LCC_list, LCC_rand: the four lists
        produced by localization(); which pair is populated depends on method.
    """
    numedges_list, numedges_rand, LCC_list, LCC_rand = localization(
        Gint, focal_genes, num_reps,
        sample_frac = 1,
        method = method,
        plot = False,
        print_counter = print_counter)

    # Select the statistic to display. Only 'numedges' picks the edge-count
    # statistic; every other method value (including 'both') shows the LCC.
    use_numedges = (method == 'numedges')
    focal_stat = numedges_list if use_numedges else LCC_list
    random_stat = numedges_rand if use_numedges else LCC_rand
    xaxis_label = 'number of edges' if use_numedges else 'largest connected component'

    # Random-sample distribution with the focal-set mean marked in red.
    fig, ax = plt.subplots(figsize = (12, 7))
    sns.set_style('white')
    plt.vlines(np.mean(focal_stat), ymin = 0, ymax = line_height, color = 'r', lw = 2, label = label)
    sns.kdeplot(random_stat, ax = ax, color = 'k', lw = 2, alpha = 0.5, shade = True, label = 'random')
    plt.legend(loc = legend_loc, fontsize = 12)
    plt.ylabel('frequency', fontsize = 16)
    plt.xlabel(xaxis_label, fontsize = 16)

    # z-score of the focal mean against the random distribution, converted to
    # an upper-tail p-value via the standard normal CDF (scipy.special.ndtr).
    focal_z = (np.mean(focal_stat) - np.mean(random_stat)) / float(np.std(random_stat))
    print(1 - ndtr(focal_z))
    plt.title('permutation p = ' + str(1 - ndtr(focal_z)))

    return numedges_list, numedges_rand, LCC_list, LCC_rand
def get_degree_binning(g, bin_size, lengths = None):
    """
    Group the nodes of g into degree bins containing at least bin_size nodes.

    Helper function for localization(). This function comes from
    network_utilities.py of emregtoobox.

    Args:
        g: graph object exposing g.degree() (e.g. a networkx Graph)
        bin_size: Int, minimum number of nodes per bin
        lengths: optional container of node ids; when given, nodes that are
            not in lengths are excluded from the binning
    Returns:
        List of (low, high, nodes) tuples, where nodes holds every kept node
        whose degree falls in the inclusive range [low, high].
    """
    # Map each degree value to the list of nodes having that degree.
    # NOTE: dict(g.degree()).items() behaves identically on Python 2 and 3,
    # so the old sys.version_info branch (whose py2 arm used .iteritems())
    # was redundant and has been removed.
    degree_to_nodes = {}
    for node, degree in dict(g.degree()).items():
        if lengths is not None and node not in lengths:
            continue
        degree_to_nodes.setdefault(degree, []).append(node)
    values = list(degree_to_nodes.keys())
    values.sort()
    bins = []
    i = 0
    while i < len(values):
        low = values[i]
        val = degree_to_nodes[values[i]]
        # Absorb successively higher degrees until the bin is large enough
        # (or we run out of degree values).
        while len(val) < bin_size:
            i += 1
            if i == len(values):
                break
            val.extend(degree_to_nodes[values[i]])
        if i == len(values):
            i -= 1
        high = values[i]
        i += 1
        if len(val) < bin_size:
            # The final bin came up short: merge it into the previous bin,
            # extending that bin's upper degree bound.
            low_, high_, val_ = bins[-1]
            bins[-1] = (low_, high, val_ + val)
        else:
            bins.append((low, high, val))
    return bins
# --------------------- P-VALUE FUNCTIONS ---------------------------#
def tf_pvalues(DG_TF, DG_universe, DEG_list):
    """
    Our p-value function calculates the log of the p-value for every TF in the graph using [scipy.stats.hypergeom.logsf]
    (https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.stats.hypergeom.html). These values help us
    determine which TF's are actually associated with our DEG's. If a TF is given a high value (because we are
    working with logs, not straight p-values), then it is likely that there is correlation between that TF and its
    DEG targets. Therefore, it is likely that TF is responsible for some of our observed gene expression.
    Note that if a TF is given a value of zero, that means none of the TF's targets were DEG's.
    Args:
        DG_TF: Digraph, a directed networkx graph with edges mapping from transcription factors to expressed genes (filtered)
        DG_universe: a networkx graph containing all interactions in our universe (not filtered)
        DEG_list: list of strings, your list of differentially expressed genes
    Returns: A sorted Pandas Series that maps a transcription factor's gene symbol to its calculated p-value log.
    """
    # NOTE(review): zip(*...)[0] only works on Python 2 (zip returns a
    # non-subscriptable iterator on Python 3), and len(DG_TF.neighbors(TR))
    # below assumes neighbors() returns a list (networkx 1.x) -- confirm the
    # target interpreter / networkx version before reusing this function.
    source_nodes = list(set(zip(*DG_TF.edges())[0])) # identifying unique source (TF) nodes in graph
    background_list = list(DG_universe.nodes()) # list of all unique nodes in universe
    TR_to_pvalue = {}
    x_n_to_p_score = {} # memo: (overlap x, target count n) -> already-computed score
    M = len(background_list) # num unique nodes in universe, aka background network (STRING)
    N = len(list(set(background_list) & set(DEG_list))) # number of DEG, picked from universe "at random"
    for TR in source_nodes:
        x = len(list(set(DG_TF.neighbors(TR)) & set(DEG_list))) # per TR, observed overlap between TR neighbors and DEG_list
        n = len(DG_TF.neighbors(TR)) # per TR, number of targets for that TR
        if (x,n) in x_n_to_p_score: # if we have computed this value before
            TR_to_pvalue[TR] = x_n_to_p_score[(x,n)]
        else:
            if x == 0:
                TR_to_pvalue[TR] = 0 # no overlap with DEGs -> no signal
            elif x == n:
                TR_to_pvalue[TR] = float('Inf') # every target is a DEG -> treated as maximally significant
            else:
                TR_to_pvalue[TR] = -(scipy.stats.hypergeom.logsf(x, M, n, N, loc=0)) # remove unnecessary negative sign
            x_n_to_p_score[(x,n)] = TR_to_pvalue[TR] # record that we have calculated this value
    TR_to_pvalue = pd.Series(TR_to_pvalue)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import panel as pn
pn.extension()
import hvplot.pandas
import datetime
# # 1 load data
# In[2]:
# Directory holding the dashboard's input CSV extracts.
folder_path = './data/'
# Load each CSV into its own DataFrame. Every date column is stored
# day-first (dd/mm/yyyy), hence parse_dates paired with dayfirst=True.
data_net_zero = pd.read_csv(folder_path + 'net_zero.csv', parse_dates = ['date_call_made'], dayfirst =True)
data_la_dec = pd.read_csv(folder_path + 'la_declarations.csv', parse_dates = ['declaration_date'], dayfirst=True)
data_social = pd.read_csv(folder_path + 'social_media_stats.csv', parse_dates = ['date'], dayfirst=True) # NOTE(review): may be cleaner to keep each platform in a separate CSV
data_books = pd.read_csv(folder_path + 'book_sales.csv', parse_dates = ['as_at_date'], dayfirst=True)
"""Filter classifier"""
import json
import logging
import collections
import functools
import math
import scipy.optimize
import numpy as np
import pandas as pd
from pandas import json_normalize
import sklearn.linear_model
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, log_loss
from .util import grouper, file_open
logger = logging.getLogger(__name__)
def lists_to_dicts(obj):
    """Recursively replace every list in a JSON-style object with a dict.

    List elements are keyed by their stringified index; dicts are walked
    recursively; scalars are returned unchanged.

    Examples:
        >>> lists_to_dicts([3, 4])
        {'0': 3, '1': 4}
        >>> lists_to_dicts([3, [4, 5]])
        {'0': 3, '1': {'0': 4, '1': 5}}
        >>> lists_to_dicts({"a": [3, 4], "b": []})
        {'a': {'0': 3, '1': 4}, 'b': {}}
    """
    if isinstance(obj, list):
        return {str(pos): lists_to_dicts(item) for pos, item in enumerate(obj)}
    if isinstance(obj, dict):
        return {name: lists_to_dicts(value) for name, value in obj.items()}
    return obj
def load_dataframe(data_file):
"""Load normalized scores dataframe from a JSON lines file"""
data = []
with file_open(data_file) as dfile:
for line in dfile:
try:
data.append(lists_to_dicts(json.loads(line)))
except json.decoder.JSONDecodeError as err:
logger.error(line)
raise err
return pd.DataFrame( | json_normalize(data) | pandas.json_normalize |
# Author: <NAME> 2021-10-01
#
# Amazon has a new format for their website
#
# copy text of your Kindle library to a *.txt file
# entries will look a little like this:
#
# King of Thorns (The Broken Empire Book 2)
# <NAME>
# Acquired on September 24, 2021
# In2
# Collections
# 1
# Device
# Deliver or Remove from Device
# Mark as Read
# More actions
#
# The following may or may not be true anymore; the code still searches for it
# Disturbingly, sometimes the Kindle Library list will have a book more than once
# and sometimes it will not list a book that you actually have.
# I don't have a solution for that, but I do print some alerts.
# I have seen it list book 10 of a series two times and not list book 8.
# When I searched the content for "Book 8", it found it.
# When I cleared the search, it listed book 10 once and book 8 once.
# I have code that will keep previously found books and if there are duplicates it keeps the first one with an alert.
# ... and sometimes they will change the Author name: they changed "<NAME>" to "<NAME>"
# There is some text output that might help alert you to this
# flags --oldapproxmatch and --newapproxmatch are one way to help deal with it
import sys
import string
import os
import argparse
import pandas as pd
import datetime
# English calendar month names, in order. Presumably used to parse the
# "Acquired on <Month> <day>, <year>" lines shown in the banner comment
# above -- confirm against the parsing code further down.
MONTHS = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
# Special-case title lookup tables. Believe it or not, there are a bunch of
# normal cases too... Each entry has the shape [lcstring, series, seriesNum];
# per the note above doReadPreviousRatings, titles here are lower-cased on load.
TITLE_totalMatch = []
TITLE_partialMatch = []
SUBSTITUTE_goofy = []
# Columns in which the SUBSTITUTE_goofy substitutions are applied.
doSubstituteGoofy = ['Title', 'Author', 'Author (f,m,l)']
###################################################################################
# startOfLineIsIn()
#
# theLine - line to check
# listOfCompares [] - strings to compare
#
def startOfLineIsIn(theLine, listOfCompares):
    """Return True if theLine begins with any string in listOfCompares.

    theLine - line to check
    listOfCompares - list of candidate prefixes
    """
    # str.startswith accepts a tuple of prefixes, replacing the manual
    # `find(compare) == 0` loop with a single call. Semantics are unchanged:
    # an empty listOfCompares yields False, and an empty-string prefix
    # matches any line, exactly as the original loop did.
    return theLine.startswith(tuple(listOfCompares))
###################################################################################
# doReadPreviousRatings()
#
# prevRatingsFname spreadsheet has tabs
# Books - previous version of our output spreadsheet
# save ratings, etc. and notice if we don't see one in listFname
# TITLE_totalMatch - if this matches total title then use series and seriesNum
# TITLE_partialMatch - if this matches any part of title then use series and seriesNum
# NOTE: titles in TITLE_totalMatch and TITLE_partialMatch are all made lower-case when we return
#
def doReadPreviousRatings(prevRatingsFname):
    """Open the previous-ratings Excel workbook (see the banner comment above).

    prevRatingsFname: path to the spreadsheet whose 'Books' tab holds the
    previous version of the output (ratings etc.) and whose
    TITLE_totalMatch / TITLE_partialMatch tabs hold series overrides.
    """
    prevRatings = {} # ratings etc. collected from the previous 'Books' tab
    # this was code for reading a tab-separated-variable version of tab 'Books'
    # df = pd.read_table(prevRatingsFname,sep='\t',encoding="cp1252") # I hate Windows smart quotes
    # Import the excel file
    sys.stderr.write("opening %s\n" % os.path.abspath(prevRatingsFname))
    xlsPd = pd.ExcelFile(prevRatingsFname)
# #-- -- -- -- Supervised Learning with scikit-learn
# # Used for Data Scientist Training Path
# #FYI it's a compilation of how to work
# #with different commands.
# ### --------------------------------------------------------
# # # # ------>>>> Which of these is a
# classification problem? Once
# you decide to leverage
# supervised machine learning
# to solve a new problem, you
# need to identify whether your
# problem is better suited to
# classification or regression.
# This exercise will help you
# develop your intuition for
# distinguishing between the
# two.
# Provided below are 4 example
# applications of machine
# learning. Which of them is a
# supervised classification
# problem?
# R/ Using labeled financial data to predict whether the value of a stock will go up or go down next week.
# ### --------------------------------------------------------
# # # # ------>>>> Numerical EDA
# In this chapter, you'll be
# working with a dataset
# obtained from the UCI Machine
# Learning Repository
# consisting of votes made by
# US House of Representatives
# Congressmen. Your goal will
# be to predict their party
# affiliation ('Democrat' or
# 'Republican') based on how
# they voted on certain key
# issues. Here, it's worth
# noting that we have
# preprocessed this dataset to
# deal with missing values.
# This is so that your focus
# can be directed towards
# understanding how to train
# and evaluate supervised
# learning models. Once you
# have mastered these
# fundamentals, you will be
# introduced to preprocessing
# techniques in Chapter 4 and
# have the chance to apply them
# there yourself - including on
# this very same dataset!
# Before thinking about what
# supervised learning models
# you can apply to this,
# however, you need to perform
# Exploratory data analysis (
# EDA) in order to understand
# the structure of the data.
# For a refresher on the
# importance of EDA, check out
# the first two chapters of
# Statistical Thinking in
# Python (Part 1).
# Get started with your EDA now
# by exploring this voting
# records dataset numerically.
# It has been pre-loaded for
# you into a DataFrame called
# df. Use pandas' .head(),
# .info(), and .describe()
# methods in the IPython Shell
# to explore the DataFrame, and
# select the statement below
# that is not true.
df.head()
df.info()
df.describe()
# R/There are 17 predictor variables, or features, in this DataFrame.
# ### --------------------------------------------------------
# # # # ------>>>> Visual EDA
# The Numerical EDA you did in
# the previous exercise gave
# you some very important
# information, such as the
# names and data types of the
# columns, and the dimensions
# of the DataFrame. Following
# this with some visual EDA
# will give you an even better
# understanding of the data. In
# the video, Hugo used the
# scatter_matrix() function on
# the Iris data for this
# purpose. However, you may
# have noticed in the previous
# exercise that all the
# features in this dataset are
# binary; that is, they are
# either 0 or 1. So a different
# type of plot would be more
# useful here, such as
# Seaborn's countplot.
# Given on the right is a
# countplot of the 'education'
# bill, generated from the
# following code:
# plt.figure() sns.countplot(
# x='education', hue='party',
# data=df, palette='RdBu')
# plt.xticks([0,1], ['No',
# 'Yes']) plt.show() In
# sns.countplot(), we specify
# the x-axis data to be
# 'education', and hue to be
# 'party'. Recall that 'party'
# is also our target variable.
# So the resulting plot shows
# the difference in voting
# behavior between the two
# parties for the 'education'
# bill, with each party colored
# differently. We manually
# specified the color to be
# 'RdBu', as the Republican
# party has been traditionally
# associated with red, and the
# Democratic party with blue.
# It seems like Democrats voted
# resoundingly against this
# bill, compared to
# Republicans. This is the kind
# of information that our
# machine learning model will
# seek to learn when we try to
# predict party affiliation
# solely based on voting
# behavior. An expert in U.S
# politics may be able to
# predict this without machine
# learning, but probably not
# instantaneously - and
# certainly not if we are
# dealing with hundreds of
# samples!
# In the IPython Shell, explore
# the voting behavior further
# by generating countplots for
# the 'satellite' and 'missile'
# bills, and answer the
# following question: Of these
# two bills, for which ones do
# Democrats vote resoundingly
# in favor of, compared to
# Republicans? Be sure to begin
# your plotting statements for
# each figure with plt.figure()
# so that a new figure will be
# set up. Otherwise, your plots
# will be overlaid onto the
# same figure.
# R/ Both 'satellite' and 'missile'.
# ### --------------------------------------------------------
# # # # ------>>>> k-Nearest Neighbors: Fit
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier
# Create arrays for the features and the response variable
y = df['party'].values
X = df.drop('party', axis=1).values
# Create a k-NN classifier with 6 neighbors
knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the data
knn.fit(X,y)
# ### --------------------------------------------------------
# # # # ------>>>> k-Nearest Neighbors: Predict
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier
# Create arrays for the features and the response variable
y = df['party'].values
X = df.drop('party', axis=1).values
# Create a k-NN classifier with 6 neighbors: knn
knn = knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the data
knn.fit(X,y)
# Predict the labels for the training data X
y_pred = knn.predict(X)
# Predict and print the label for the new data point X_new
new_prediction = knn.predict(X_new)
print("Prediction: {}".format(new_prediction))
# ### --------------------------------------------------------
# # # # ------>>>> The digits recognition dataset
# Import necessary modules
from sklearn import datasets
import matplotlib.pyplot as plt
# Load the digits dataset: digits
digits = datasets.load_digits()
# Print the keys and DESCR of the dataset
print(digits.keys())
print(digits.DESCR)
# Print the shape of the images and data keys
print(digits.images.shape)
print(digits.data.shape)
# Display digit 1010
plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Train/Test Split + Fit/Predict/Accuracy
# Import necessary modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import datasets
# Create feature and target arrays
X = digits.data
y = digits.target
# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42, stratify=y)
# Create a k-NN classifier with 7 neighbors: knn
knn = KNeighborsClassifier(n_neighbors=7)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
# Print the accuracy
print(knn.score(X_test, y_test))
# ### --------------------------------------------------------
# # # # ------>>>> Overfitting and underfitting
# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Which of the following is a
# regression problem? Andy
# introduced regression to you
# using the Boston housing
# dataset. But regression
# models can be used in a
# variety of contexts to solve
# a variety of different
# problems.
# Given below are four example
# applications of machine
# learning. Your job is to pick
# the one that is best framed
# as a regression problem
# R/ A bike share company using time and weather data to predict the number of bikes being rented at any given hour.
# ### --------------------------------------------------------
# # # # ------>>>> Importing data for supervised learning
# Import numpy and pandas
import numpy as np
import pandas as pd
# Read the CSV file into a DataFrame: df
df = pd.read_csv('gapminder.csv')
# Create arrays for features and target variable
y = df['life'].values
X = df['fertility'].values
# Print the dimensions of X and y before reshaping
print("Dimensions of y before reshaping: {}".format(y.shape))
print("Dimensions of X before reshaping: {}".format(X.shape))
# Reshape X and y
y = y.reshape(-1,1)
X = X.reshape(-1,1)
# Print the dimensions of X and y after reshaping
print("Dimensions of y after reshaping: {}".format(y.shape))
print("Dimensions of X after reshaping: {}".format(X.shape))
# ### --------------------------------------------------------
# # # # ------>>>> Exploring the Gapminder data
# As always, it is important to
# explore your data before
# building models. On the
# right, we have constructed a
# heatmap showing the
# correlation between the
# different features of the
# Gapminder dataset, which has
# been pre-loaded into a
# DataFrame as df and is
# available for exploration in
# the IPython Shell. Cells that
# are in green show positive
# correlation, while cells that
# are in red show negative
# correlation. Take a moment to
# explore this: Which features
# are positively correlated
# with life, and which ones are
# negatively correlated? Does
# this match your intuition?
# Then, in the IPython Shell,
# explore the DataFrame using
# pandas methods such as
# .info(), .describe(), .head()
# .
# In case you are curious, the
# heatmap was generated using
# Seaborn's heatmap function
# and the following line of
# code, where df.corr()
# computes the pairwise
# correlation between columns:
# sns.heatmap(df.corr(),
# square=True, cmap='RdYlGn')
# Once you have a feel for the
# data, consider the statements
# below and select the one that
# is not true. After this, Hugo
# will explain the mechanics of
# linear regression in the next
# video and you will be on your
# way building regression
# models!
df.info()
df.head()
# R/fertility is of type int64.
# ### --------------------------------------------------------
# # # # ------>>>> Fit & predict for regression
# Import LinearRegression
from sklearn.linear_model import LinearRegression
# Create the regressor: reg
reg = LinearRegression()
# Create the prediction space
prediction_space = np.linspace(min(X_fertility), max(X_fertility)).reshape(-1,1)
# Fit the model to the data
reg.fit(X_fertility, y)
# Compute predictions over the prediction space: y_pred
y_pred = reg.predict(prediction_space)
# Print R^2
print(reg.score(X_fertility, y))
# Plot regression line
plt.plot(prediction_space, y_pred, color='black', linewidth=3)
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Train/test split for regression
# Import necessary modules
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# Create training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42)
# Create the regressor: reg_all
reg_all = LinearRegression()
# Fit the regressor to the training data
reg_all.fit(X_train, y_train)
# Predict on the test data: y_pred
y_pred = reg_all.predict(X_test)
# Compute and print R^2 and RMSE
print("R^2: {}".format(reg_all.score(X_test, y_test)))
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: {}".format(rmse))
# ### --------------------------------------------------------
# # # # ------>>>> 5-fold cross-validation
# Import the necessary modules
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Create a linear regression object: reg
reg = LinearRegression()
# Compute 5-fold cross-validation scores: cv_scores
cv_scores = cross_val_score(reg,X,y,cv=5)
# Print the 5-fold cross-validation scores
print(cv_scores)
print("Average 5-Fold CV Score: {}".format(np.mean(cv_scores)))
# ### --------------------------------------------------------
# # # # ------>>>> K-Fold CV comparison
# Import necessary modules
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Create a linear regression object: reg
reg = LinearRegression()
# Perform 3-fold CV
cvscores_3 = cross_val_score(reg, X, y, cv=3)
print(np.mean(cvscores_3))
# Perform 10-fold CV
cvscores_10 = cross_val_score(reg, X, y, cv=10)
print(np.mean(cvscores_10))
# ### --------------------------------------------------------
# # # # ------>>>> Regularization I: Lasso
# Import Lasso
from sklearn.linear_model import Lasso
# Instantiate a lasso regressor: lasso
lasso = Lasso(alpha=0.4, normalize=True)
# Fit the regressor to the data
lasso.fit(X,y)
# Compute and print the coefficients
lasso_coef = lasso.coef_
print(lasso_coef)
# Plot the coefficients
plt.plot(range(len(df_columns)), lasso_coef)
plt.xticks(range(len(df_columns)), df_columns.values, rotation=60)
plt.margins(0.02)
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Regularization II: Ridge
# Import necessary modules
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
# Setup the array of alphas and lists to store scores
alpha_space = np.logspace(-4, 0, 50)
ridge_scores = []
ridge_scores_std = []
# Create a ridge regressor: ridge
ridge = Ridge(normalize=True)
# Compute scores over range of alphas
for alpha in alpha_space:
# Specify the alpha value to use: ridge.alpha
ridge.alpha = alpha
# Perform 10-fold CV: ridge_cv_scores
ridge_cv_scores = cross_val_score(ridge, X, y, cv=10)
# Append the mean of ridge_cv_scores to ridge_scores
ridge_scores.append(np.mean(ridge_cv_scores))
# Append the std of ridge_cv_scores to ridge_scores_std
ridge_scores_std.append(np.std(ridge_cv_scores))
# Display the plot
display_plot(ridge_scores, ridge_scores_std)
# ### --------------------------------------------------------
# # # # ------>>>> Metrics for classification
# Import necessary modules
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# Create training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,random_state=42)
# Instantiate a k-NN classifier: knn
knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
# Predict the labels of the test data: y_pred
y_pred = knn.predict(X_test)
# Generate the confusion matrix and classification report
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# ### --------------------------------------------------------
# # # # ------>>>> Building a logistic regression model
# Import the necessary modules
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
# Create training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state=42)
# Create the classifier: logreg
logreg = LogisticRegression()
# Fit the classifier to the training data
logreg.fit(X_train, y_train)
# Predict the labels of the test set: y_pred
y_pred = logreg.predict(X_test)
# Compute and print the confusion matrix and classification report
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# ### --------------------------------------------------------
# # # # ------>>>> Plotting an ROC curve
# Import necessary modules
from sklearn.metrics import roc_curve
# Compute predicted probabilities: y_pred_prob
y_pred_prob = logreg.predict_proba(X_test)[:,1]
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
# # ### --------------------------------------------------------
# # # # # ------>>>> Precision-recall Curve
# When looking at your ROC
# curve, you may have noticed
# that the y-axis (True
# positive rate) is also known
# as recall. Indeed, in
# addition to the ROC curve,
# there are other ways to
# visually evaluate model
# performance. One such way is
# the precision-recall curve,
# which is generated by
# plotting the precision and
# recall for different
# thresholds. On the right, a
# precision-recall curve has
# been generated for the
# diabetes dataset. The
# classification report and
# confusion matrix are
# displayed in the IPython
# Shell.
# Study the precision-recall
# curve and then consider the
# statements given below.
# Choose the one statement that
# is not true. Note that here,
# the class is positive (1) if
# # the individual has diabetes.
# R/ -> Precision and recall take true negatives into consideration.
# ---> negatives do not appear at all in the definitions of precision and recall.
# ### --------------------------------------------------------
# # # # ------>>>>AUC computation
# Import necessary modules
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_val_score
# Compute predicted probabilities: y_pred_prob
y_pred_prob = logreg.predict_proba(X_test)[:,1]
# Compute and print AUC score
print("AUC: {}".format(roc_auc_score(y_test, y_pred_prob)))
# Compute cross-validated AUC scores: cv_auc
cv_auc = cross_val_score(logreg, X, y, cv=5,
scoring='roc_auc')
# Print list of AUC scores
print("AUC scores computed using 5-fold cross-validation: {}".format(cv_auc))
# ### --------------------------------------------------------
# # # # ------>>>> Hyperparameter tuning with GridSearchCV
# Import necessary modules
# NOTE(review): the snippets below are course-style exercises; they assume
# `X`, `y`, `np` and `plt` are defined earlier in the file — confirm.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# Setup the hyperparameter grid (15 values of C, log-spaced over 1e-5..1e8)
c_space = np.logspace(-5, 8, 15)
param_grid = {'C': c_space}
# Instantiate a logistic regression classifier: logreg
logreg = LogisticRegression()
# Instantiate the GridSearchCV object: logreg_cv
logreg_cv = GridSearchCV(logreg, param_grid, cv=5)
# Fit it to the data
logreg_cv.fit(X, y)
# Print the tuned parameters and score
print("Tuned Logistic Regression Parameters: {}".format(logreg_cv.best_params_))
print("Best score is {}".format(logreg_cv.best_score_))
# ### --------------------------------------------------------
# # # # ------>>>>Hyperparameter tuning with RandomizedSearchCV
# Import necessary modules
from scipy.stats import randint
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
# Setup the parameters and distributions to sample from: param_dist
# (randint gives a discrete uniform distribution for RandomizedSearchCV to draw from)
param_dist = {"max_depth": [3, None],
              "max_features": randint(1, 9),
              "min_samples_leaf": randint(1, 9),
              "criterion": ["gini", "entropy"]}
# Instantiate a Decision Tree classifier: tree
tree = DecisionTreeClassifier()
# Instantiate the RandomizedSearchCV object: tree_cv
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
# Fit it to the data
tree_cv.fit(X, y)
# Print the tuned parameters and score
print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
print("Best score is {}".format(tree_cv.best_score_))
# ### --------------------------------------------------------
# # # # ------>>>>-Hold-out set reasoning
# For which of the following reasons would you want to use a hold-out set for the very end?
# R/ You want to be absolutely certain about your model's ability to generalize to unseen data.
# ### --------------------------------------------------------
# # # # ------>>>> Hold-out set in practice I: Classification
# Import necessary modules
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# Create the hyperparameter grid (tune both C and the penalty type)
c_space = np.logspace(-5, 8, 15)
param_grid = {'C': c_space, 'penalty': ['l1', 'l2']}
# Instantiate the logistic regression classifier: logreg
logreg = LogisticRegression()
# Create train and test sets (40% held out for the final evaluation)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state=42)
# Instantiate the GridSearchCV object: logreg_cv
logreg_cv = GridSearchCV(logreg, param_grid, cv=5)
# Fit it to the training data
logreg_cv.fit(X_train, y_train)
# Print the optimal parameters and best score
print("Tuned Logistic Regression Parameter: {}".format(logreg_cv.best_params_))
print("Tuned Logistic Regression Accuracy: {}".format(logreg_cv.best_score_))
# ### --------------------------------------------------------
# # # # ------>>>> Hold-out set in practice II: Regression
# Import necessary modules
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
# Create train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.40, random_state = 42)
# Create the hyperparameter grid (l1_ratio interpolates between L2-only and L1-only)
l1_space = np.linspace(0, 1, 30)
param_grid = {'l1_ratio': l1_space}
# Instantiate the ElasticNet regressor: elastic_net
elastic_net = ElasticNet()
# Setup the GridSearchCV object: gm_cv
gm_cv = GridSearchCV(elastic_net, param_grid, cv=5)
# Fit it to the training data
gm_cv.fit(X_train, y_train)
# Predict on the test set and compute metrics
y_pred = gm_cv.predict(X_test)
r2 = gm_cv.score(X_test, y_test)
mse = mean_squared_error(y_test, y_pred)
print("Tuned ElasticNet l1 ratio: {}".format(gm_cv.best_params_))
print("Tuned ElasticNet R squared: {}".format(r2))
print("Tuned ElasticNet MSE: {}".format(mse))
# ### --------------------------------------------------------
# # # # ------>>>> Exploring categorical features
# Import pandas
import pandas as pd
# Read 'gapminder.csv' into a DataFrame: df
df = pd.read_csv('gapminder.csv')
# Create a boxplot of life expectancy per region
df.boxplot('life', 'Region', rot=60)
# Show the plot (NOTE(review): assumes matplotlib imported as plt earlier — confirm)
plt.show()
# ### --------------------------------------------------------
# # # # ------>>>> Creating dummy variables
# Create dummy variables: df_region
df_region = pd.get_dummies(df)
# Print the columns of df_region
print(df_region.columns)
# Create dummy variables with drop_first=True: df_region
# (drops one dummy per category to avoid redundant/collinear columns)
df_region = pd.get_dummies(df, drop_first=True)
from ast import literal_eval
import numpy as np
import pandas as pd
import scipy
from pandas import DataFrame
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import BallTree, KDTree, NearestNeighbors
from sklearn.preprocessing import MultiLabelBinarizer, Normalizer
from tqdm import tqdm
def parse_json(filename_python_json: str, read_max: int = -1) -> DataFrame:
    """Parse a file of one-record-per-line Python literals into a DataFrame.

    Each line is parsed with ``ast.literal_eval`` (the files contain Python
    literal syntax such as single-quoted strings, not strict JSON).

    Args:
        filename_python_json (str): Path to the file to parse.
        read_max (int, optional): Maximum number of records to read;
            -1 (the default) reads the whole file.

    Returns:
        DataFrame: DataFrame with one row per parsed record.
    """
    with open(filename_python_json, "r", encoding="utf-8") as f:
        # parse json
        parse_data = []
        # tqdm is for showing progress bar, always good when processing large amounts of data
        for line in tqdm(f):
            # load python nested datastructure
            parsed_result = literal_eval(line)
            parse_data.append(parsed_result)
            # Fix: stop at exactly `read_max` records. The previous check
            # (len > read_max) only broke after appending read_max + 1 rows.
            if read_max != -1 and len(parse_data) >= read_max:
                print(f"Break reading after {read_max} records")
                break
        print(f"Reading {len(parse_data)} rows.")
        # create dataframe
        df = DataFrame.from_dict(parse_data)
        return df
# TODO: use seed for SVD
class ContentBasedRec(object):
def __init__(self, items_path: str, sparse: bool = True, distance_metric='minkowski', dim_red=None, tfidf='default', use_feedback=True, normalize=False) -> None:
"""Content based recommender
Args:
items_path (str): Path to json file containing the items
sparse (bool, optional): If recommender uses a sparse representation. Defaults to True.
distance_metric (str, optional): Which distance metric to use. Defaults to 'minkowski'.
dim_red ([type], optional): Which dimensionality reduction to use. Defaults to None.
tfidf (str, optional): Which tf-idf method to use. Defaults to 'default'.
use_feedback (bool, optional): Consider positive/negative reviews. Defaults to True.
"""
super().__init__()
self.sparse = sparse
self.dim_red = dim_red
self.use_feedback = use_feedback
self.normalize = normalize
self.items = self._generate_item_features(parse_json(items_path))
self.recommendations = None
self.normalizer = Normalizer(copy=False)
# Select tf-idf method to use
self.tfidf = None
if tfidf == 'default':
self.tfidf = TfidfTransformer(smooth_idf=False, sublinear_tf=False)
elif tfidf == 'smooth':
self.tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=False)
elif tfidf == 'sublinear':
self.tfidf = TfidfTransformer(smooth_idf=False, sublinear_tf=True)
elif tfidf == 'smooth_sublinear':
self.tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=True)
# Select algorithm to use for neighbour computation
algorithm = 'auto'
if distance_metric in BallTree.valid_metrics:
algorithm = 'ball_tree'
elif distance_metric in KDTree.valid_metrics:
algorithm = 'kd_tree'
self.method = NearestNeighbors(n_neighbors=10, algorithm=algorithm, metric=distance_metric)
    def _generate_item_features(self, items: DataFrame) -> DataFrame:
        """Generates feature vector of items and appends to returned DataFrame.

        Keeps the ``id`` column plus one one-hot ``tag_*`` column per distinct
        tag/genre; all other metadata columns are removed.

        Args:
            items (DataFrame): dataframe containing the items

        Returns:
            DataFrame: dataframe with feature vector appended
        """
        # NOTE: drop/dropna use inplace=True, so the caller's DataFrame is
        # mutated as a side effect.
        items.drop(["publisher", "app_name", "title", "url", "release_date", "discount_price", "reviews_url",
                    "price", "early_access", "developer", "sentiment", "metascore", "specs"], axis=1, inplace=True)
        items.dropna(subset=["id"], inplace=True)
        items = items.reset_index(drop=True)
        # Combine genres, tags and specs into one column
        # (fillna("") turns missing entries into the empty set via set("") == set())
        items["genres"] = items["genres"].fillna("").apply(set)
        items["tags"] = items["tags"].fillna("").apply(set)
        items["tags"] = items.apply(lambda x: list(
            set.union(x["genres"], x["tags"])), axis=1)
        items = items.drop(["genres"], axis=1)
        # Compute one-hot encoded vector of tags
        mlb = MultiLabelBinarizer(sparse_output=self.sparse)
        if self.sparse:
            # Sparse path: wrap the scipy sparse matrix in a sparse-backed DataFrame
            items = items.join(DataFrame.sparse.from_spmatrix(mlb.fit_transform(items.pop(
                "tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
        else:
            items = items.join(DataFrame(mlb.fit_transform(items.pop(
                "tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
        return items
def generate_recommendations(self, data_path: str, amount=10, read_max=None) -> None:
"""Generate recommendations based on user review data
Args:
data_path (str): User review data
amount (int, optional): Amount of times to recommend. Defaults to 10.
read_max (int, optional): Max amount of users to read. Defaults to None.
"""
items = self.items
df = parse_json(data_path) if read_max is None else parse_json(data_path, read_max=read_max)
df.drop(df[~df["reviews"].astype(bool)].index,inplace=True) # filter out empty reviews
# Process reviews
df = df.explode("reviews", ignore_index=True)
df = pd.concat([df.drop(["reviews", "user_url"], axis=1), pd.json_normalize(df.reviews)],
axis=1).drop(["funny", "helpful", "posted", "last_edited", "review"], axis=1)
df = df.groupby("user_id").agg(list).reset_index()
# Drop id so only feature vector is left
if self.sparse:
X = scipy.sparse.csr_matrix(items.drop(["id"], axis=1).values)
else:
X = np.array(items.drop(["id"], axis=1).values)
if self.tfidf:
# Use tf-idf
X = self.tfidf.fit_transform(X)
if self.dim_red:
# Use dimensionality reduction
X = self.dim_red.fit_transform(X)
if self.normalize:
X = self.normalizer.fit_transform(X)
# Combine transformed feature vector back into items
if self.sparse:
items = pd.concat([items["id"], | DataFrame.sparse.from_spmatrix(X) | pandas.DataFrame.sparse.from_spmatrix |
# Bulding futures_bundle
import pandas as pd
from os import listdir
from tqdm import tqdm # Used for progress bar
# Change the path to where you have your data
base_path = "/Users/dmitrymikhaylov/Documents/code/fin/testing_clenow/data/"
# Folder of per-contract CSV files (one file per futures contract)
data_path = base_path + 'random_futures/'
meta_path = 'futures_meta/meta.csv'
# Lookup table keyed by root symbol; includes the 'minor_fx_adj' factor
# used below to rescale contracts quoted in minor currency units.
futures_lookup = pd.read_csv(base_path + meta_path, index_col=0)
"""
The ingest function needs to have this exact signature,
meaning these arguments passed, as shown below.
"""
def random_futures_data(environ,
                        asset_db_writer,
                        minute_bar_writer,
                        daily_bar_writer,
                        adjustment_writer,
                        calendar,
                        start_session,
                        end_session,
                        cache,
                        show_progress,
                        output_dir):
    """Zipline bundle ingest function for the random futures data set.

    The argument list is fixed by Zipline's bundle API and supplied by the
    framework at ingest time; daily bars, (empty) adjustments, per-contract
    metadata and root-symbol metadata are written through the given writers.
    """
    # Get list of files from path
    # Slicing off the last part
    # 'example.csv'[:-4] = 'example'
    symbols = [f[:-4] for f in listdir(data_path)]
    if not symbols:
        raise ValueError("No symbols found in folder.")
    # Prepare an empty DataFrame for dividends (futures have none here)
    divs = pd.DataFrame(columns=['sid',
                                 'amount',
                                 'ex_date',
                                 'record_date',
                                 'declared_date',
                                 'pay_date']
                        )
    # Prepare an empty DataFrame for splits
    splits = pd.DataFrame(columns=['sid',
                                   'ratio',
                                   'effective_date']
                          )
    # Prepare an empty DataFrame for metadata
    # (filled in-place per contract by process_futures/make_meta)
    metadata = pd.DataFrame(columns=('start_date',
                                     'end_date',
                                     'auto_close_date',
                                     'symbol',
                                     'root_symbol',
                                     'expiration_date',
                                     'notice_date',
                                     'tick_size',
                                     'exchange'
                                     )
                            )
    # Check valid trading dates, according to the selected exchange calendar
    sessions = calendar.sessions_in_range(start_session, end_session)
    # Get data for all stocks and write to Zipline
    # (process_futures is a generator yielding (sid, DataFrame) pairs)
    daily_bar_writer.write(
        process_futures(symbols, sessions, metadata)
    )
    adjustment_writer.write(splits=splits, dividends=divs)
    # Prepare root level metadata
    root_symbols = futures_lookup.copy()
    root_symbols['root_symbol_id'] = root_symbols.index.values
    # minor_fx_adj is an ingest-time scaling detail, not Zipline metadata
    del root_symbols['minor_fx_adj']
    # write the meta data
    asset_db_writer.write(futures=metadata, root_symbols=root_symbols)
def process_futures(symbols, sessions, metadata):
    """Yield (sid, daily-bar DataFrame) pairs for each futures contract.

    Reads one CSV per symbol from the module-level ``data_path``, rescales
    minor-currency quotes, sanitises high/low, aligns to the exchange
    calendar, and fills the shared ``metadata`` frame in place via
    ``make_meta``. Generator consumed by ``daily_bar_writer.write``.
    """
    # Loop the stocks, setting a unique Security ID (SID)
    sid = 0
    # Loop the symbols with progress bar, using tqdm
    for symbol in tqdm(symbols, desc='Loading data...'):
        sid += 1
        # Read the stock data from csv file.
        df = pd.read_csv('{}/{}.csv'.format(data_path, symbol), index_col=[0], parse_dates=[0])
        # Check for minor currency quotes
        # (look up the scaling factor for this contract's root symbol)
        adjustment_factor = futures_lookup.loc[
            futures_lookup['root_symbol'] == df.iloc[0]['root_symbol']
            ]['minor_fx_adj'].iloc[0]
        df['open'] *= adjustment_factor
        df['high'] *= adjustment_factor
        df['low'] *= adjustment_factor
        df['close'] *= adjustment_factor
        # Avoid potential high / low data errors in data set
        # And apply minor currency adjustment for USc quotes
        df['high'] = df[['high', 'close']].max(axis=1)
        df['low'] = df[['low', 'close']].min(axis=1)
        df['high'] = df[['high', 'open']].max(axis=1)
        df['low'] = df[['low', 'open']].min(axis=1)
        # Synch to the official exchange calendar
        # (reindex to calendar sessions, then trim to the contract's own span)
        df = df.reindex(sessions.tz_localize(None))[df.index[0]:df.index[-1] ]
        # Forward fill missing data
        df.fillna(method='ffill', inplace=True)
        # Drop remaining NaN
        df.dropna(inplace=True)
        # Cut dates before 2000, avoiding Zipline issue
        df = df['2000-01-01':]
        # Prepare contract metadata (mutates the shared `metadata` frame)
        make_meta(sid, metadata, df, sessions)
        # Drop the non-OHLCV columns before handing bars to the writer
        del df['openinterest']
        del df['expiration_date']
        del df['root_symbol']
        del df['symbol']
        yield sid, df
def make_meta(sid, metadata, df, sessions):
# Check first and last date.
start_date = df.index[0]
end_date = df.index[-1]
# The auto_close date is the day after the last trade.
ac_date = end_date + | pd.Timedelta(days=1) | pandas.Timedelta |
##################################
# #
# Leveraged product scrapers #
# oliverk1 #
# July 2019 #
# #
##################################
# Import packages and setup time
from selenium import webdriver
import pandas as pd
import time, datetime, traceback, pyodbc, re
from statistics import mean
# Record script start time so total runtime can be reported later
start_time = time.time()
# Setup Chrome window (expects chromedriver.exe in the working directory)
driver = webdriver.Chrome("chromedriver.exe")
def ing():
# Setup ticker names and DataFrame
ing_tickers = ['adyen','amg','bam','besi','fugro','galapagos','pharming','post-nl','randstad','tomtom']
totalresult = pd.DataFrame(columns=['ISIN', 'Name', 'Bid', 'Ask', 'Leverage', 'StopLoss', 'FinancingLevel', 'ReferenceLevel', 'Ratio'])
# Accept cookies and disclaimer
driver.get('https://www.ingsprinters.nl/markten/aandelen/' + ing_tickers[0] + "/page/1")
time.sleep(1)
driver.find_element_by_xpath("//*[contains(text(), 'Akkoord')]").click()
time.sleep(1)
buttons = driver.find_elements_by_xpath("//*[contains(text(), 'akkoord')]")
buttons[1].click()
time.sleep(1)
for ticker in ing_tickers:
driver.get('https://www.ingsprinters.nl/markten/aandelen/' + ticker + '/page/1')
# Make sure all products are shown by clicking a
show_alls = driver.find_elements_by_xpath("//span[@class='js-label']")
for i in show_alls:
i.click()
time.sleep(1)
# Obtain productcodes from page by selecting links that contain "NL00"
elems = driver.find_elements_by_xpath("//a[@href]")
productcodes = []
for elem in elems:
link = elem.get_attribute("href")
if "NL00" in link:
productcode = link.split("/")[-1]
productcodes.append(productcode)
productcodes = list(set(productcodes))
productnames = []
isins = []
bids = []
asks = []
stoplosslevels = []
leverages = []
referencelevels = []
ratios = []
financinglevels = []
for code in productcodes:
driver.get('https://www.ingsprinters.nl/markten/aandelen/' + ticker + '/' + code)
# Append productname
productnames.append(driver.find_element_by_xpath("//h1[@class='text-body']").text.split('\n')[0])
# Append ISIN
isins.append(code)
top_row = driver.find_element_by_xpath("//div[@class='cell']").text.split('\n')
time.sleep(0.5)
# Append bid and ask
bids.append(float(top_row[1].replace(",",".")))
asks.append(float(top_row[4].replace(",",".")))
# Leverage
leverages.append(float(top_row[10].replace(",",".")))
# Stoploss
stoplosslevels.append(float(top_row[12].replace(",",".")[1:]))
# Referencelevel
if "€" in top_row[14]:
referencelevels.append(float(top_row[14].split(" ")[1].replace(",",".")))
else:
referencelevels.append(float(top_row[14].split(" ")[0].replace(",",".")))
side_row = driver.find_elements_by_xpath("//div[@class='card']")[3].text.split('\n')
# FinancingLevel
financinglevels.append(float(side_row[3][1:].replace(",",".")))
# Ratio
ratios.append(float(side_row[9].replace(",",".")))
# Place results in DataFrame and concatenate results
result = pd.DataFrame({'ISIN': isins, 'Name': productnames, 'Bid': bids, 'Ask': asks, 'Leverage': leverages, 'StopLoss': stoplosslevels, 'FinancingLevel': financinglevels, 'ReferenceLevel': referencelevels, 'Ratio': ratios})
totalresult = | pd.concat([totalresult, result], axis=0, sort=False) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
    """
    Return the highest-priority box type between `box` and `vector`'s type.

    Priority order is DataFrame > Series > Index; if neither operand
    matches any of these, `box` itself is returned unchanged.
    """
    for candidate in (DataFrame, Series, pd.Index):
        if box is candidate or isinstance(vector, candidate):
            return candidate
    return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
    # Comparison tests for timedelta64[ns] vectors fully parametrized over
    # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
    # tests will eventually end up here.
    def test_compare_timedelta64_zerodim(self, box_with_array):
        # GH#26689 should unbox when comparing with zerodim array
        box = box_with_array
        # comparisons on an Index return a plain ndarray, not another Index
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        tdi = pd.timedelta_range('2H', periods=4)
        # 0-dim np.timedelta64 wrapping the first element of tdi
        other = np.array(tdi.to_numpy()[0])
        tdi = tm.box_expected(tdi, box)
        res = tdi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)
        with pytest.raises(TypeError):
            # zero-dim of wrong dtype should still raise
            tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
    """Comparison-operator tests for timedelta64 data, including NaT handling."""
    # TODO: All of these need to be parametrized over box
    def test_compare_timedelta_series(self):
        # regression test for GH#5963
        s = pd.Series([timedelta(days=1), timedelta(days=2)])
        actual = s > timedelta(days=1)
        expected = pd.Series([False, True])
        tm.assert_series_equal(actual, expected)
    def test_tdi_cmp_str_invalid(self, box_with_array):
        # GH#13624: ordered comparisons vs a string must raise;
        # ==/!= must return all-False/all-True instead.
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        tdi = TimedeltaIndex(['1 day', '2 days'])
        tdarr = tm.box_expected(tdi, box_with_array)
        for left, right in [(tdarr, 'a'), ('a', tdarr)]:
            with pytest.raises(TypeError):
                left > right
            with pytest.raises(TypeError):
                left >= right
            with pytest.raises(TypeError):
                left < right
            with pytest.raises(TypeError):
                left <= right
            result = left == right
            expected = np.array([False, False], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)
            result = left != right
            expected = np.array([True, True], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            tm.assert_equal(result, expected)
    @pytest.mark.parametrize('dtype', [None, object])
    def test_comp_nat(self, dtype):
        # NaT never compares equal (or ordered) with anything, incl. itself
        left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
                                  pd.Timedelta('3 days')])
        right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)
        result = rhs == lhs
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = rhs != lhs
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
    def test_comparisons_nat(self):
        tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
                                    '1 day 00:00:01', '5 day 00:00:03'])
        tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
                                    '1 day 00:00:02', '5 days 00:00:03'])
        tdarr = np.array([np.timedelta64(2, 'D'),
                          np.timedelta64(2, 'D'), np.timedelta64('nat'),
                          np.timedelta64('nat'),
                          np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
                          np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
        cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
        for idx1, idx2 in cases:
            result = idx1 < idx2
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)
            result = idx2 > idx1
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)
            result = idx1 <= idx2
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)
            result = idx2 >= idx1
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)
            result = idx1 == idx2
            expected = np.array([False, False, False, False, False, True])
            tm.assert_numpy_array_equal(result, expected)
            result = idx1 != idx2
            expected = np.array([True, True, True, True, True, False])
            tm.assert_numpy_array_equal(result, expected)
    # TODO: better name
    def test_comparisons_coverage(self):
        rng = timedelta_range('1 days', periods=10)
        result = rng < rng[3]
        expected = np.array([True, True, True] + [False] * 7)
        tm.assert_numpy_array_equal(result, expected)
        # raise TypeError for now
        with pytest.raises(TypeError):
            rng < rng[3].value
        result = rng == list(rng)
        exp = rng == rng
        tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
    """Arithmetic tests for timedelta64 data (ufuncs, +/- with scalars and
    indexes, timezone validation)."""
    # Tests moved from type-specific test files but not
    # yet sorted/parametrized/de-duplicated
    def test_ufunc_coercions(self):
        # normal ops are also tested in tseries/test_timedeltas.py
        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')
        # multiplication scales both values and freq
        for result in [idx * 2, np.multiply(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
                                 freq='4H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '4H'
        for result in [idx / 2, np.divide(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
                                 freq='H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'H'
        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')
        # negation flips both values and freq
        for result in [-idx, np.negative(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
                                 freq='-2H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '-2H'
        idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
                             freq='H', name='x')
        # abs() loses the (no longer monotone) freq
        for result in [abs(idx), np.absolute(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
                                 freq=None, name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq is None
    def test_subtraction_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = pd.date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dt
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
               " but received a 'Timedelta'")
        with pytest.raises(TypeError, match=msg):
            td - dt
        msg = "bad operand type for unary -: 'DatetimeArray'"
        with pytest.raises(TypeError, match=msg):
            td - dti
        result = dt - dti
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = dti - dt
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = tdi - td
        expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = td - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dti - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], name='bar')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dt - tdi
        expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
        tm.assert_index_equal(result, expected)
    def test_subtraction_ops_with_tz(self):
        # check that dt/dti subtraction ops with tz are validated
        dti = pd.date_range('20130101', periods=3)
        ts = Timestamp('20130101')
        dt = ts.to_pydatetime()
        dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
        ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
        ts_tz2 = Timestamp('20130101').tz_localize('CET')
        dt_tz = ts_tz.to_pydatetime()
        td = Timedelta('1 days')
        # helper: subtraction of datelikes must give a Timedelta scalar
        def _check(result, expected):
            assert result == expected
            assert isinstance(result, Timedelta)
        # scalars
        result = ts - ts
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dt_tz - ts_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        result = ts_tz - dt_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        # tz mismatches
        msg = ("Timestamp subtraction must have the same timezones or no"
               " timezones")
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt_tz - dt
        msg = ("Timestamp subtraction must have the same timezones or no"
               " timezones")
        with pytest.raises(TypeError, match=msg):
            dt_tz - ts_tz2
        msg = "can't subtract offset-naive and offset-aware datetimes"
        with pytest.raises(TypeError, match=msg):
            dt - dt_tz
        msg = ("Timestamp subtraction must have the same timezones or no"
               " timezones")
        with pytest.raises(TypeError, match=msg):
            ts - dt_tz
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - ts
        with pytest.raises(TypeError, match=msg):
            ts_tz2 - dt
        with pytest.raises(TypeError, match=msg):
            ts_tz - ts_tz2
        # with dti
        with pytest.raises(TypeError, match=msg):
            dti - ts_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts
        with pytest.raises(TypeError, match=msg):
            dti_tz - ts_tz2
        result = dti_tz - dt_tz
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
        tm.assert_index_equal(result, expected)
        result = dt_tz - dti_tz
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
        tm.assert_index_equal(result, expected)
        result = dti_tz - ts_tz
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
        tm.assert_index_equal(result, expected)
        result = ts_tz - dti_tz
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
        tm.assert_index_equal(result, expected)
        result = td - td
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dti_tz - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], tz='US/Eastern')
        tm.assert_index_equal(result, expected)
    def test_dti_tdi_numeric_ops(self):
        # These are normally union/diff set-like ops
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = pd.date_range('20130101', periods=3, name='bar')
        # TODO(wesm): unused?
        # td = Timedelta('1 days')
        # dt = Timestamp('20130101')
        result = tdi - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = tdi + tdi
        expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = dti - tdi  # name will be reset
        expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
        tm.assert_index_equal(result, expected)
    def test_addition_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = pd.date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        result = tdi + dt
        expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)
        result = dt + tdi
        expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)
        result = td + tdi
        expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = tdi + td
        expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
        # unequal length
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            tdi + dti[0:1]
        with pytest.raises(ValueError, match=msg):
            tdi[0:1] + dti
        # random indexes
        with pytest.raises(NullFrequencyError):
            tdi + pd.Int64Index([1, 2, 3])
        # this is a union!
        # pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
        result = tdi + dti  # name will be reset
        expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
        tm.assert_index_equal(result, expected)
        result = dti + tdi  # name will be reset
        expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
        tm.assert_index_equal(result, expected)
        result = dt + td
        expected = Timestamp('20130102')
        assert result == expected
        result = td + dt
        expected = Timestamp('20130102')
        assert result == expected
    # TODO: Needs more informative name, probably split up into
    # more targeted tests
    @pytest.mark.parametrize('freq', ['D', 'B'])
    def test_timedelta(self, freq):
        index = pd.date_range('1/1/2000', periods=50, freq=freq)
        shifted = index + timedelta(1)
        back = shifted + timedelta(-1)
        tm.assert_index_equal(index, back)
        if freq == 'D':
            expected = pd.tseries.offsets.Day(1)
            assert index.freq == expected
            assert shifted.freq == expected
            assert back.freq == expected
        else:  # freq == 'B'
            # shifting a business-day index by a calendar day drops the freq
            assert index.freq == pd.tseries.offsets.BusinessDay(1)
            assert shifted.freq is None
            assert back.freq == pd.tseries.offsets.BusinessDay(1)
        result = index - timedelta(1)
        expected = index + timedelta(-1)
        tm.assert_index_equal(result, expected)
        # GH#4134, buggy with timedeltas
        rng = pd.date_range('2013', '2014')
        s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
        result2 = DatetimeIndex(s - np.timedelta64(100000000))
        result3 = rng - np.timedelta64(100000000)
        result4 = DatetimeIndex(s - pd.offsets.Hour(1))
        tm.assert_index_equal(result1, result4)
        tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
    """Overflow handling in timedelta +/- ops: NaT entries must mask overflow
    rather than raise, while non-NaT overflow raises."""
    # TODO: parametrize over boxes
    def test_tdi_add_timestamp_nat_masking(self):
        # GH#17991 checking for overflow-masking with NaT
        tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
        tsneg = Timestamp('1950-01-01')
        # same timestamp expressed in every accepted datetime-like form
        ts_neg_variants = [tsneg,
                           tsneg.to_pydatetime(),
                           tsneg.to_datetime64().astype('datetime64[ns]'),
                           tsneg.to_datetime64().astype('datetime64[D]')]
        tspos = Timestamp('1980-01-01')
        ts_pos_variants = [tspos,
                           tspos.to_pydatetime(),
                           tspos.to_datetime64().astype('datetime64[ns]'),
                           tspos.to_datetime64().astype('datetime64[D]')]
        for variant in ts_neg_variants + ts_pos_variants:
            res = tdinat + variant
            # the NaT slot stays NaT regardless of the added value
            assert res[1] is pd.NaT
    def test_tdi_add_overflow(self):
        # See GH#14068
        # preliminary test scalar analogue of vectorized tests below
        with pytest.raises(OutOfBoundsDatetime):
            pd.to_timedelta(106580, 'D') + Timestamp('2000')
        with pytest.raises(OutOfBoundsDatetime):
            Timestamp('2000') + pd.to_timedelta(106580, 'D')
        # smallest int64 value that is not the NaT sentinel
        _NaT = int(pd.NaT) + 1
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([106580], 'D') + Timestamp('2000')
        with pytest.raises(OverflowError, match=msg):
            Timestamp('2000') + pd.to_timedelta([106580], 'D')
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta([_NaT]) - Timedelta('1 days')
        with pytest.raises(OverflowError, match=msg):
            pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
        with pytest.raises(OverflowError, match=msg):
            (pd.to_timedelta([_NaT, '5 days', '1 hours']) -
             pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
        # These should not overflow!
        exp = TimedeltaIndex([pd.NaT])
        result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex(['4 days', pd.NaT])
        result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
        result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
                  pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
        tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
    def test_td64_df_add_int_frame(self):
        # GH#22696 Check that we don't dispatch to numpy implementation,
        # which treats int64 as m8[ns]
        tdi = pd.timedelta_range('1', periods=3)
        df = tdi.to_frame()
        other = pd.DataFrame([1, 2, 3], index=tdi)  # indexed like `df`
        # all four orientations of frame +/- int-frame must raise
        with pytest.raises(TypeError):
            df + other
        with pytest.raises(TypeError):
            other + df
        with pytest.raises(TypeError):
            df - other
        with pytest.raises(TypeError):
            other - df
    # TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
    # parametrization+de-duplication
    def test_timedelta_ops_with_missing_values(self):
        """+/- between td64 Series/DataFrames/scalars propagates NaT; bare
        np.nan operands raise TypeError."""
        # setup
        s1 = pd.to_timedelta(Series(['00:00:01']))
        s2 = pd.to_timedelta(Series(['00:00:02']))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing datetime64-dtype data to TimedeltaIndex is deprecated
            sn = pd.to_timedelta(Series([pd.NaT]))
        df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
        df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing datetime64-dtype data to TimedeltaIndex is deprecated
            dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
        scalar1 = pd.to_timedelta('00:00:01')
        scalar2 = pd.to_timedelta('00:00:02')
        timedelta_NaT = pd.to_timedelta('NaT')
        # scalar +/- scalar
        actual = scalar1 + scalar1
        assert actual == scalar2
        actual = scalar2 - scalar1
        assert actual == scalar1
        # Series +/- Series and Series +/- scalar
        actual = s1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - s1
        tm.assert_series_equal(actual, s1)
        actual = s1 + scalar1
        tm.assert_series_equal(actual, s2)
        actual = scalar1 + s1
        tm.assert_series_equal(actual, s2)
        actual = s2 - scalar1
        tm.assert_series_equal(actual, s1)
        actual = -scalar1 + s2
        tm.assert_series_equal(actual, s1)
        # NaT scalar propagates through Series ops
        actual = s1 + timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)
        actual = s1 - timedelta_NaT
        tm.assert_series_equal(actual, sn)
        actual = -timedelta_NaT + s1
        tm.assert_series_equal(actual, sn)
        # numeric NaN is not a valid operand for td64 addition/subtraction
        with pytest.raises(TypeError):
            s1 + np.nan
        with pytest.raises(TypeError):
            np.nan + s1
        with pytest.raises(TypeError):
            s1 - np.nan
        with pytest.raises(TypeError):
            -np.nan + s1
        actual = s1 + pd.NaT
        tm.assert_series_equal(actual, sn)
        actual = s2 - pd.NaT
        tm.assert_series_equal(actual, sn)
        # DataFrame combinations
        actual = s1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = s2 - df1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + s1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - s1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + df1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - df1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + scalar1
        tm.assert_frame_equal(actual, df2)
        actual = df2 - scalar1
        tm.assert_frame_equal(actual, df1)
        actual = df1 + timedelta_NaT
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - timedelta_NaT
        tm.assert_frame_equal(actual, dfn)
        with pytest.raises(TypeError):
            df1 + np.nan
        with pytest.raises(TypeError):
            df1 - np.nan
        actual = df1 + pd.NaT # NaT is datetime, not timedelta
        tm.assert_frame_equal(actual, dfn)
        actual = df1 - pd.NaT
        tm.assert_frame_equal(actual, dfn)
    # TODO: moved from tests.series.test_operators, needs splitting, cleanup,
    # de-duplication, box-parametrization...
    def test_operators_timedelta64(self):
        """Mixed datetime64/timedelta64 Series arithmetic: result dtypes and
        roundtrips (order-sensitive; includes an in-place += at the end)."""
        # series ops
        v1 = pd.date_range('2012-1-1', periods=3, freq='D')
        v2 = pd.date_range('2012-1-2', periods=3, freq='D')
        rs = Series(v2) - Series(v1)
        xp = Series(1e9 * 3600 * 24,
                    rs.index).astype('int64').astype('timedelta64[ns]')
        tm.assert_series_equal(rs, xp)
        assert rs.dtype == 'timedelta64[ns]'
        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        assert td.dtype == 'timedelta64[ns]'
        # series on the rhs
        result = df['A'] - df['A'].shift()
        assert result.dtype == 'timedelta64[ns]'
        result = df['A'] + td
        assert result.dtype == 'M8[ns]'
        # scalar Timestamp on rhs
        maxa = df['A'].max()
        assert isinstance(maxa, Timestamp)
        resultb = df['A'] - df['A'].max()
        assert resultb.dtype == 'timedelta64[ns]'
        # timestamp on lhs
        result = resultb + df['A']
        values = [Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')]
        expected = Series(values, name='A')
        tm.assert_series_equal(result, expected)
        # datetimes on rhs
        result = df['A'] - datetime(2001, 1, 1)
        expected = Series(
            [timedelta(days=4017 + i) for i in range(3)], name='A')
        tm.assert_series_equal(result, expected)
        assert result.dtype == 'm8[ns]'
        d = datetime(2001, 1, 1, 3, 4)
        resulta = df['A'] - d
        assert resulta.dtype == 'm8[ns]'
        # roundtrip
        resultb = resulta + d
        tm.assert_series_equal(df['A'], resultb)
        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df['A'] + td
        resultb = resulta - td
        tm.assert_series_equal(resultb, df['A'])
        assert resultb.dtype == 'M8[ns]'
        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df['A'] + td
        resultb = resulta - td
        tm.assert_series_equal(df['A'], resultb)
        assert resultb.dtype == 'M8[ns]'
        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
    # -------------------------------------------------------------
    # Invalid Operations
    def test_td64arr_add_str_invalid(self, box_with_array):
        """Adding a string to a td64 array raises TypeError, both orders."""
        # GH#13624
        tdi = TimedeltaIndex(['1 day', '2 days'])
        tdi = tm.box_expected(tdi, box_with_array)
        with pytest.raises(TypeError):
            tdi + 'a'
        with pytest.raises(TypeError):
            'a' + tdi
    @pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
    def test_td64arr_add_sub_float(self, box_with_array, other):
        """+/- between a td64 array and float scalars/arrays raises TypeError."""
        tdi = TimedeltaIndex(['-1 days', '-1 days'])
        tdarr = tm.box_expected(tdi, box_with_array)
        with pytest.raises(TypeError):
            tdarr + other
        with pytest.raises(TypeError):
            other + tdarr
        with pytest.raises(TypeError):
            tdarr - other
        with pytest.raises(TypeError):
            other - tdarr
    @pytest.mark.parametrize('freq', [None, 'H'])
    def test_td64arr_sub_period(self, box_with_array, freq):
        """Subtraction between a td64 array and a Period scalar raises."""
        # GH#13078
        # not supported, check TypeError
        p = pd.Period('2011-01-01', freq='D')
        idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
        idx = tm.box_expected(idx, box_with_array)
        with pytest.raises(TypeError):
            idx - p
        with pytest.raises(TypeError):
            p - idx
    @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
    @pytest.mark.parametrize('tdi_freq', [None, 'H'])
    def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
        """Subtracting a PeriodIndex from a td64 array raises TypeError."""
        # GH#20049 subtracting PeriodIndex should raise TypeError
        tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
        dti = Timestamp('2018-03-07 17:16:40') + tdi
        pi = dti.to_period(pi_freq)
        # TODO: parametrize over box for pi?
        tdi = tm.box_expected(tdi, box_with_array)
        with pytest.raises(TypeError):
            tdi - pi
    # -------------------------------------------------------------
    # Binary operations td64 arraylike and datetime-like
    def test_td64arr_sub_timestamp_raises(self, box_with_array):
        """td64 array - Timestamp is undefined and raises TypeError."""
        idx = TimedeltaIndex(['1 day', '2 day'])
        idx = tm.box_expected(idx, box_with_array)
        msg = ("cannot subtract a datelike from|"
               "Could not operate|"
               "cannot perform operation")
        with pytest.raises(TypeError, match=msg):
            idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
    def test_td64arr_add_sub_timestamp(self, box_with_array):
        """Timestamp +/- td64 array produces datetimes; td64 - Timestamp raises."""
        # GH#11925
        ts = Timestamp('2012-01-01')
        # TODO: parametrize over types of datetime scalar?
        tdi = timedelta_range('1 day', periods=3)
        expected = pd.date_range('2012-01-02', periods=3)
        tdarr = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(ts + tdarr, expected)
        tm.assert_equal(tdarr + ts, expected)
        expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
        expected2 = tm.box_expected(expected2, box_with_array)
        tm.assert_equal(ts - tdarr, expected2)
        tm.assert_equal(ts + (-tdarr), expected2)
        with pytest.raises(TypeError):
            tdarr - ts
    def test_tdi_sub_dt64_array(self, box_with_array):
        """dt64 ndarray - td64 array works; td64 array - dt64 ndarray raises."""
        dti = pd.date_range('2016-01-01', periods=3)
        tdi = dti - dti.shift(1)
        dtarr = dti.values
        expected = pd.DatetimeIndex(dtarr) - tdi
        tdi = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        with pytest.raises(TypeError):
            tdi - dtarr
        # TimedeltaIndex.__rsub__
        result = dtarr - tdi
        tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
    # ------------------------------------------------------------------
    # Operations with int-like others
    def test_td64arr_add_int_series_invalid(self, box):
        """+/- between a td64 array and an int Series raises (error type
        depends on the box)."""
        tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
        tdser = tm.box_expected(tdser, box)
        err = TypeError if box is not pd.Index else NullFrequencyError
        int_ser = Series([2, 3, 4])
        with pytest.raises(err):
            tdser + int_ser
        with pytest.raises(err):
            int_ser + tdser
        with pytest.raises(err):
            tdser - int_ser
        with pytest.raises(err):
            int_ser - tdser
    def test_td64arr_add_intlike(self, box_with_array):
        """+/- between a td64 array and int-like scalars/arrays raises."""
        # GH#19123
        tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
        ser = tm.box_expected(tdi, box_with_array)
        err = TypeError
        if box_with_array in [pd.Index, tm.to_array]:
            err = NullFrequencyError
        other = Series([20, 30, 40], dtype='uint8')
        # TODO: separate/parametrize
        with pytest.raises(err):
            ser + 1
        with pytest.raises(err):
            ser - 1
        with pytest.raises(err):
            ser + other
        with pytest.raises(err):
            ser - other
        with pytest.raises(err):
            ser + np.array(other)
        with pytest.raises(err):
            ser - np.array(other)
        with pytest.raises(err):
            ser + pd.Index(other)
        with pytest.raises(err):
            ser - pd.Index(other)
    @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
    def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
                                                    scalar):
        """+/- between a td64 array and a numeric scalar raises."""
        box = box_with_array
        tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
        tdser = tm.box_expected(tdser, box)
        err = TypeError
        if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
            err = NullFrequencyError
        with pytest.raises(err):
            tdser + scalar
        with pytest.raises(err):
            scalar + tdser
        with pytest.raises(err):
            tdser - scalar
        with pytest.raises(err):
            scalar - tdser
    @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
                                       'uint64', 'uint32', 'uint16', 'uint8',
                                       'float64', 'float32', 'float16'])
    @pytest.mark.parametrize('vec', [
        np.array([1, 2, 3]),
        pd.Index([1, 2, 3]),
        Series([1, 2, 3])
        # TODO: Add DataFrame in here?
    ], ids=lambda x: type(x).__name__)
    def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
        """+/- between a td64 array and a numeric array-like raises."""
        tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
        tdser = tm.box_expected(tdser, box)
        err = TypeError
        if box is pd.Index and not dtype.startswith('float'):
            err = NullFrequencyError
        vector = vec.astype(dtype)
        with pytest.raises(err):
            tdser + vector
        with pytest.raises(err):
            vector + tdser
        with pytest.raises(err):
            tdser - vector
        with pytest.raises(err):
            vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
    # TODO: parametrize over [add, sub, radd, rsub]?
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('Egon', 'Venkman', None),
                                       ('NCC1701D', 'NCC1701D', 'NCC1701D')])
    def test_td64arr_add_sub_tdi(self, box, names):
        """TimedeltaIndex +/- boxed td64: correct dtype and name propagation."""
        # GH#17250 make sure result dtype is correct
        # GH#19043 make sure names are propagated correctly
        if box is pd.DataFrame and names[1] == 'Venkman':
            pytest.skip("Name propagation for DataFrame does not behave like "
                        "it does for Index/Series")
        tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
        ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
        expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
                          name=names[2])
        ser = tm.box_expected(ser, box)
        expected = tm.box_expected(expected, box)
        result = tdi + ser
        tm.assert_equal(result, expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
        result = ser + tdi
        tm.assert_equal(result, expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
        expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
                          name=names[2])
        expected = tm.box_expected(expected, box)
        result = tdi - ser
        tm.assert_equal(result, expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
        result = ser - tdi
        tm.assert_equal(result, -expected)
        if box is not pd.DataFrame:
            assert result.dtype == 'timedelta64[ns]'
        else:
            assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
    # ------------------------------------------------------------------
    # __add__/__sub__ with DateOffsets and arrays of DateOffsets
    # TODO: this was taken from tests.series.test_operators; de-duplicate
    def test_timedelta64_operations_with_DateOffset(self):
        """td64 Series +/- DateOffset scalars and offset Series; the
        object-dtype offset Series path issues a PerformanceWarning."""
        # GH#10699
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        tm.assert_series_equal(result, expected)
        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        tm.assert_series_equal(result, expected)
        with tm.assert_produces_warning(PerformanceWarning):
            result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                                  pd.offsets.Hour(2)])
        expected = Series([timedelta(minutes=6, seconds=3),
                           timedelta(minutes=5, seconds=6),
                           timedelta(hours=2, minutes=5, seconds=3)])
        tm.assert_series_equal(result, expected)
        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        tm.assert_series_equal(result, expected)
        # valid DateOffsets
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_td64arr_add_offset_index(self, names, box):
        """td64 array + Index of DateOffsets: elementwise, name-propagating."""
        # GH#18849, GH#19744
        if box is pd.DataFrame and names[1] == 'bar':
            pytest.skip("Name propagation for DataFrame does not behave like "
                        "it does for Index/Series")
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
                             name=names[0])
        other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
                         name=names[1])
        expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
                                  freq='infer', name=names[2])
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        # The DataFrame operation is transposed and so operates as separate
        # scalar operations, which do not issue a PerformanceWarning
        warn = PerformanceWarning if box is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            res = tdi + other
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(warn):
            res2 = other + tdi
        tm.assert_equal(res2, expected)
    # TODO: combine with test_td64arr_add_offset_index by parametrizing
    # over second box?
    def test_td64arr_add_offset_array(self, box):
        """td64 array + ndarray of DateOffsets adds elementwise, both orders."""
        # GH#18849
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
        other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
                                  freq='infer')
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        # The DataFrame operation is transposed and so operates as separate
        # scalar operations, which do not issue a PerformanceWarning
        warn = PerformanceWarning if box is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            res = tdi + other
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(warn):
            res2 = other + tdi
        tm.assert_equal(res2, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_td64arr_sub_offset_index(self, names, box):
        """td64 array - Index of DateOffsets: elementwise, name-propagating."""
        # GH#18824, GH#19744
        if box is pd.DataFrame and names[1] == 'bar':
            pytest.skip("Name propagation for DataFrame does not behave like "
                        "it does for Index/Series")
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
                             name=names[0])
        other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
                         name=names[1])
        expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
                                  freq='infer', name=names[2])
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, box)
        # The DataFrame operation is transposed and so operates as separate
        # scalar operations, which do not issue a PerformanceWarning
        warn = PerformanceWarning if box is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            res = tdi - other
        tm.assert_equal(res, expected)
    def test_td64arr_sub_offset_array(self, box_with_array):
        """td64 array - ndarray of DateOffsets subtracts elementwise."""
        # GH#18824
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
        other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
                                  freq='infer')
        tdi = tm.box_expected(tdi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # The DataFrame operation is transposed and so operates as separate
        # scalar operations, which do not issue a PerformanceWarning
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            res = tdi - other
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_td64arr_with_offset_series(self, names, box_df_fail):
        """td64 array +/- Series of DateOffsets: elementwise with warning."""
        # GH#18849
        box = box_df_fail
        box2 = Series if box in [pd.Index, tm.to_array] else box
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
                             name=names[0])
        other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
                       name=names[1])
        expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
                              name=names[2])
        tdi = tm.box_expected(tdi, box)
        expected_add = tm.box_expected(expected_add, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res = tdi + other
        tm.assert_equal(res, expected_add)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + tdi
        tm.assert_equal(res2, expected_add)
        # TODO: separate/parametrize add/sub test?
        expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
                              name=names[2])
        expected_sub = tm.box_expected(expected_sub, box2)
        with tm.assert_produces_warning(PerformanceWarning):
            res3 = tdi - other
        tm.assert_equal(res3, expected_sub)
    @pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
    def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
                                                      box_with_array):
        """+/- with anchored offsets warns (PerformanceWarning) then raises."""
        # GH#18824
        tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
        tdi = tm.box_expected(tdi, box_with_array)
        anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
        # addition/subtraction ops with anchored offsets should issue
        # a PerformanceWarning and _then_ raise a TypeError.
        with pytest.raises(TypeError):
            with tm.assert_produces_warning(PerformanceWarning):
                tdi + anchored
        with pytest.raises(TypeError):
            with tm.assert_produces_warning(PerformanceWarning):
                anchored + tdi
        with pytest.raises(TypeError):
            with tm.assert_produces_warning(PerformanceWarning):
                tdi - anchored
        with pytest.raises(TypeError):
            with tm.assert_produces_warning(PerformanceWarning):
                anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
    # Tests for timedelta64[ns]
    # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
    # TODO: Moved from tests.series.test_operators; needs cleanup
    @pytest.mark.parametrize("m", [1, 3, 10])
    @pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
    def test_timedelta64_conversions(self, m, unit):
        """td64 Series / np.timedelta64 scalar matches elementwise division,
        in both orders (NaT element included)."""
        startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
        enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
        ser = enddate - startdate
        ser[2] = np.nan
        # op
        expected = Series([x / np.timedelta64(m, unit) for x in ser])
        result = ser / np.timedelta64(m, unit)
        tm.assert_series_equal(result, expected)
        # reverse op
        expected = Series([Timedelta(np.timedelta64(m, unit)) / x
                           for x in ser])
        result = np.timedelta64(m, unit) / ser
        tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
    def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
        """Multiplying a td64 array by a timedelta-like scalar raises."""
        rng = timedelta_range('1 days', '10 days', name='foo')
        rng = tm.box_expected(rng, box_with_array)
        with pytest.raises(TypeError):
            rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
    def test_tdi_mul_int_series(self, box_with_array):
        """td64 array * int Series scales elementwise; Index inputs upcast to
        Series output."""
        box = box_with_array
        xbox = pd.Series if box in [pd.Index, tm.to_array] else box
        idx = TimedeltaIndex(np.arange(5, dtype='int64'))
        expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
        idx = tm.box_expected(idx, box)
        expected = tm.box_expected(expected, xbox)
        result = idx * pd.Series(np.arange(5, dtype='int64'))
        tm.assert_equal(result, expected)
    def test_tdi_mul_float_series(self, box_with_array):
        """td64 array * float Series scales elementwise; Index inputs upcast
        to Series output."""
        box = box_with_array
        xbox = pd.Series if box in [pd.Index, tm.to_array] else box
        idx = TimedeltaIndex(np.arange(5, dtype='int64'))
        idx = tm.box_expected(idx, box)
        rng5f = np.arange(5, dtype='float64')
        expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
        expected = tm.box_expected(expected, xbox)
        result = idx * Series(rng5f + 1.0)
        tm.assert_equal(result, expected)
    # TODO: Put Series/DataFrame in others?
    @pytest.mark.parametrize('other', [
        np.arange(1, 11),
        pd.Int64Index(range(1, 11)),
        pd.UInt64Index(range(1, 11)),
        pd.Float64Index(range(1, 11)),
        pd.RangeIndex(1, 11)
    ], ids=lambda x: type(x).__name__)
    def test_tdi_rmul_arraylike(self, other, box_with_array):
        """numeric array-like * td64 array is commutative elementwise."""
        box = box_with_array
        xbox = get_upcast_box(box, other)
        tdi = TimedeltaIndex(['1 Day'] * 10)
        expected = timedelta_range('1 days', '10 days')
        # clear the inferred freq so the comparison is value-only
        expected._data.freq = None
        tdi = tm.box_expected(tdi, box)
        expected = tm.box_expected(expected, xbox)
        result = other * tdi
        tm.assert_equal(result, expected)
        commute = tdi * other
        tm.assert_equal(commute, expected)
    # ------------------------------------------------------------------
    # __div__, __rdiv__
    def test_td64arr_div_nat_invalid(self, box_with_array):
        """Division involving the pd.NaT scalar raises TypeError."""
        # don't allow division by NaT (maybe could in the future)
        rng = timedelta_range('1 days', '10 days', name='foo')
        rng = tm.box_expected(rng, box_with_array)
        with pytest.raises(TypeError,
                           match="'?true_divide'? cannot use operands"):
            rng / pd.NaT
        with pytest.raises(TypeError, match='Cannot divide NaTType by'):
            pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
    def test_td64arr_div_int(self, box_with_array):
        """td64 array / 1 is an identity; int / td64 array raises."""
        idx = TimedeltaIndex(np.arange(5, dtype='int64'))
        idx = tm.box_expected(idx, box_with_array)
        result = idx / 1
        tm.assert_equal(result, idx)
        with pytest.raises(TypeError, match='Cannot divide'):
            # GH#23829
            1 / idx
    def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
        """td64 array / td-like scalar returns floats; reversed op is the
        reciprocal."""
        # GH#20088, GH#22163 ensure DataFrame returns correct dtype
        rng = timedelta_range('1 days', '10 days', name='foo')
        expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng / two_hours
        tm.assert_equal(result, expected)
        result = two_hours / rng
        expected = 1 / expected
        tm.assert_equal(result, expected)
    def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
                                                box_with_array):
        """Division by a td-like scalar maps NaT elements to NaN."""
        rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        expected = pd.Float64Index([12, np.nan, 24], name='foo')
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng / two_hours
        tm.assert_equal(result, expected)
        result = two_hours / rng
        expected = 1 / expected
        tm.assert_equal(result, expected)
    def test_td64arr_div_td64_ndarray(self, box_with_array):
        """td64 array / td64 ndarray (and object/list variants) yields
        floats, with the reversed op as the reciprocal."""
        # GH#22631
        rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
        expected = pd.Float64Index([12, np.nan, 24])
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        other = np.array([2, 4, 2], dtype='m8[h]')
        result = rng / other
        tm.assert_equal(result, expected)
        result = rng / tm.box_expected(other, box_with_array)
        tm.assert_equal(result, expected)
        result = rng / other.astype(object)
        tm.assert_equal(result, expected)
        result = rng / list(other)
        tm.assert_equal(result, expected)
        # reversed op
        expected = 1 / expected
        result = other / rng
        tm.assert_equal(result, expected)
        result = tm.box_expected(other, box_with_array) / rng
        tm.assert_equal(result, expected)
        result = other.astype(object) / rng
        tm.assert_equal(result, expected)
        result = list(other) / rng
        tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
| tm.assert_equal(result, expected) | pandas.util.testing.assert_equal |
"""
.. _logs:
Log File Analysis (experimental)
================================
Logs contain very detailed information about events happening on computers.
And the extra details that they provide, come with additional complexity that
we need to handle ourselves. A pageview may contain many log lines, and a
session can consist of several pageviews for example.
Another important characterisitic of log files is that their are usualy not
big.
They are massive.
So, we also need to cater for their large size, as well as rapid changes.
TL;DR
>>> import advertools as adv
>>> import pandas as pd
>>> adv.logs_to_df(log_file='access.log',
... output_file='access_logs.parquet',
... errors_file='log_errors.csv',
... log_format='common',
... fields=None)
>>> logs_df = pd.read_parquet('access_logs.parquet')
How to run the :func:`logs_to_df` function:
-------------------------------------------
* ``log_file``: The path to the log file you are trying to analyze.
* ``output_file``: The path to where you want the parsed and compressed file
to be saved. Only the `parquet` format is supported.
* ``errors_file``: You will almost certainly have log lines that don't conform
to the format that you have, so all lines that weren't properly parsed would
go to this file. This file also contains the error messages, so you know what
went wrong, and how you might fix it. In some cases, you might simply take
these "errors" and parse them again. They might not be really errors, but
lines in a different format, or temporary debug messages.
* ``log_format``: The format in which your logs were formatted. Logs can (and
are) formatted in many ways, and there is no right or wrong way. However,
there are defaults, and a few popular formats that most servers use. It is
likely that your file is in one of the popular formats. This parameter can
take any one of the pre-defined formats, for example "common", or "extended",
or a regular expression that you provide. This means that you can parse any
log format (as long as lines are single lines, and not formatted in JSON).
* ``fields``: If you selected one of the supported formats, then there is no
need to provide a value for this parameter. You have to provide a list of
fields in case you provide a custom (regex) format. The fields will become
the names of the columns of the resulting DataFrame, so you can distinguish
between them (client, time, status code, response size, etc.)
Supported Log Formats
---------------------
* `common`
* `combined` (a.k.a "extended")
* `common_with_vhost`
* `nginx_error`
* `apache_error`
Parse and Analyze Crawl Logs in a Dataframe
===========================================
While crawling with the :func:`crawl` function, the process produces logs for
every page crawled, scraped, redirected, and even blocked by robots.txt rules.
By default, those logs are can be seen on the command line as their default
destination is stdout.
A good practice is to set a ``LOG_FILE`` so you can save those logs to a text
file, and review them later. There are several reasons why you might want to do
that:
* Blocked URLs: The crawler obeys robots.txt rules by default, and when it
encounters pages that it shouldn't crawl, it doesn't. However, this is logged
as an event, and you can easily extract a list of blocked URLs from the logs.
* Crawl errors: You might also get some errors while crawling, and it can be
interesting to know which URLs generated errors.
* Filtered pages: Those are pages that were discovered but weren't crawled
because they are not a sub-domain of the provided url_list, or happen to be
on external domains altogether.
This can simply be done by specifying a file name through the optional
`custom_settings` parameter of ``crawl``:
>>> import advertools as adv
>>> adv.crawl('https://example.com',
output_file='example.jl',
follow_links=True,
custom_settings={'LOG_FILE': 'example.log'})
If you run it this way, all logs will be saved to the file you chose,
`example.log` in this case.
Now, you can use the :func:`crawllogs_to_df` function to open the logs in a
DataFrame:
>>> import advertools as adv
>>> logs_df = adv.crawllogs_to_df('example.log')
The DataFrame might contain the following columns:
* `time`: The timestamp for the process
* `middleware`: The middleware responsible for this process, whether it is the
core engine, the scraper, error handler and so on.
* `level`: The logging level (DEBUG, INFO, etc.)
* `message`: A single word summarizing what this row represents, "Crawled",
"Scraped", "Filtered", and so on.
* `domain`: The domain name of filtered (not crawled pages) typically for URLs
outside the current website.
* `method`: The HTTP method used in this process (GET, PUT, etc.)
* `url`: The URL currently under process.
* `status`: HTTP status code, 200, 404, etc.
* `referer`: The referring URL, where applicable.
* `method_to`: In redirect rows the HTTP method used to crawl the URL going to.
* `redirect_to`: The URL redirected to.
* `method_from`: In redirect rows the HTTP method used to crawl the URL coming
from.
* `redirect_from`: The URL redirected from.
* `blocked_urls`: The URLs that were not crawled due to robots.txt rules.
"""
import os
import re
from pathlib import Path
from tempfile import TemporaryDirectory
import pandas as pd
LOG_FORMATS = {
'common': r'^(?P<client>\S+) \S+ (?P<userid>\S+) \[(?P<datetime>[^\]]+)\] "(?P<method>[A-Z]+) (?P<request>[^ "]+)? HTTP/[0-9.]+" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-)$',
'combined': r'^(?P<client>\S+) \S+ (?P<userid>\S+) \[(?P<datetime>[^\]]+)\] "(?P<method>[A-Z]+) (?P<request>[^ "]+)? HTTP/[0-9.]+" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-) "(?P<referrer>[^"]*)" "(?P<useragent>[^"]*)"$',
'common_with_vhost': r'^(?P<vhost>\S+) (?P<client>\S+) \S+ (?P<userid>\S+) \[(?P<datetime>[^\]]+)\] "(?P<method>[A-Z]+) (?P<request>[^ "]+)? HTTP/[0-9.]+" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-)$',
'nginx_error': r'^(?P<datetime>\d{4}/\d\d/\d\d \d\d:\d\d:\d\d) \[(?P<level>[^\]]+)\] (?P<pid>\d+)#(?P<tid>\d+): (?P<counter>\*\d+ | )?(?P<message>.*)',
'apache_error': r'^(?P<datetime>\[[^\]]+\]) (?P<level>\[[^\]]+\]) \[pid (?P<pid>\d+)\] (?P<file>\S+):(?P<status> \S+| ):? \[client (?P<client>\S+)\] (?P<message>.*)',
}
LOG_FIELDS = {
'common': ['client', 'userid', 'datetime', 'method', 'request', 'status',
'size'],
'combined': ['client', 'userid', 'datetime', 'method', 'request', 'status',
'size', 'referer', 'user_agent'],
'common_with_vhost': ['virtual_host', 'client', 'userid', 'datetime',
'method', 'request', 'status', 'size'],
'nginx_error': ['datetime', 'level', 'process_id', 'thread_id', 'counter',
'message'],
'apache_error': ['datetime', 'level', 'process_id', 'file', 'status',
'client', 'message'],
}
def logs_to_df(log_file, output_file, errors_file, log_format, fields=None):
"""Parse and compress any log file into a DataFrame format.
Convert a log file to a `parquet` file in a DataFrame format, and save all
errors (or lines not conformig to the chosen log format) into a separate
``errors_file`` text file. Any non-JSON log format is possible, provided
you have the right regex for it. A few default ones are provided and can be
used. Check out ``adv.LOG_FORMATS`` and ``adv.LOG_FIELDS`` for the
available formats and fields.
>>> import advertools as adv
>>> import pandas as pd
>>> adv.logs_to_df(log_file='access.log',
... output_file='access_logs.parquet',
... errors_file='log_errors.csv',
... log_format='common',
... fields=None)
>>> logs_df = pd.read_parquet('access_logs.parquet')
You can now analyze ``logs_df`` as a normal pandas DataFrame.
:param str log_file: The path to the log file.
:param str output_file: The path to the desired output file. Must have a
".parquet" extension, and must not have the same
path as an existing file.
:param str errors_file: The path where the parsing errors are stored. Any
text format works, CSV is recommended to easily
open it with any CSV reader with the separator as
"@@".
:param str log_format: Either the name of one of the supported log formats,
or a regex of your own format.
:param str fields: A list of fields, which will become the names of columns
in ``output_file``. Only required if you provide a
custom (regex) ``log_format``.
"""
if not output_file.endswith('.parquet'):
raise ValueError("Please provide an `output_file` with a `.parquet` "
"extension.")
for file in [output_file, errors_file]:
if os.path.exists(file):
raise ValueError(f"The file '{file}' already exists. "
"Please rename it, delete it, or choose another "
"file name/path.")
regex = LOG_FORMATS.get(log_format) or log_format
columns = fields or LOG_FIELDS[log_format]
with TemporaryDirectory() as tempdir:
tempdir_name = Path(tempdir)
with open(log_file) as source_file:
linenumber = 0
parsed_lines = []
for line in source_file:
linenumber += 1
try:
log_line = re.findall(regex, line)[0]
parsed_lines.append(log_line)
except Exception as e:
with open(errors_file, 'at') as err:
err_line = line[:-1] if line.endswith('\n') else line
print('@@'.join([str(linenumber), err_line, str(e)]),
file=err)
pass
if linenumber % 250_000 == 0:
print(f'Parsed {linenumber:>15,} lines.', end='\r')
df = pd.DataFrame(parsed_lines, columns=columns)
df.to_parquet(tempdir_name / f'file_{linenumber}.parquet')
parsed_lines.clear()
else:
print(f'Parsed {linenumber:>15,} lines.', end='\r')
df = pd.DataFrame(parsed_lines, columns=columns)
df.to_parquet(tempdir_name / f'file_{linenumber}.parquet')
final_df = pd.read_parquet(tempdir_name)
try:
final_df['status'] = final_df['status'].astype('category')
final_df['method'] = final_df['method'].astype('category')
final_df['size'] = pd.to_numeric(final_df['size'],
downcast='signed')
except KeyError:
pass
final_df.to_parquet(output_file)
def crawllogs_to_df(logs_file_path):
"""Convert a crawl logs file to a DataFrame.
An interesting option while using the ``crawl`` function, is to specify a
destination file to save the logs of the crawl process itself. This contains
additional information about each crawled, scraped, blocked, or redirected
URL.
What you would most likely use this for is to get a list of URLs blocked by
robots.txt rules. These can be found und the column ``blocked_urls``.
Crawling errors are also interesting, and can be found in rows where
``message`` is equal to "error".
>>> import advertools as adv
>>> adv.crawl('https://example.com',
output_file='example.jl',
follow_links=True,
custom_settings={'LOG_FILE': 'example.log'})
>>> logs_df = adv.crawl_logs_to_df('example.log')
:param str logs_file_path: The path to the logs file.
:returns DataFrame crawl_logs_df: A DataFrame summarizing the logs.
"""
time_middleware_level = "(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d) \[(.*?)\] ([A-Z]+): "
time_middleware_level_error = "(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d) \[(.*?)\] (ERROR): "
filtered_regex = time_middleware_level + "(Filtered) offsite request to '(.*?)': <([A-Z]+) (.*?)>"
filtered_cols = ['time', 'middleware', 'level', 'message', 'domain', 'method', 'url']
crawled_regex = time_middleware_level + "(Crawled) \((\d\d\d)\) <([A-Z]+) (.*?)> \(referer: (.*?)\)"
crawled_cols = ['time', 'middleware', 'level', 'message', 'status', 'method', 'url', 'referer']
scraped_regex = time_middleware_level + "(Scraped) from <(\d\d\d) (.*?)>"
scraped_cols = ['time', 'middleware', 'level', 'message', 'status', 'url']
redirect_regex = time_middleware_level + "(Redirect)ing \((\d\d\d)\) to <([A-Z]+) (.*?)> from <([A-Z]+) (.*?)>"
redirect_cols = ['time', 'middleware', 'level', 'message', 'status', 'method_to', 'redirect_to', 'method_from', 'redirect_from']
blocked_regex = time_middleware_level + "(Forbidden) by robots\.txt: <([A-Z]+) (.*?)>"
blocked_cols = ['time', 'middleware', 'level', 'message', 'method', 'blocked_urls']
error_regex = time_middleware_level + "Spider (error) processing <([A-Z]+) (.*?)> \(referer: (.*?)\)"
error_cols = ['time', 'middleware', 'level', 'message', 'method', 'url', 'referer']
error_level_regex = time_middleware_level_error + '(.*)? (\d\d\d) (http.*)'
error_level_cols = ['time', 'middleware', 'level', 'message', 'status', 'url']
filtered_lines = []
crawled_lines = []
scraped_lines = []
redirect_lines = []
blocked_lines = []
error_lines = []
error_lvl_lines = []
with open(logs_file_path) as file:
for line in file:
if re.findall(filtered_regex, line):
filtered_lines.append(re.findall(filtered_regex, line)[0])
if re.findall(crawled_regex, line):
crawled_lines.append(re.findall(crawled_regex, line)[0])
if re.findall(scraped_regex, line):
scraped_lines.append(re.findall(scraped_regex, line)[0])
if re.findall(redirect_regex, line):
redirect_lines.append(re.findall(redirect_regex, line)[0])
if re.findall(blocked_regex, line):
blocked_lines.append(re.findall(blocked_regex, line)[0])
if re.findall(error_regex, line):
error_lines.append(re.findall(error_regex, line)[0])
if re.findall(error_level_regex, line):
error_lvl_lines.append(re.findall(error_level_regex, line)[0])
final_df = pd.concat([
pd.DataFrame(filtered_lines, columns=filtered_cols),
pd.DataFrame(crawled_lines, columns=crawled_cols),
| pd.DataFrame(scraped_lines, columns=scraped_cols) | pandas.DataFrame |
####
#### Jan 10, 2022
####
"""
This is created after the meeting on Jan, 10, 2022.
Changes made to the previous version:
a. Vertical lines for time reference
b. Add area of fields to the title of the plots.
c. In the title break AdamBenton2016 to one county!
d. make the previous and next auxiliary years gray backgound.
"""
import csv
import numpy as np
import pandas as pd
import datetime
from datetime import date
import time
import scipy
import scipy.signal
import os, os.path
from patsy import cr
# from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sb
from pandas.plotting import register_matplotlib_converters
from matplotlib.dates import ConciseDateFormatter
from datetime import datetime
register_matplotlib_converters()
import sys
start_time = time.time()
# search path for modules
# look @ https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
####################################################################################
###
### Aeolus Core path
###
####################################################################################
sys.path.append('/home/hnoorazar/NASA/')
import NASA_core as nc
import NASA_plot_core as ncp
####################################################################################
###
### Parameters
###
####################################################################################
county = sys.argv[1]
print (county)
####################################################################################
###
### Aeolus Directories
###
####################################################################################
param_dir = "/data/hydro/users/Hossein/NASA/000_shapefile_data_part/"
raw_dir = "/data/hydro/users/Hossein/NASA/01_raw_GEE/"
data_dir = "/data/hydro/users/Hossein/NASA/05_SG_TS/"
SOS_plot_dir = "/data/hydro/users/Hossein/NASA/06_SOS_plots/"
eval_dir = "/data/hydro/users/Hossein/NASA/0000_parameters/"
print ("_________________________________________________________")
print ("data dir is: " + data_dir)
print ("_________________________________________________________")
####################################################################################
###
### Read data
###
####################################################################################
if county == "Monterey2014":
raw_names = ["L7_T1C2L2_Scaled_Monterey2014_2013-01-01_2016-01-01.csv",
"L8_T1C2L2_Scaled_Monterey2014_2013-01-01_2016-01-01.csv"]
elif county == "AdamBenton2016":
raw_names = ["L7_T1C2L2_Scaled_AdamBenton2016_2015-01-01_2017-10-14.csv",
"L8_T1C2L2_Scaled_AdamBenton2016_2015-01-01_2017-10-14.csv"]
elif county == "FranklinYakima2018":
raw_names = ["L7_T1C2L2_Scaled_FranklinYakima2018_2017-01-01_2019-10-14.csv",
"L8_T1C2L2_Scaled_FranklinYakima2018_2017-01-01_2019-10-14.csv"]
elif county == "Grant2017":
raw_names = ["L7_T1C2L2_Scaled_Grant2017_2016-01-01_2018-10-14.csv",
"L8_T1C2L2_Scaled_Grant2017_2016-01-01_2018-10-14.csv"]
elif county == "Walla2015":
raw_names = ["L7_T1C2L2_Scaled_Walla2015_2014-01-01_2016-12-31.csv",
"L8_T1C2L2_Scaled_Walla2015_2014-01-01_2016-12-31.csv"]
SF_data_name = county + ".csv"
SG_df_NDVI = pd.read_csv(data_dir + "SG_" + county + "_NDVI_JFD.csv")
SG_df_EVI = pd.read_csv(data_dir + "SG_" + county + "_EVI_JFD.csv")
eval_tb = pd.read_csv(eval_dir + "evaluation_set.csv")
if county == "AdamBenton2016":
eval_tb = eval_tb[eval_tb.county.isin(["Adams", "Benton"])]
elif county == "FranklinYakima2018":
eval_tb = eval_tb[eval_tb.county.isin(["Franklin", "Yakima"])]
elif county == "Grant2017":
eval_tb = eval_tb[eval_tb.county == "Grant"]
elif county == "Walla2015":
eval_tb = eval_tb[eval_tb.county == "Walla Walla"]
# convert the strings to datetime format
SG_df_NDVI['human_system_start_time'] = | pd.to_datetime(SG_df_NDVI['human_system_start_time']) | pandas.to_datetime |
'''
Input event payload expected to be in the following format:
{
"Batch_start": "MAC000001",
"Batch_end": "MAC000010",
"Data_start": "2013-06-01",
"Data_end": "2014-01-01",
"Forecast_period": 7
}
'''
import boto3, os
import json
import pandas as pd
import numpy as np
from pyathena import connect
REGION = os.environ['AWS_REGION']
ATHENA_OUTPUT_BUCKET = os.environ['Athena_bucket']
S3_BUCKET = os.environ['Working_bucket']
DB_SCHEMA = os.environ['Db_schema']
ATHENA_CONNECTION = connect(s3_staging_dir='s3://{}/'.format(ATHENA_OUTPUT_BUCKET), region_name=REGION)
S3 = boto3.resource('s3')
def get_meters(connection, start, end, db_schema):
selected_households = '''select distinct meter_id
from "{}".daily where meter_id between '{}' and '{}' order by meter_id;
'''.format(db_schema, start, end)
df_meters = | pd.read_sql(selected_households, connection) | pandas.read_sql |
from requests import get
import datetime
import pandas as pd
from starlingpy.StarlingAPIs import Account_APIs
BASE_PATH = "https://api.starlingbank.com/api/v2/"
class TransactionHistory:
"""
A history of transactions associated with the Starling account, between stipulated datetimes.
Requires the StarlingAccount object to be passed
"""
def __init__(self,Account,**kwargs):
self.associated_Account = Account
output = Account.get_transactions(**kwargs)
self.start_date , self.end_date = output[0] , output[1]
self.transaction_List = output[2]
self.full_Dataframe = self.generate_transaction_dataframe(**kwargs)
self.summary_Dataframe = self.summary_transaction_dataframe()
def generate_transaction_dataframe(self,**kwargs):
""" Generates full transaction dataframe between dates """
df = pd.DataFrame(self.transaction_List)
running_balance = self.generate_running_balance_list(self.transaction_List)
df['Balance Before'] = running_balance[1:]
df['Balance After'] = running_balance[:-1]
df["transactionTime"]= | pd.to_datetime(df["transactionTime"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
from tqdm import tqdm
#读取轨迹数据
i = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
user = pd.read_csv(r"G:\track data and travel prediction\dataset\DataTech_Travel_Train_User",
sep='|', names=['USER_ID', 'FLAG', 'TRAVEL_TYPE'])
#user = user.sample(100)
userid = list(user['USER_ID'])
dataset_sample = pd.DataFrame(columns=["USER_ID", "START_TIME", "LONGITUDE", "LATITUDE", "P_MONTH"])
for number in tqdm(i):
filename = 'G:/track data and travel prediction/dataset/DataTech_Public_Trace/DataTech_Public_Trace_'
filename += number
dataset = | pd.read_csv(filename, sep='|', names=["USER_ID", "START_TIME", "LONGITUDE", "LATITUDE", "P_MONTH"]) | pandas.read_csv |
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
# Figure: relative case counts per age group (top panel) and the lag-tau
# cross correlations between the groups (bottom panel).
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
# Shaded spans highlight two date windows -- presumably school closure /
# holiday periods; TODO confirm what these dates correspond to.
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
# Open markers: one point per lag in lag_series; dashed lines connect them.
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
# Vertical reference line at lag zero.
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
# Standalone figure with only the lag-tau cross-correlation panel
# (same content as the bottom panel of the previous figure).
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
# Each file is an MCMC posterior-sample dictionary; the trailing comment is
# the last calibration datapoint used for that chain (matches end_calibrations).
samples_dicts = [
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
    """Time-dependent social contact policy for the 2020-2021 Belgian epidemic.

    Returns the social contact matrix Nc to use at simulation time ``t``,
    switching between pre-lockdown, compliance ramp, lockdown and school
    open/closed regimes for both epidemic waves.

    Parameters
    ----------
    t : pd.Timestamp
        Current simulation time.
    states, param :
        Model states/parameters (unused here; required by the model's
        time-dependent parameter interface).
    l : float
        Compliance ramp length (days).
    tau : float
        Compliance delay (days) before the ramp starts.
    prev_schools, prev_work, prev_rest, prev_home : float
        Effectivity ("prevention") parameters per contact category.
    """
    # Convert tau and l to dates
    tau_days = pd.Timedelta(tau, unit='D')
    l_days = pd.Timedelta(l, unit='D')
    # Define key dates of first wave
    t1 = pd.Timestamp('2020-03-15') # start of lockdown
    t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
    t3 = pd.Timestamp('2020-07-01') # start of summer holidays
    t4 = pd.Timestamp('2020-09-01') # end of summer holidays
    # Define key dates of second wave
    t5 = pd.Timestamp('2020-10-19') # lockdown (1)
    t6 = pd.Timestamp('2020-11-02') # lockdown (2)
    t7 = pd.Timestamp('2020-11-16') # schools re-open
    t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
    t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
    t10 = pd.Timestamp('2021-02-15') # Spring break starts
    t11 = pd.Timestamp('2021-02-21') # Spring break ends
    t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
    t13 = pd.Timestamp('2021-04-18') # Easter holiday ends

    # ------
    # WAVE 1
    # ------
    if t <= t1:
        t = pd.Timestamp(t.date())
        return all_contact(t)
    elif t1 < t < t1 + tau_days:
        # Within the compliance delay: behaviour unchanged.
        t = pd.Timestamp(t.date())
        return all_contact(t)
    elif t1 + tau_days < t <= t1 + tau_days + l_days:
        # Linear ramp from pre-lockdown to lockdown contact behaviour.
        t = pd.Timestamp(t.date())
        policy_old = all_contact(t)
        policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
        return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
    elif t1 + tau_days + l_days < t <= t2:
        t = pd.Timestamp(t.date())
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    elif t2 < t <= t3:
        t = pd.Timestamp(t.date())
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    elif t3 < t <= t4:
        t = pd.Timestamp(t.date())
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    # ------
    # WAVE 2
    # ------
    # NOTE(review): unlike the wave-1 branches, t is not truncated to a date
    # in most wave-2 branches -- TODO confirm this is intentional.
    elif t4 < t <= t5 + tau_days:
        # Relies on contact_matrix_4prev's defaults for the prevention
        # arguments (presumably no prevention before the second lockdown)
        # -- TODO confirm the defaults.
        return contact_matrix_4prev(t, school=1)
    elif t5 + tau_days < t <= t5 + tau_days + l_days:
        policy_old = contact_matrix_4prev(t, school=1)
        # BUG FIX: the original call omitted prev_home here, shifting every
        # prevention argument one position (prev_schools was passed as
        # prev_home, etc.). Pass all four, consistent with every other call.
        policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=1)
        return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
    elif t5 + tau_days + l_days < t <= t6:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=1)
    elif t6 < t <= t7:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    elif t7 < t <= t8:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=1)
    elif t8 < t <= t9:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    elif t9 < t <= t10:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=1)
    elif t10 < t <= t11:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    elif t11 < t <= t12:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=1)
    elif t12 < t <= t13:
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=0)
    else:
        t = pd.Timestamp(t.date())
        return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                              school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
# One exposed individual per age group (9 groups); everyone else susceptible.
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
                        time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
    """Draw one joint posterior sample into param_dict (WAVE 1 variant,
    no school prevention parameter) and return the updated dictionary."""
    # Pick one random index from the beta chain; every other parameter is
    # taken at the same index so the joint posterior correlation is kept.
    idx, sampled_beta = random.choice(list(enumerate(samples_dict['beta'])))
    param_dict['beta'] = sampled_beta
    for name in ('da', 'omega'):
        param_dict[name] = samples_dict[name][idx]
    # sigma is tied to omega: total incubation is fixed at 5.2 days.
    param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
    # Second-calibration (compliance/prevention) parameters, same index.
    for name in ('l', 'tau', 'prev_home', 'prev_work', 'prev_rest'):
        param_dict[name] = samples_dict[name][idx]
    return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
# Two-sided quantile probabilities for the conf_int confidence band
# (e.g. conf_int=0.05 gives the 2.5% and 97.5% quantiles).
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
    """Superimpose Poisson observation noise on simulated trajectories.

    For every posterior sample trajectory of ``output[state_name]`` (summed
    over age groups), draw ``n_draws_per_sample`` Poisson realisations per
    time step, then summarise all draws per time step.

    Returns
    -------
    (mean, median, lower, upper) : per-time-step summary arrays, where the
    lower/upper bounds are the LL/UL quantiles of the pooled draws.
    """
    # Aggregate over age groups; shape becomes (n_samples, n_timesteps).
    aggregated = output[state_name].sum(dim="Nc").values
    n_timesteps = aggregated.shape[1]
    draws = np.zeros((n_timesteps, n_draws_per_sample * n_samples))
    for sample_idx in range(aggregated.shape[0]):
        # One Poisson rate per time step; draw n_draws_per_sample realisations.
        lam = np.expand_dims(aggregated[sample_idx, :], axis=1)
        block = np.random.poisson(lam, size=(n_timesteps, n_draws_per_sample))
        start = sample_idx * n_draws_per_sample
        draws[:, start:start + n_draws_per_sample] = block
    mean = np.mean(draws, axis=1)
    median = np.median(draws, axis=1)
    lower = np.quantile(draws, q=LL, axis=1)
    upper = np.quantile(draws, q=UL, axis=1)
    return mean, median, lower, upper
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
    """Plot a model prediction band with calibration data on *ax*.

    The shaded band spans vector_LL..vector_UL, the dashed line is
    vector_mean. Data inside [start_calibration, end_calibration] are drawn
    as black open circles (used in inference); data after end_calibration up
    to end_sim as red open circles (out-of-sample). Returns the axis.
    """
    ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
    ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
    ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
    # Out-of-sample points start the day after end_calibration.
    ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
    ax = _apply_tick_locator(ax)
    ax.set_xlim(start_calibration,end_sim)
    ax.set_ylabel(state_label)
    return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
# Grid: one row per calibration end date; first n_prevention columns are
# posterior histograms of the prevention parameters, last (wider) column is
# the corresponding hospitalisation fit.
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
    print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
    # Simulate with N posterior draws from calibration i.
    out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
    vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
    for j in range(n_prevention+1):
        if j != n_prevention:
            # Posterior histogram with a dashed line at the posterior mean.
            n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
            axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
            max_n = 1.05*max(n)
            axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
                               rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
            if j == 0:
                axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
                                   xycoords=axes[i,j].yaxis.label, textcoords='offset points',
                                   ha='right', va='center')
            axes[i,j].set_xlim([0,1])
            axes[i,j].set_xticks([0.0, 0.5, 1.0])
            axes[i,j].set_yticks([])
            axes[i,j].grid(False)
            if i == n_calibrations-1:
                axes[i,j].set_xlabel(prevention_labels[j])
            axes[i,j].spines['left'].set_visible(False)
        else:
            # Last column: fit of new hospitalisations for this calibration.
            axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
            axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
            axes[i,j].set_yticks([0,300, 600])
            axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
# Keep the last (most complete) calibration's summary for later comparison.
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
def compute_R0(initN, Nc, samples_dict, model_parameters):
    """Compute the basic reproduction number from posterior samples.

    Returns
    -------
    R0 : ndarray, shape (n_age_groups, n_samples)
        Per-age-group R0 for every posterior sample, using each group's
        total contacts (row sums of Nc).
    R0_overall : float
        Population-weighted R0, averaged over all samples.
    """
    contacts = np.sum(Nc, axis=1)
    beta = np.asarray(samples_dict['beta'])
    da = np.asarray(samples_dict['da'])
    omega = np.asarray(samples_dict['omega'])
    a = np.asarray(model_parameters['a'])
    # (n_ages, n_samples): (a_i*da_j + omega_j) * beta_j * contacts_i
    R0 = (a[:, None] * da[None, :] + omega[None, :]) * beta[None, :] * contacts[:, None]
    # Weight each age group by its population share, then average over samples.
    weights = initN / np.sum(initN)
    R0_overall = np.mean(np.sum(R0 * weights[:, None], axis=0))
    return R0, R0_overall
# Compute R0 from the fullest wave-1 calibration and compare, per age group,
# against the fraction of simulated hospitalisations.
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
# Cumulative hospitalisations over all draws; summaries taken at the final
# time step and normalised by the mean total.
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
# NOTE: cumsum is reused here -- now the draw-averaged cumulative series.
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
# Secondary y-axis: fraction of hospitalisations per age group.
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
# Trailing comments give the last calibration datapoint per chain.
# NOTE(review): the first comment says 2020-11-04 but end_calibrations below
# uses 2020-11-06 -- TODO confirm which is correct.
samples_dicts = [
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
    json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
# warmup is overwritten to 0: the initial condition is loaded directly
# instead of being reached by simulating a warmup period.
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
    initial_states = json.load(fp)
# Add vaccination-related states (all zero on September 1st).
initial_states.update({
    'VE': np.zeros(9),
    'V': np.zeros(9),
    'V_new': np.zeros(9),
    'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
                        time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
    """Draw one joint posterior sample into param_dict (WAVE 2 variant,
    including the school prevention parameter) and return the dictionary."""
    # One random index from the beta chain; all other parameters are read at
    # the same index so the joint posterior correlation is preserved.
    idx, sampled_beta = random.choice(list(enumerate(samples_dict['beta'])))
    param_dict['beta'] = sampled_beta
    for name in ('da', 'omega'):
        param_dict[name] = samples_dict[name][idx]
    # sigma is tied to omega: total incubation is fixed at 5.2 days.
    param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
    # Second-calibration (compliance/prevention) parameters, same index.
    for name in ('l', 'tau', 'prev_schools', 'prev_home', 'prev_work', 'prev_rest'):
        param_dict[name] = samples_dict[name][idx]
    return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
# Same layout as the wave-1 figure, but with four prevention parameters
# (schools included) and a wider fit column.
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
    print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
    out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
    vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
    for j in range(n_prevention+1):
        if j != n_prevention:
            # Posterior histogram with a dashed line at the posterior mean.
            n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
            axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
            max_n = 1.05*max(n)
            axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
                               rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
            if j == 0:
                axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
                                   xycoords=axes[i,j].yaxis.label, textcoords='offset points',
                                   ha='right', va='center')
            axes[i,j].set_xlim([0,1])
            axes[i,j].set_xticks([0.0, 1.0])
            axes[i,j].set_yticks([])
            axes[i,j].grid(False)
            if i == n_calibrations-1:
                axes[i,j].set_xlabel(prevention_labels[j])
            axes[i,j].spines['left'].set_visible(False)
        else:
            # Last column: fit of new hospitalisations for this calibration.
            axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
            axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
            axes[i,j].set_yticks([0,250, 500, 750])
            axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
# Keep the last calibration's summary and collect both waves' results.
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
# Load the most complete calibration of each wave and overlay the posterior
# histograms of the prevention parameters.
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
    if idx != 0:
        # Blue: wave 1; black: wave 2. Dotted lines mark posterior means.
        (n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
        (n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
        max_n = max([max(n1),max(n2)])*1.10
        ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
        ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
        if idx ==1:
            # Single annotation when the two posterior means coincide.
            ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
                        rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
        else:
            ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
                        rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
            ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
                        rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
        ax.set_xlabel(labels[idx])
        ax.set_yticks([])
        ax.spines['left'].set_visible(False)
    else:
        # prev_schools was only calibrated in wave 2, so only that histogram.
        ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
        ax.set_xlabel('$\Omega_{schools}$')
        ax.set_yticks([])
        ax.spines['left'].set_visible(False)
    ax.set_xlim([0,1])
    ax.xaxis.grid(False)
    ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
    """Compute the reproduction number per age group and per posterior sample.

    Parameters
    ----------
    initN : numpy.ndarray
        Population size per age group (length N).
    Nc : numpy.ndarray
        Contact matrix of shape (N, sample_size): number of contacts per age
        group, one column per posterior sample.
    samples_dict : dict
        Posterior samples; must contain equal-length lists 'beta', 'da' and
        'omega'.
    model_parameters : dict
        Model parameters; 'a' must hold the per-age-group asymptomatic
        infectiousness fraction.

    Returns
    -------
    R0 : numpy.ndarray
        Shape (N, sample_size); reproduction number per age group and sample.
    R0_mean : numpy.ndarray
        Length sample_size; population-weighted average of R0 over age groups.
    """
    N = initN.size
    sample_size = len(samples_dict['beta'])
    R0 = np.zeros([N, sample_size])
    R0_norm = np.zeros([N, sample_size])
    for i in range(N):
        for j in range(sample_size):
            # R0 = (infectious period contribution: a*da + omega) * beta * contacts
            R0[i, j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * Nc[i, j]
        # Weight each age group by its population share before summing over ages.
        R0_norm[i, :] = R0[i, :] * (initN[i] / sum(initN))
    # Population-weighted mean over age groups, one value per posterior sample.
    # (The original also computed an unused per-age mean; it has been removed.)
    R0_mean = np.sum(R0_norm, axis=0)
    return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index = df_google.index
# Two-level column header: wave number ("1"/"2") crossed with statistic name.
stat_names = ['work_mean', 'work_LL', 'work_UL', 'schools_mean', 'schools_LL', 'schools_UL',
              'rest_mean', 'rest_LL', 'rest_UL', 'home_mean', 'home_LL', 'home_UL',
              'total_mean', 'total_LL', 'total_UL']
columns = pd.MultiIndex.from_product([['1', '2'], stat_names], names=["WAVE", "Type"])
# One row per Google-mobility date, 2 waves x 15 statistics = 30 columns of zeros.
data = np.zeros([len(df_google.index), 30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates = [pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves = ["1", "2"]
# Loop over both epidemic waves: j = 0 -> first wave (spring 2020 lockdown,
# starting 2020-03-15), j = 1 -> second wave (autumn 2020, starting
# 2020-10-19). Each wave uses its own posterior sample dictionary.
for j,samples_dict in enumerate(samples_dicts):
    print('\n WAVE: ' + str(j)+'\n')
    # ---------------
    # Rest prevention
    # ---------------
    print('Rest\n')
    data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
    Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
    for idx, date in enumerate(df_google.index):
        # tau: delay before measures take effect; l: length (days) of the
        # linear compliance ramp-in. Posterior means are used here.
        tau = np.mean(samples_dict['tau'])
        l = np.mean(samples_dict['l'])
        tau_days = pd.Timedelta(tau, unit='D')
        l_days = pd.Timedelta(l, unit='D')
        date_start = start_dates[j]
        # Branch 1: before measures take effect -- Google-mobility-scaled
        # contacts, no prevention factor applied.
        # NOTE(review): np.ones(...) multiplies only the last (grocery) term;
        # the sum still broadcasts to the sample-size length, so the result is
        # the same -- verify this was the intent.
        if date <= date_start + tau_days:
            data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
            contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
            R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
        # Branch 2: during the compliance ramp -- linear interpolation between
        # 'old' (no prevention) and 'new' (prevention-scaled) contacts.
        elif date_start + tau_days < date <= date_start + tau_days + l_days:
            old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
            new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
                               )*np.array(samples_dict['prev_rest'])
            data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
            old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
            new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
            contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
            R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
        # Branch 3: ramp complete -- prevention factor fully applied.
        else:
            data_rest[idx,:] = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
                               )*np.array(samples_dict['prev_rest'])
            contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
                               + 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
                               + 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
            R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
    # Per-date mean and 95% quantile band over posterior samples.
    Re_rest_mean = np.mean(Re_rest,axis=1)
    Re_rest_LL = np.quantile(Re_rest,q=0.05/2,axis=1)
    Re_rest_UL = np.quantile(Re_rest,q=1-0.05/2,axis=1)
    # ---------------
    # Work prevention
    # ---------------
    print('Work\n')
    data_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
    Re_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
    for idx, date in enumerate(df_google.index):
        tau = np.mean(samples_dict['tau'])
        l = np.mean(samples_dict['l'])
        tau_days = pd.Timedelta(tau, unit='D')
        l_days = pd.Timedelta(l, unit='D')
        date_start = start_dates[j]
        # Same three-phase (pre / ramp / post) scheme as the 'rest' section,
        # driven by the Google 'work' mobility stream.
        if date <= date_start + tau_days:
            data_work[idx,:] = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
            contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
            R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
        elif date_start + tau_days < date <= date_start + tau_days + l_days:
            old = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
            new = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.array(samples_dict['prev_work'])
            data_work[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
            old_contacts = np.expand_dims(0.01*(100+df_google['work'][date])*(np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
            new_contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
            contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
            R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
        else:
            data_work[idx,:] = (0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0))))*np.array(samples_dict['prev_work'])
            contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
            R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
    Re_work_mean = np.mean(Re_work,axis=1)
    Re_work_LL = np.quantile(Re_work, q=0.05/2, axis=1)
    Re_work_UL = np.quantile(Re_work, q=1-0.05/2, axis=1)
    # ----------------
    # Home prevention
    # ----------------
    print('Home\n')
    data_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
    Re_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
    for idx, date in enumerate(df_google.index):
        tau = np.mean(samples_dict['tau'])
        l = np.mean(samples_dict['l'])
        tau_days = pd.Timedelta(tau, unit='D')
        l_days = pd.Timedelta(l, unit='D')
        date_start = start_dates[j]
        # Home contacts are not mobility-scaled: only the prevention factor
        # ramps in over the compliance window.
        if date <= date_start + tau_days:
            data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
            contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.ones(len(samples_dict['prev_home']))
            R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
        elif date_start + tau_days < date <= date_start + tau_days + l_days:
            old = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
            new = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
            data_home[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
            old_contacts = np.expand_dims(np.sum(Nc_home,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
            new_contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
            contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
            R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
        else:
            data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
            contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
            R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
    Re_home_mean = np.mean(Re_home,axis=1)
    Re_home_LL = np.quantile(Re_home, q=0.05/2, axis=1)
    Re_home_UL = np.quantile(Re_home, q=1-0.05/2, axis=1)
    # ------------------
    # School prevention
    # ------------------
    # Schools are toggled fully open (factor 1) or fully closed (factor 0)
    # over hard-coded calendar windows; wave 1 has no calibrated
    # prev_schools posterior so prev_work is used as a stand-in.
    if j == 0:
        print('School\n')
        data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
        Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
        for idx, date in enumerate(df_google.index):
            tau = np.mean(samples_dict['tau'])
            l = np.mean(samples_dict['l'])
            tau_days = pd.Timedelta(tau, unit='D')
            l_days = pd.Timedelta(l, unit='D')
            date_start = start_dates[j]
            if date <= date_start + tau_days:
                data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
                contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            elif date_start + tau_days < date <= date_start + tau_days + l_days:
                old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
                new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
                data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
                old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
                new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
                contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            # Schools closed until the start of the school year (2020-09-01).
            elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-09-01'):
                data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
                contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            else:
                data_schools[idx,:] = 1 * (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work']) # This is wrong, but is never used
                contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
    elif j == 1:
        print('School\n')
        data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_schools'])])
        Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
        for idx, date in enumerate(df_google.index):
            tau = np.mean(samples_dict['tau'])
            l = np.mean(samples_dict['l'])
            tau_days = pd.Timedelta(tau, unit='D')
            l_days = pd.Timedelta(l, unit='D')
            date_start = start_dates[j]
            if date <= date_start + tau_days:
                data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
                contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            elif date_start + tau_days < date <= date_start + tau_days + l_days:
                old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
                new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
                old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            # Closed/open calendar windows below presumably track Belgian school
            # closures and holidays (autumn break, Christmas, spring break).
            elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-11-16'):
                data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            elif pd.to_datetime('2020-11-16') < date <= pd.to_datetime('2020-12-18'):
                data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            elif pd.to_datetime('2020-12-18') < date <= pd.to_datetime('2021-01-04'):
                data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                # NOTE(review): 'tmp' is a stray alias of the same array and is
                # never used afterwards.
                contacts = tmp = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            elif pd.to_datetime('2021-01-04') < date <= pd.to_datetime('2021-02-15'):
                data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            elif pd.to_datetime('2021-02-15') < date <= pd.to_datetime('2021-02-21'):
                data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
            else:
                data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
                R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
    Re_schools_mean = np.mean(Re_schools,axis=1)
    Re_schools_LL = np.quantile(Re_schools, q=0.05/2, axis=1)
    Re_schools_UL = np.quantile(Re_schools, q=1-0.05/2, axis=1)
    # -----
    # Total
    # -----
    data_total = data_rest + data_work + data_home + data_schools
    Re_total = Re_rest + Re_work + Re_home + Re_schools
    Re_total_mean = np.mean(Re_total,axis=1)
    Re_total_LL = np.quantile(Re_total, q=0.05/2, axis=1)
    Re_total_UL = np.quantile(Re_total, q=1-0.05/2, axis=1)
    # -----------------------
    # Absolute contributions
    # -----------------------
    # The abs_* arrays are straight copies of the data_* arrays (the loop
    # performs no transformation).
    # NOTE(review): LL and UL quantile levels are defined elsewhere in this
    # file (not visible here); presumably 0.025 and 0.975 to match the
    # explicit q=0.05/2 bands above -- confirm.
    abs_rest = np.zeros(data_rest.shape)
    abs_work = np.zeros(data_rest.shape)
    abs_home = np.zeros(data_rest.shape)
    abs_schools = np.zeros(data_schools.shape)
    abs_total = data_total
    for i in range(data_rest.shape[0]):
        abs_rest[i,:] = data_rest[i,:]
        abs_work[i,:] = data_work[i,:]
        abs_home[i,:] = data_home[i,:]
        abs_schools[i,:] = data_schools[i,:]
    abs_schools_mean = np.mean(abs_schools,axis=1)
    abs_schools_LL = np.quantile(abs_schools,LL,axis=1)
    abs_schools_UL = np.quantile(abs_schools,UL,axis=1)
    abs_rest_mean = np.mean(abs_rest,axis=1)
    abs_rest_LL = np.quantile(abs_rest,LL,axis=1)
    abs_rest_UL = np.quantile(abs_rest,UL,axis=1)
    abs_work_mean = np.mean(abs_work,axis=1)
    abs_work_LL = np.quantile(abs_work,LL,axis=1)
    abs_work_UL = np.quantile(abs_work,UL,axis=1)
    abs_home_mean = np.mean(abs_home,axis=1)
    abs_home_LL = np.quantile(abs_home,LL,axis=1)
    abs_home_UL = np.quantile(abs_home,UL,axis=1)
    abs_total_mean = np.mean(abs_total,axis=1)
    abs_total_LL = np.quantile(abs_total,LL,axis=1)
    abs_total_UL = np.quantile(abs_total,UL,axis=1)
    # -----------------------
    # Relative contributions
    # -----------------------
    # Each location's share of the total number of contacts, per date/sample.
    rel_rest = np.zeros(data_rest.shape)
    rel_work = np.zeros(data_rest.shape)
    rel_home = np.zeros(data_rest.shape)
    rel_schools = np.zeros(data_schools.shape)
    rel_total = np.zeros(data_schools.shape)
    for i in range(data_rest.shape[0]):
        total = data_schools[i,:] + data_rest[i,:] + data_work[i,:] + data_home[i,:]
        rel_rest[i,:] = data_rest[i,:]/total
        rel_work[i,:] = data_work[i,:]/total
        rel_home[i,:] = data_home[i,:]/total
        rel_schools[i,:] = data_schools[i,:]/total
        rel_total[i,:] = total/total
    rel_schools_mean = np.mean(rel_schools,axis=1)
    rel_schools_LL = np.quantile(rel_schools,LL,axis=1)
    rel_schools_UL = np.quantile(rel_schools,UL,axis=1)
    rel_rest_mean = np.mean(rel_rest,axis=1)
    rel_rest_LL = np.quantile(rel_rest,LL,axis=1)
    rel_rest_UL = np.quantile(rel_rest,UL,axis=1)
    rel_work_mean = np.mean(rel_work,axis=1)
    rel_work_LL = np.quantile(rel_work,LL,axis=1)
    rel_work_UL = np.quantile(rel_work,UL,axis=1)
    rel_home_mean = np.mean(rel_home,axis=1)
    rel_home_LL = np.quantile(rel_home,LL,axis=1)
    rel_home_UL = np.quantile(rel_home,UL,axis=1)
    rel_total_mean = np.mean(rel_total,axis=1)
    rel_total_LL = np.quantile(rel_total,LL,axis=1)
    rel_total_UL = np.quantile(rel_total,UL,axis=1)
    # ---------------------
    # Append to dataframe
    # ---------------------
    # df_rel/df_Re/df_abs were built from the SAME zeros array, so in-place
    # column assignment on one frame can leak into the others; the deep copies
    # below snapshot df_rel and df_Re before later assignments and are
    # restored after the loop -- presumably a workaround for that aliasing.
    # TODO confirm against pandas copy semantics in the pinned version.
    df_rel[waves[j],"work_mean"] = rel_work_mean
    df_rel[waves[j],"work_LL"] = rel_work_LL
    df_rel[waves[j],"work_UL"] = rel_work_UL
    df_rel[waves[j], "rest_mean"] = rel_rest_mean
    df_rel[waves[j], "rest_LL"] = rel_rest_LL
    df_rel[waves[j], "rest_UL"] = rel_rest_UL
    df_rel[waves[j], "home_mean"] = rel_home_mean
    df_rel[waves[j], "home_LL"] = rel_home_LL
    df_rel[waves[j], "home_UL"] = rel_home_UL
    df_rel[waves[j],"schools_mean"] = rel_schools_mean
    df_rel[waves[j],"schools_LL"] = rel_schools_LL
    df_rel[waves[j],"schools_UL"] = rel_schools_UL
    df_rel[waves[j],"total_mean"] = rel_total_mean
    df_rel[waves[j],"total_LL"] = rel_total_LL
    df_rel[waves[j],"total_UL"] = rel_total_UL
    copy1 = df_rel.copy(deep=True)
    df_Re[waves[j],"work_mean"] = Re_work_mean
    df_Re[waves[j],"work_LL"] = Re_work_LL
    df_Re[waves[j],"work_UL"] = Re_work_UL
    df_Re[waves[j], "rest_mean"] = Re_rest_mean
    df_Re[waves[j],"rest_LL"] = Re_rest_LL
    df_Re[waves[j],"rest_UL"] = Re_rest_UL
    df_Re[waves[j], "home_mean"] = Re_home_mean
    df_Re[waves[j], "home_LL"] = Re_home_LL
    df_Re[waves[j], "home_UL"] = Re_home_UL
    df_Re[waves[j],"schools_mean"] = Re_schools_mean
    df_Re[waves[j],"schools_LL"] = Re_schools_LL
    df_Re[waves[j],"schools_UL"] = Re_schools_UL
    df_Re[waves[j],"total_mean"] = Re_total_mean
    df_Re[waves[j],"total_LL"] = Re_total_LL
    df_Re[waves[j],"total_UL"] = Re_total_UL
    copy2 = df_Re.copy(deep=True)
    df_abs[waves[j],"work_mean"] = abs_work_mean
    df_abs[waves[j],"work_LL"] = abs_work_LL
    df_abs[waves[j],"work_UL"] = abs_work_UL
    df_abs[waves[j], "rest_mean"] = abs_rest_mean
    df_abs[waves[j], "rest_LL"] = abs_rest_LL
    df_abs[waves[j], "rest_UL"] = abs_rest_UL
    df_abs[waves[j], "home_mean"] = abs_home_mean
    df_abs[waves[j], "home_LL"] = abs_home_LL
    df_abs[waves[j], "home_UL"] = abs_home_UL
    df_abs[waves[j],"schools_mean"] = abs_schools_mean
    df_abs[waves[j],"schools_LL"] = abs_schools_LL
    df_abs[waves[j],"schools_UL"] = abs_schools_UL
    df_abs[waves[j],"total_mean"] = abs_total_mean
    df_abs[waves[j],"total_LL"] = abs_total_LL
    df_abs[waves[j],"total_UL"] = abs_total_UL
# Restore df_rel/df_Re from the deep copies snapshotted inside the loop
# (guards against aliasing with df_abs -- see the note there).
df_rel = copy1
df_Re = copy2
#df_abs.to_excel('test.xlsx', sheet_name='Absolute contacts')
#df_rel.to_excel('test.xlsx', sheet_name='Relative contacts')
#df_Re.to_excel('test.xlsx', sheet_name='Effective reproduction number')
# Wave 1 strict lockdown window: mean total contacts and Re (LL/mean/UL).
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]))
print(np.mean(df_Re["1","total_LL"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]),
      np.mean(df_Re["1","total_mean"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]),
      np.mean(df_Re["1","total_UL"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]))
# Wave 1 relaxation window (June 2020).
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]))
print(np.mean(df_Re["1","total_LL"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]),
      np.mean(df_Re["1","total_mean"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]),
      np.mean(df_Re["1","total_UL"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]))
# Wave 2: schools-closed window (2020-10-25 to 2020-11-16).
print(np.mean(df_abs["2","total_mean"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]))
print(np.mean(df_Re["2","total_LL"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]),
      np.mean(df_Re["2","total_mean"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]),
      np.mean(df_Re["2","total_UL"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]))
# Wave 2: schools-open window (2020-11-16 to 2020-12-18).
print(np.mean(df_abs["2","total_mean"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]))
print(np.mean(df_Re["2","total_LL"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]),
np.mean(df_Re["2","total_mean"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]),
np.mean(df_Re["2","total_UL"][pd.to_datetime('2020-11-16'): | pd.to_datetime('2020-12-18') | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import prince
from sklearn.cluster import DBSCAN
import itertools
from cmca import CMCA
from ccmca import CCMCA
plt.style.use('ggplot')
# Shorthand for the LaTeX alpha symbol used in plot labels.
alpha = r'$ \alpha $'
# Tableau-10 colour palette. Colour-name keys, integer keys (presumably
# cluster labels, with -1 -- DBSCAN's noise label -- mapped to gray) and
# party-abbreviation keys all alias the same hex values.
tableau10 = {
    'blue': '#507AA6',
    'orange': '#F08E39',
    'red': '#DF585C',
    'teal': '#78B7B2',
    'green': '#5BA053',
    'yellow': '#ECC854',
    'purple': '#AF7BA1',
    'pink': '#FD9EA9',
    'brown': '#9A7460',
    'gray': '#BAB0AC',
    1: '#507AA6',
    0: '#F08E39',
    2: '#DF585C',
    3: '#78B7B2',
    4: '#5BA053',
    5: '#ECC854',
    6: '#AF7BA1',
    7: '#FD9EA9',
    8: '#9A7460',
    9: '#BAB0AC',
    -1: '#BAB0AC',
    'LDP': '#507AA6',
    'DPJ': '#F08E39',
    "JRP": '#DF585C',
    'DEM': '#507AA6',
    'REP': '#F08E39',
}
def fillna_based_on_dtype(df):
    """Fill missing values in ``df`` in place, choosing the filler by dtype.

    Object (string-like) columns get the literal string 'na'; all other
    (numeric) columns get the sentinel value 99.

    Parameters
    ----------
    df : pandas.DataFrame
        Modified in place; nothing is returned.
    """
    for key, dtype in df.dtypes.items():
        # ``np.object`` was removed in NumPy 1.24; compare against the
        # builtin ``object`` instead (identical behavior on older NumPy).
        if dtype == object:
            df[key] = df[key].fillna('na')
        else:
            df[key] = df[key].fillna(99)
def csv_to_mats_2(csv):
df = | pd.read_csv(csv) | pandas.read_csv |
import pandas as pd
import inspect
import functools
# ============================================ DataFrame ============================================ #
# Decorates a generator function that yields rows (v,...)
def pd_dfrows(columns=None):
    """Decorate a generator of row tuples so calls return a ``pd.DataFrame``."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            rows = list(fn(*args, **kwargs))
            return pd.DataFrame(rows, columns=columns)
        return wrapper
    return dec
# Decorates a generator function that yields k,(v,...) pairs
def pd_dataframe(index=None, columns=None):
    """Decorate a generator yielding ``key, row_tuple`` pairs; calls return a
    ``pd.DataFrame`` indexed by the keys."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            keys, rows = map(list, zip(*fn(*args, **kwargs)))
            idx = pd.Index(keys, name=index)
            return pd.DataFrame(rows, idx, columns=columns)
        return wrapper
    return dec
# Decorates a generator function that yields (k,...),(v,...) pairs
def pd_multiframe(index=None, columns=None):
    """Decorate a generator yielding ``(k1, k2, ...), row_tuple`` pairs; calls
    return a ``pd.DataFrame`` with a ``MultiIndex`` built from the key tuples."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key_tuples, rows = map(list, zip(*fn(*args, **kwargs)))
            mi = pd.MultiIndex.from_tuples(key_tuples, names=index)
            return pd.DataFrame(rows, index=mi, columns=columns)
        return wrapper
    return dec
# ============================================ Series ============================================ #
# Decorates a generator function that yields k,v pairs
def pd_series(index=None, name=None):
    """Decorate a generator yielding ``key, value`` pairs; calls return a
    ``pd.Series`` indexed by the keys."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            keys, values = map(list, zip(*fn(*args, **kwargs)))
            return pd.Series(values, index=pd.Index(keys, name=index), name=name)
        return wrapper
    return dec
# Decorates a generator function that yields (k,...),v pairs
def pd_multiseries(index=None, name=None):
    """Decorate a generator yielding ``(k1, k2, ...), value`` pairs; calls
    return a ``pd.Series`` with a ``MultiIndex`` built from the key tuples."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key_tuples, values = map(list, zip(*fn(*args, **kwargs)))
            mi = pd.MultiIndex.from_tuples(key_tuples, names=index)
            return pd.Series(values, index=mi, name=name)
        return wrapper
    return dec
# ============================================ Index ============================================ #
# Decorates a generator function that yields (k,...)
def pd_multi_index(names=None):
    """Decorate a generator yielding key tuples; calls return a
    ``pd.MultiIndex`` built from them."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            tuples = list(fn(*args, **kwargs))
            return pd.MultiIndex.from_tuples(tuples, names=names)
        return wrapper
    return dec
# Decorates a generator function that yields k
def pd_index(name=None):
    """Decorate a generator yielding scalar keys; calls return a ``pd.Index``."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            values = list(fn(*args, **kwargs))
            return pd.Index(values, name=name)
        return wrapper
    return dec
# ============================================ Joins ============================================ #
# decorates either a generator function that yields dataframes, or an iterable containing dataframes.
def pd_concat(axis=0, **catargs):
    """Decorate a function (or generator) producing DataFrames; the pieces
    are concatenated with ``pd.concat`` before being returned."""
    def dec(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            pieces = list(fn(*args, **kwargs))
            return pd.concat(pieces, axis=axis, **catargs)
        return wrapper
    return dec
# ============================================ Transforms ============================================ #
# decorates a function that reindexes dataframes
def pd_reindex(name=None):
    """Decorate an index-mapping function ``fn``: the decorated callable takes
    a DataFrame, applies ``fn`` to every index value and returns a new frame
    with the remapped index (values and columns unchanged).

    Parameters
    ----------
    name : optional
        Name for the resulting index; defaults to the input frame's index
        name(s).
    """
    def dec(fn):
        # Consistency fix: every other decorator in this module preserves the
        # wrapped function's metadata; this one previously did not.
        @functools.wraps(fn)
        def wrapper(df):
            # NOTE(review): ``df.index.ndim`` is 1 even for a MultiIndex, so
            # the ``names`` branch may never trigger -- ``nlevels`` would be
            # the usual check. Behavior kept as-is; confirm with callers.
            inx = pd.Index(
                [*map(fn, df.index)],
                name=(name if name is not None else df.index.names if df.index.ndim > 1 else df.index.name),
            )
            return pd.DataFrame(df.values, index=inx, columns=df.columns)
        return wrapper
    return dec
# decorates a function that transforms both the index values and column values of an inputted dataframe
def pd_transform(inx=None, col=None):
    """Decorate a generator that consumes a DataFrame and yields
    ``index_value, row_tuple`` pairs; the decorated callable returns a new
    DataFrame built from those pairs.

    Parameters
    ----------
    inx : optional
        Name for the new index; defaults to the input frame's index name(s).
    col : optional
        Columns for the new frame; defaults to the input frame's columns.
    """
    def dec(fn):
        # Consistency fix: preserve fn's metadata like the other decorators
        # in this module (previously missing).
        @functools.wraps(fn)
        def wrapper(df, *args, **kwargs):
            i, d = [[*x] for x in zip(*fn(df, *args, **kwargs))]
            index = pd.Index(i, name=(inx if inx is not None else df.index.names if df.index.ndim > 1 else df.index.name))
            return pd.DataFrame(d, index, columns=(col if col is not None else df.columns))
        return wrapper
    return dec
# ============================================ GroupBy ============================================ #
def pd_groupby_agg(by,columns=None):
def dec(fn):
if inspect.isgeneratorfunction(fn):
def wrapper(df,*args,**kwargs):
i,d = [[*x] for x in zip(*(a for b in (((g,r) for r in fn(data,*args,**kwargs)) for g,data in df.groupby(by)) for a in b))]
inx = pd.Index(i,name=by) if type(by) == str else pd.MultiIndex.from_tuples(i,names=by)
return | pd.DataFrame(d,inx,columns=columns) | pandas.DataFrame |
import requests
import os
import time
from datetime import datetime
from calendar import timegm
import pandas
import boto3
import io
from utilities.exceptions import ApiError
from airflow.models import Variable
class TDAClient:
"""
Class for accessing TDA API.
...
Attributes
----------
client_id : str
client ID associated with your TDA developer app
access_token: str
temporary access token that's generated in order to send authenticated requests to TDA
url : str
url prefix for API endpoints and API version
Methods
-------
get_access_token():
Refresh access token. (Best way to use this is to catch an expired access token exception from their API and programatically refresh.)
_request(url, authorized=True, method="GET", **kwargs):
Internal method for making requests to API. Handles creating headers, parsing additional arguments, making request, and handling error codes.
get_quote(symbol):
Fetch current ticker price for a ticker symbol.
get_quote_history(symbol, ...):
Get entire history for a ticker. Can pass in date ranges.
get_option_chain(symbol, ...)
Get current option chain for a ticker. See method documentation for additional parameters.
"""
def __init__(self, client_id):
# need to initialize with client_id found in developer account settings
self.client_id = client_id
self.access_token = None
self.url = "https://api.tdameritrade.com/v1/"
def get_access_token(self):
"""implement refresh token method for getting access token, reliant on an existing access token stored in a /creds/tokeninfo.txt file in the working directory (make sure this gets copied to prod)"""
# will need to implement method for refreshing refresh token (90 day expiration)
aws_access_key = Variable.get("aws_access_key_id")
aws_secret_key = Variable.get("aws_secret_access_key")
s3_client = boto3.client(
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key
)
bytes_buffer = io.BytesIO()
s3_client.download_fileobj(Bucket="on-da-dip", Key="tokeninfo.txt", Fileobj=bytes_buffer)
byte_value = bytes_buffer.getvalue()
refresh_token = byte_value.decode()
endpoint = self.url + "oauth2/token"
grant_type = "refresh_token"
access_type = "offline"
data = {
"grant_type": grant_type,
"access_type": access_type,
"refresh_token": refresh_token,
"client_id": self.client_id
}
result = requests.post(url=endpoint, data=data)
if result.status_code == 200:
result_body = result.json()
self.access_token = result_body["access_token"]
cwd = os.getcwd()
dir = os.path.dirname(cwd)
refresh_token_file = open(dir + "/creds/tokeninfo.txt", "wt")
# need to update token file with latest refresh token
refresh_token_file.write(result_body["refresh_token"])
refresh_token_file.close()
s3_client.upload_file(Filename=dir + "/creds/tokeninfo.txt", Bucket="on-da-dip", Key="tokeninfo.txt")
elif result.status_code == 401:
print("Invalid credentials.")
elif result.status_code == 403:
print("User doesn't have access to this account and/or permissions.")
elif result.status_code == 400:
print("Validation unsuccessful. Check that client id and refresh tokens are correct.")
elif result.status_code == 500:
print("Server error, try again later.")
else:
print("Unknown error.")
def _request(self, url, authorized=True, method="GET", **kwargs):
"""
Internal generic method for handling requests to TDA endpoints. This should never need to be called directly by an end user.
Helps manage headers for authenticated requests, url construction, parameter construction, and also error handling.
:param url: TDA endpoint to be concatenated with https://api.tdameritrade.com/v1/marketdata/
:param authorized: Make an authenticated vs. unauthenticated request. Setting this to False allows you to bypass access token step, but will typically return stale data (1 day old).
:param method: Possible values are GET and POST right now. Default is GET.
:param kwargs: can extend this with additional arguments for the respective endpoints.
:return: Response from TDA.
"""
endpoint = self.url + "marketdata/" + url
params = {
"apikey": self.client_id
}
if kwargs:
for key in kwargs:
params[key] = kwargs[key]
if authorized:
self.get_access_token()
headers = {
"Authorization": "Bearer " + self.access_token
}
else:
headers = None
if method == "GET":
result = requests.get(url=endpoint, headers=headers, params=params)
if result.status_code == 200:
return result
else:
raise ApiError(result.status_code)
else:
raise TypeError("Invalid method. Please pass either GET or POST.")
def get_quote(self, symbol):
endpoint = symbol + "/quotes"
result = self._request(url=endpoint)
return result
def get_quote_history(self, symbol, startdate=None, enddate=None):
# default is YTD
if startdate is None:
current_year = datetime.today().year
startdate = str(current_year) + "-01-01"
if enddate is None:
enddate = str(datetime.today().strftime("%Y-%m-%d"))
url = symbol + "/pricehistory"
start_converted = timegm(time.strptime(startdate + "T00:00:00Z", "%Y-%m-%dT%H:%M:%SZ"))*1000
end_converted = timegm(time.strptime(enddate + "T00:00:00Z", "%Y-%m-%dT%H:%M:%SZ"))*1000
period_type = "year"
frequency_type = "daily"
frequency = 1
result = self._request(
url=url,
method="GET",
start_date=start_converted,
end_date=end_converted,
period_type=period_type, frequency_type=frequency_type, frequency=frequency
)
return result
def get_option_chain(self, symbol, contract_type="ALL", include_quotes=False, strike_range="ALL", strategy="SINGLE", option_type="ALL", exp_month="ALL", strike_count=-1):
    """
    Method for fetching an option chain from TDA. Returns their option chain
    object, which the end user will have to traverse to extract key datapoints.

    :param symbol: Ticker symbol.
    :param contract_type: Supports ALL, PUT, or CALL. Default is ALL.
        This param is case sensitive.
    :param include_quotes: Whether or not to include underlying quote data.
        Default is False.
    :param strike_range: Which strike prices to return. Supports ITM, NTM,
        OTM, SAK, SBK, SNK, ALL. Default is ALL.
    :param strategy: Options strategy. Right now only supports SINGLE.
    :param option_type: Option type, i.e. ALL, S (standard),
        NS (nonstandard). Right now only supports ALL.
    :param exp_month: Expiration month in all caps, first three letters
        (e.g. JUN), or ALL. Default is ALL.
    :param strike_count: Number of strikes to return above and below the
        at-the-money price; -1 (default) omits the parameter so TDA decides.
    :return: JSON blob representing option chain information.
    """
    # build the request once; the two previous branches only differed by
    # the presence of the strikeCount parameter
    params = {
        "url": "chains",
        "method": "GET",
        "authorized": True,
        "symbol": symbol,
        "contractType": contract_type,
        "includeQuotes": include_quotes,
        "range": strike_range,
        "strategy": strategy,
        "optionType": option_type,
        "expMonth": exp_month,
    }
    # -1 is the sentinel for "ignore parameter"
    if strike_count != -1:
        params["strikeCount"] = strike_count
    return self._request(**params)
class OptionChain:
    """
    Class for representing an option chain object from TDA. Consists of some
    high level attributes, in combination with a dictionary where each key is
    an expiration date representing an OptionSubChain class.

    Attributes
    ----------
    symbol: ticker symbol
    strategy: only supports SINGLE
    contract_type: either PUT or CALL
    chain: an ExpDateMap dictionary from TDA. Every key is an expiration
        date whose value is another dictionary keyed by strike price, where
        the values carry information about the specific contract.
    """
    def __init__(self, symbol, strategy, contract_type):
        self.symbol = symbol
        self.strategy = strategy
        self.contract_type = contract_type
        self.chain = None  # attach an ExpDateMap dict before calling unpack()

    def unpack(self):
        """
        Primary user-facing method. If an ExpDateMap has been attached to the
        chain attribute, traverse the nested dictionaries and convert them to
        a denormalized DataFrame (one row per expiration/strike combination).
        Returns None when no chain is attached.
        """
        if self.chain:
            chain_data = self.chain
            first_value = chain_data[list(chain_data.keys())[0]]
            column_names = list(first_value[list(first_value.keys())[0]][0].keys())
            column_names.extend(['strike_price', 'expiration_date'])
            # DataFrame.append was removed in pandas 2.0 (and was quadratic
            # in a loop): collect the per-expiration frames and concatenate
            # them once. The empty seed frame preserves the column set.
            frames = [pandas.DataFrame(columns=column_names)]
            for exp in chain_data:
                exp_date = exp.split(":")[0]
                subchain = OptionSubChain(exp_date, chain_data[exp])
                subchain_df = subchain.unpack()
                subchain_df['expiration_date'] = exp_date
                frames.append(subchain_df)
            return pandas.concat(frames)
class OptionSubChain:
    """
    Internal class representing a series of contracts at multiple strike
    prices, for a given expiration date. A contract at a specific strike
    price is represented by an OptionContract.

    The primary purpose of this class is to make unpacking the overall option
    chain easier for OptionChain; there is almost no reason to access it
    directly.
    """
    def __init__(self, expiration_date, expiration_dict):
        self.expiration_dict = expiration_dict
        self.expiration_date = expiration_date

    def unpack(self):
        """Flatten all strikes of this expiration into one DataFrame."""
        exp_dict = self.expiration_dict
        column_names = list(exp_dict[list(exp_dict.keys())[0]][0].keys())
        column_names.append('strike_price')
        # DataFrame.append was removed in pandas 2.0: gather the per-strike
        # frames and concatenate once; the empty seed keeps the column set
        frames = [pandas.DataFrame(columns=column_names)]
        for strike in exp_dict:
            contract = OptionContract(strike, exp_dict[strike])
            contract_df = contract.unpack()
            contract_df['strike_price'] = float(strike)
            frames.append(contract_df)
        return pandas.concat(frames)
class OptionContract:
"""
A set of data representing a contract at a given strike and expiration. Example fields include price, bids, volatility, etc.
The primary purpose of this class is to make unpacking the overall option chain object a little easier for the OptionChain class. There's almost no reason to every directly access this class.
"""
def __init__(self, strike_price, data):
self.strike_price = strike_price
self.data = data
def unpack(self):
column_names = list(self.data[0].keys())
contract_df = | pandas.DataFrame(columns=column_names) | pandas.DataFrame |
"""
Contains functions and classes that are olfactory-specific.
@author: <NAME>
"""
# ################################# IMPORTS ###################################
import copy
import itertools
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.linalg as LA
import scipy.stats as sps
import pathlib
import functions.nmf as nmf
import functions.general as FG
import functions.plotting as FP
import functions.olf_circ_offline as FOC
import params.act2 as par_act2
import params.act3 as par_act3
import params.con as par_con
import os
from typing import List, Tuple, Any
from functions.plotting import plot_cov # since we are not using any other
# plotting function
PATH = os.path.realpath(f"{os.path.expanduser('~')}/ORN-LN_circuit") + '/'
OLF_PATH = pathlib.Path.home() / 'ORN-LN_circuit'
# ###################### FUNCTIONS CONNECTIVITY DATA ##########################
def get_labels(sheet, n_begin: int, n: int):
    """Read the cell labels of a connectivity sheet and harmonize them.

    Row and column headers are expected to be identical; a warning is
    printed when they differ. The names are then rewritten (via
    par_con.dict_str_replace) to match the naming of the activity data.
    """
    row_labels = np.array(FG.get_labels_row(sheet, n_begin, n, 1))
    col_labels = np.array(FG.get_labels_clmn(sheet, n_begin, n, 1))
    if np.sum((row_labels == col_labels) - 1) != 0:
        print('left headers rows and columns are not the same!')
    # both headers agree, so the row labels are used from here on
    return FG.replace_substr_np(row_labels, par_con.dict_str_replace)
def combine_labels(labels_left, labels_right, lab):
    """Merge the left and right cell name arrays into a single set of names.

    When both sides carry the same label, ' left'/' right' are shortened to
    ' L'/' R' and *lab* is appended. When the two labels only differ by
    their side suffix ('left' vs 'right'), the suffix is replaced by *lab*.
    Any other mismatch is reported and the two names are concatenated.
    """
    combined = np.zeros(len(labels_left), dtype='U40')
    for i in range(len(combined)):
        left, right = labels_left[i], labels_right[i]
        if left == right:
            name = FG.repl_add(left, ' left', ' L')
            name = FG.repl_add(name, ' right', ' R')
            combined[i] = name + lab
        elif ('left' in left) and ('right' in right):
            combined[i] = FG.repl_add(left, ' left', lab)
        else:
            combined[i] = left + right
            print('something weird in the function combine_labels')
            print('labels are', left, right)
    return combined
# We need to find a new way to combine the cells, being caresulf with the
# broad cells
# that means we cannot anymore just sum cells
def import_con_data(keys=['L', 'R', 'M']):
    """Import the raw connectivity data from the excel sheets.

    Reads the left ('L') and right ('R') hemisphere adjacency matrices,
    harmonizes the cell names with those used in the activity data, and
    derives the summed ('S') and mean ('M') matrices across both sides.

    Parameters
    ----------
    keys : list of str or None
        Which 'sides' to return, among 'L', 'R', 'S', 'M'.
        None returns all four.

    Returns
    -------
    dict of pd.DataFrame
        One adjacency matrix per requested side; rows are presynaptic
        cells ('Pre cells'), columns postsynaptic cells ('Post cells').
    """
    # abbreviations applied to the raw cell names
    dict_str = {'bilateral': 'bil.',
                # 'left': 'L',
                # 'right': 'R',
                'dendrites': 'dend',
                'octopaminergic': 'oct.',
                'Olfactory': 'olf.',
                'LOWER': 'low.',
                'UPPER': 'up.',
                'Descending': 'Desc.'}
    sheet_con_L = FG.get_sheet(OLF_PATH / par_con.file_L)
    sheet_con_R = FG.get_sheet(OLF_PATH / par_con.file_R)
    cons = {}
    cons['L'] = FG.get_data(sheet_con_L, par_con.all_begin, par_con.all_n,
                            par_con.all_begin, par_con.all_n)
    cons['R'] = FG.get_data(sheet_con_R, par_con.all_begin, par_con.all_n,
                            par_con.all_begin, par_con.all_n)
    cells = {}
    cells['L'] = get_labels(sheet_con_L, par_con.all_begin, par_con.all_n)
    cells['R'] = get_labels(sheet_con_R, par_con.all_begin, par_con.all_n)
    # changing the position of ORN and PN in the cell names
    # (e.g. '... ORN' -> 'ORN ...') to match the activity data naming
    for i, s in itertools.product(range(len(cells['L'])), ['L', 'R']):
        cells[s][i] = FG.repl_preadd(cells[s][i], ' ORN', 'ORN ')
        cells[s][i] = FG.repl_preadd(cells[s][i], ' PN', 'PN ')
    # combined names for the summed ('S') and mean ('M') matrices
    cells['S'] = combine_labels(cells['L'], cells['R'], ' S')
    cells['M'] = combine_labels(cells['L'], cells['R'], ' M')
    # NOTE(review): this loop has no effect -- rebinding the loop variable
    # does not write back into the dict; presumably it was meant to apply
    # the dict_str abbreviations to every label set. TODO confirm before
    # "fixing", as downstream code may rely on the current labels.
    for cells1 in cells.values():
        cells1 = FG.replace_substr_np(cells1, dict_str)
    for i in range(len(cells['L'])):
        # shorten the side suffixes of the per-side label sets
        cells['L'][i] = FG.repl_add(cells['L'][i], ' left', ' L')
        cells['R'][i] = FG.repl_add(cells['R'][i], ' left', ' L')
        cells['L'][i] = FG.repl_add(cells['L'][i], ' right', ' R')
        cells['R'][i] = FG.repl_add(cells['R'][i], ' right', ' R')
        cells_bil = ['Keystone', 'PN 35a bil.']  # bilateral cells
        # bilateral cells appear on both sides: additionally tag them with
        # the hemisphere of the dataset they were read from
        for cl in cells_bil:
            if cells['L'][i] == f'{cl} L':
                cells['L'][i] = f'{cl} L L'
            if cells['L'][i] == f'{cl} R':
                cells['L'][i] = f'{cl} R L'
            if cells['R'][i] == f'{cl} R':
                cells['R'][i] = f'{cl} R R'
            if cells['R'][i] == f'{cl} L':
                cells['R'][i] = f'{cl} L R'
    cons['L'] = pd.DataFrame(cons['L'], index=cells['L'], columns=cells['L'])
    cons['R'] = pd.DataFrame(cons['R'], index=cells['R'], columns=cells['R'])
    # the sum of the connectivities
    cons['S'] = pd.DataFrame(cons['L'].values + cons['R'].values,
                             index=cells['S'], columns=cells['S'])
    # the mean connectivity
    cons['M'] = pd.DataFrame(cons['S'].values/2,
                             index=cells['M'], columns=cells['M'])
    for con in cons.values():
        con.columns.name = 'Post cells'
        con.index.name = 'Pre cells'
    if keys is None:
        return cons
    else:
        return {k: cons[k] for k in keys}
def get_con_data(keys=['L', 'R', 'M']):
    """Load the preprocessed full connectivity matrices from disk.

    Returns a dict mapping each requested side (e.g. 'L', 'R', 'M') to the
    DataFrame stored in results/cons/cons_full_<side>.hdf.
    """
    return {k: pd.read_hdf(f'{PATH}results/cons/cons_full_{k}.hdf')
            for k in keys}
def create_summary_LNs(cons1, func='mean', S='M'):
    """Add summary rows/columns for groups of local neurons.

    For every side in *cons1*, columns and rows are appended that hold the
    mean (or sum) of the Broad trios and duets, the Picky and Choosy cells
    (dendrite and axon averaged) and the two Keystones. The new labels end
    with ' {S} {side}'. The input dict is not modified; an augmented deep
    copy is returned.

    Parameters
    ----------
    cons1 : dict of pd.DataFrame
        connectivity matrices, one per side
    func : 'mean' or 'sum'
        how the group members are aggregated
    S : str
        tag inserted in the new summary labels (default 'M')
    """
    # without the copy, the function would change the input cons
    cons = copy.deepcopy(cons1)
    if func == 'mean':
        agg = pd.DataFrame.mean
    elif func == 'sum':
        agg = pd.DataFrame.sum
    else:
        raise ValueError(f'func should be \'mean\' or \'sum\', got {func}')
    for s in cons:
        con = cons[s]

        def summarize(first, last, new_name):
            # aggregate the label-contiguous group of columns, then the
            # corresponding group of rows, under the new summary label
            con.loc[:, new_name] = agg(con.loc[:, first: last], axis=1)
            con.loc[new_name] = agg(con.loc[first: last])

        summarize(f'Broad T1 {s}', f'Broad T3 {s}', f'Broad T {S} {s}')
        summarize(f'Broad D1 {s}', f'Broad D2 {s}', f'Broad D {S} {s}')
        for i in range(5):
            summarize(f'Picky {i} [dend] {s}', f'Picky {i} [axon] {s}',
                      f'Picky {i} {S} {s}')
        for i in range(1, 3):
            summarize(f'Choosy {i} [dend] {s}', f'Choosy {i} [axon] {s}',
                      f'Choosy {i} {S} {s}')
        summarize(f'Keystone L {s}', f'Keystone R {s}', f'Keystone {S} {s}')
    return cons
# probably this function and the one above can be easily merged,
# but don't want to go through the trouble now
# =============================================================================
# def create_mean_broads_keystone(cons):
# """
# this function adds columns and rows in the connectivity matrix
# which correspond to the mean of broad trios, duets and keystoes
# """
# for s in cons.keys():
# # for broads
# (BT1, BT3) = ('Broad T1 ' + s, 'Broad T3 ' + s)
# (BD1, BD2) = ('Broad D1 ' + s, 'Broad D2 ' + s)
# cons[s].loc[:, f'Broad T {s}'] = cons[s].loc[:, BT1:BT3].mean(axis=1)
# cons[s].loc[:, f'Broad D {s}] = cons[s].loc[:, BD1:BD2].mean(axis=1)
# cons[s].loc['Broad T ' + s] = cons[s].loc[BT1: BT3].mean()
# cons[s].loc['Broad D ' + s] = cons[s].loc[BD1: BD2].mean()
#
# # for keystones
# (key1, key2) = ('Keystone L ' + s, 'Keystone R ' + s)
# cons[s].loc[:, 'Keystone M ' + s] = (cons[s].loc[:, key1: key2]
# .mean(axis=1))
# cons[s].loc['Keystone M ' + s] = cons[s].loc[key1: key2].mean()
#
# return cons
# =============================================================================
def merge_con_data(con_L, con_R):
    """Merge the left and right connectivity matrices into one big matrix.

    The two matrices are placed on the diagonal of a block matrix (no
    cross-dataset connections); cells that appear in both datasets under
    the same harmonized name are then collapsed into a single row/column
    by taking the maximum over the duplicated entries.
    """
    func = np.max  # some of the data is repeated from cell to cell
    shape = con_L.shape
    con = np.block([[con_L, np.zeros(shape)], [np.zeros(shape), con_R]])
    all_cells = np.concatenate((list(con_L.index), list(con_R.index)))
    con = pd.DataFrame(con, index=all_cells, columns=all_cells)
    # spread the maximum over duplicated row/column names, then drop the
    # duplicates while keeping the original cell order.
    # NOTE(review): groupby(..., axis=1) was removed in pandas 2.0 --
    # this function requires an older pandas; verify before upgrading.
    con = con.groupby(by=[con.index], axis=0).transform(func)
    con = con.groupby(by=[con.columns], axis=1).transform(func)
    con = con.loc[~con.index.duplicated(keep='first')]
    con = con.loc[:, ~con.columns.duplicated(keep='first')]
    # these 2 lines are just a sanity check that the merge is correct:
    # the merged matrix restricted to one side must equal that side
    print('errors when merging:',
          np.abs(con_R - con.loc[con_R.index, con_R.columns]).max().max(),
          np.abs(con_L - con.loc[con_L.index, con_L.columns]).max().max())
    # TODO(review): it would be good to additionally check that duplicated
    # entries are either equal or that one of them is zero.
    # (groupby(...).first() would also work, but messes up the cell order)
    return con
def get_con_pps(con, labels):
    """Build the four connectivity 'streams' for the cells in *labels*.

    0: feedforward, from labels to all others
    1: feedback, from all others to labels
    2: feedback, normalized by the total input (in-degree) of each target
    3: sum of feedforward and feedback (streams 0 + 1)

    Each stream is returned as a dict with the original matrix ('o') and
    its centered ('c'), normalized ('n') and centered-normalized ('cn')
    versions, as produced by FG.get_ctr_norm.
    """
    ff = con.loc[labels]             # selection to all
    back = con.loc[:, labels]        # all to selection
    fb = back.T
    fb_scaled = (back / back.sum()).T   # divided by each neuron's in-degree
    both = ff + fb                   # combined connection
    streams = {0: ff, 1: fb, 2: fb_scaled, 3: both}
    result = {}
    for key, mat in streams.items():
        ctr, norm, ctr_norm = FG.get_ctr_norm(mat, opt=2)
        result[key] = {'o': mat, 'c': ctr, 'n': norm, 'cn': ctr_norm}
    return result
# =============================================================================
# def get_con_pps_x(con, labels):
# X2A = con.loc[labels] # selection to all
# A2X = con[labels] # all to selection
#
# (X2A_c, X2A_n, X2A_cn) = FG.get_ctr_norm(X2A, opt=2)
# (A2X_c, A2X_n, A2X_cn) = FG.get_ctr_norm(A2X.T, opt=2)
#
# # divide by the indegree
# A2X2 = A2X/A2X.sum()
# (A2X2_c, A2X2_n, A2X2_cn) = FG.get_ctr_norm(A2X2.T, opt=2)
#
# return {0: {'o': X2A, 'c': X2A_c, 'n': X2A_n, 'cn': X2A_cn},
# 1: {'o': A2X, 'c': A2X_c.T, 'n': A2X_n.T, 'cn': A2X_cn.T},
# 2: {'o': A2X2, 'c': A2X2_c.T, 'n': A2X2_n.T, 'cn': A2X2_cn.T}}
# =============================================================================
def get_con_norm_by_in_degree(A2X):
    """Normalize a connectivity matrix by the in-degree of each target.

    Each column is first divided by its total input (column sum); the
    resulting rows are then mean-centered, and finally scaled to unit
    Euclidean norm.

    Returns the tuple (normalized, centered, centered-and-normalized).
    """
    scaled = A2X / A2X.sum()
    centered = scaled.sub(scaled.mean(axis=1), axis=0)
    unit = centered.div(LA.norm(centered.values, axis=1), axis=0)
    return scaled, centered, unit
# #############################################################################
# ########################### PLOTTING CONNECTIVITY ###########################
# #############################################################################
def plot_gram(data, ax=None, name='', splits=[], fs=(13, 10), adj=0.2,
              ctr=True):
    """Plot the gram (similarity) matrix of *data* as a heatmap.

    With ctr=True the data is centered-normalized and the correlation
    matrix is shown on a fixed [-1, 1] color scale; otherwise the data is
    only normalized and the cosine-similarity matrix is shown on a
    data-driven scale. NaN entries are drawn in black; *splits* draws
    black separator lines at the given row/column indices.

    Returns the figure and the axis.
    """
    # BUG FIX: `plt.bwr` is not a colormap (pyplot has no such attribute),
    # which made every call fail. Fetch the 'bwr' colormap and copy it so
    # that set_bad below does not mutate the globally registered instance.
    cmap = plt.cm.bwr.copy()
    if ctr:
        data = FG.get_ctr_norm(data)
        vmin, vmax = (-1, 1)
        gram = FG.get_corr(data, data)
    else:
        data = FG.get_norm(data)
        gram = FG.get_cos_sim(data, data)
        vmin, vmax = (np.nanmin(gram.values), np.nanmax(gram.values))
    print(vmin, vmax)
    if ax is None:
        f, ax = plt.subplots(1, 1, figsize=fs)
        # print('creating new figure in plot_gram')
    plt.sca(ax)
    plt.subplots_adjust(bottom=adj)
    # mask the NaN entries so they are drawn with the 'bad' color
    masked_array = np.ma.array(gram, mask=np.isnan(gram))
    cmap.set_bad('black', 1.)
    cp = ax.imshow(masked_array, cmap=cmap, vmin=vmin, vmax=vmax)
    # black separator lines between groups of cells
    for s in splits:
        ax.plot([-0.5, len(data.columns)-0.5], [s-0.5, s-0.5], c='k')
        ax.plot([s-0.5, s-0.5], [-0.5, len(data.columns)-0.5], c='k')
    plt.xticks(np.arange(len(gram)), list(gram.index), rotation='vertical')
    plt.yticks(np.arange(len(gram)), list(gram.index))
    ax.set_title('grammian' + name)
    plt.colorbar(cp, ax=ax)
    return plt.gcf(), ax
def plot_con_full(save_plots, plot_plots, path_plots,
                  cell_sort=par_con.ORN, cell_name='ORN',
                  sides=['L', 'R', 'M'], ppss=['o', 'c', 'n', 'cn'],
                  strms=[0, 1, 2, 3], show_values=True, show_n_syn=True):
    """
    Plots all the connection directions and all the connection pps,
    e.g., o, c, n, cn (original, centered, normalized, cent-normalized)
    for the cells cell_sort.

    One figure per (side, pps) combination is produced, with one heatmap
    per stream; figures are saved to
    <path_plots><cell_name>_con_<side>_<pps>.png when save_plots is True.
    """
    print(f"""sides to plot {sides}\npps to plot {ppss}\n
    streams to plot {strms}""")
    FP.set_plotting(plot_plots)
    # ################ ANALYSIS #########
    cons = get_con_data()
    # calculating the number of connections and synapses.
    # It is something that is relevant only for streams 0, 1, 3
    # and for L, R, M, and for pps 'o'.
    # n_con contains the number of connections from or towards
    # any cell with the ORN/uPN set;
    # n_syn contains the number of synapses from or towards
    # any cell with the ORN/uPN set
    mi = pd.MultiIndex(levels=[[], []], codes=[[], []],
                       names=['side', 'stream'])
    n_con = pd.DataFrame(columns=mi)
    n_syn = pd.DataFrame(columns=mi)  # this is the number of synapses
    for side in sides:
        chosen_cells = FG.get_match_names(cell_sort, list(cons[side].index))
        con_sel = get_con_pps(cons[side], chosen_cells)
        for strm in strms:
            con_o = con_sel[strm]['o']
            # a connection exists wherever the synapse count is non-zero
            n_con[side, strm] = np.sign(con_o).sum().values.astype(int)
            n_syn[side, strm] = con_o.sum().values.astype(int)
    # con_m = of.merge_con_data(con_L, con_R)
    titles = [f'connections from {cell_name} to all',
              f'connections from all to {cell_name}',
              f'connections from all to {cell_name}, scaled by in-degree',
              f'connections of {cell_name} with all',
              ]
    # iterating over the different "sides" (L, R, M)
    for side in sides:
        chosen_cells = FG.get_match_names(cell_sort, list(cons[side].index))
        print(chosen_cells)
        con_sel = get_con_pps(cons[side], chosen_cells)
        # 0: sel2A, 1: A2sel, 2: A2sel normalized by the input of each neuron
        # iterating over the different pps of the connections;
        # a different figure/file for each
        for pps in ppss:
            # plots of the cells connectivity, using imshow
            f, axx = plt.subplots(len(strms), 1, figsize=(25, 22))
            # iterating over the 4 streams (ff, fb, fb-indegr, (ff+fb)),
            # which are all in the same figure
            for i, strm in enumerate(strms):
                FP.imshow_df(con_sel[strm][pps], ax=axx[i],
                             title=f'{titles[strm]}, {pps}',
                             show_values=show_values)
                if show_n_syn:
                    # adding the information about the number of connections
                    # and the number of synapses above the heatmap
                    x_n = len(n_con.index)
                    for j in range(x_n):
                        axx[i].text((j + 0.5)/x_n, 1.05,
                                    (f'{n_con[side, strm].loc[j]},'
                                     f' {n_syn[side, strm].loc[j]}'),
                                    ha='center', va='bottom',
                                    transform=axx[i].transAxes, rotation=90)
            plt.tight_layout()
            FP.save_plot(f, f'{path_plots}{cell_name}_con_{side}_{pps}.png',
                         save_plots)
# #############################################################################
# ################ PLOTTING SVD ANALYSIS OF CONNECTIONS #####################
# #############################################################################
# need to check the resemblance with the SVD/PCA of the activity function
def plot_SVD_con(data, sets=[], vlim=None, title=''):
    """Plot an SVD overview of a connectivity matrix.

    Shows the matrix itself, its gram matrix (plot_gram), the singular
    value spectrum, and scatter plots of the cells projected on
    (PC1, PC2) and (PC2, PC3), with one color per group of cell indices
    in *sets* and every cell annotated with its name.

    Returns the figure and the 2x3 axes array.
    """
    SVD1 = FG.get_svd_df(data)
    SVD1_s = np.diag(SVD1['s'])  # singular values as a 1d vector
    # cell coordinates along the first three principal axes,
    # scaled by the corresponding singular value
    x = SVD1_s[0] * SVD1['Vh'].iloc[0]
    y = SVD1_s[1] * SVD1['Vh'].iloc[1]
    z = SVD1_s[2] * SVD1['Vh'].iloc[2]
    print(SVD1_s)
    if vlim is None:
        # symmetric axis limits covering all three projections
        vlim = max([x.abs().max(), y.abs().max(), z.abs().max()])*1.1
    if len(sets) == 0:
        sets = [np.arange(len(data.T))]  # default: all cells in one group
    f, axx = plt.subplots(2, 3, figsize=(15, 10))
    plt.suptitle(title)
    FP.imshow_df(data, ax=axx[0, 0])
    plot_gram(data, ax=axx[0, 1])
    # singular value spectrum
    ax = axx[1, 0]
    ax.plot(np.arange(1, 1+len(SVD1_s)), SVD1_s)
    ax.set_xlabel('principal component number')
    plt.sca(ax)
    plt.xticks(np.arange(1, 1+len(SVD1_s)))
    ax.set_ylim(0, None)
    # PC1 vs PC2 scatter
    ax = axx[1, 1]
    for s in sets:
        ax.scatter(x.iloc[s], y.iloc[s])
    ax.set_xlim(-vlim, vlim)
    ax.set_ylim(-vlim, vlim)
    ax.set_xlabel('PC1')
    ax.set_ylabel('PC2')
    for cell_label in list(SVD1['Vh'].columns):
        ax.annotate(cell_label, (x.loc[cell_label], y.loc[cell_label]))
    # PC2 vs PC3 scatter
    ax = axx[1, 2]
    for s in sets:
        ax.scatter(y.iloc[s], z.iloc[s])
    ax.set_xlim(-vlim, vlim)
    ax.set_ylim(-vlim, vlim)
    ax.set_xlabel('PC2')
    ax.set_ylabel('PC3')
    for cell_label in list(SVD1['Vh'].columns):
        ax.annotate(cell_label, (y.loc[cell_label], z.loc[cell_label]))
    axx[0, 2].axis('off')
    plt.tight_layout()
    return f, axx
def get_ORN_act_data_2(dropna: bool = True, fast: bool = True) -> pd.DataFrame:
    """Read the ORN activity of dataset 2 (all ORNs, extended odor panel).

    The raw data was provided as a mat file and exported to a csv so
    python can read it, with ';' as separator because commas appear in
    the odor names. Rows containing NA mean that not all ORNs were
    recorded simultaneously for that stimulus.

    Parameters
    ----------
    dropna:
        whether to drop the rows that have NA
    fast:
        if True, read the cached hdf copy of the activity, which is much
        quicker than rebuilding it from the csv file

    Returns
    -------
    pd.DataFrame indexed by ('odor', 'exp_id', 'conc'), one column
    per cell.
    """
    if fast:
        act_ORN_df = pd.read_hdf(par_act2.file_hdf)
    else:
        # reading the raw table;
        # the ';' separator is needed because there are commas in the odors
        act_ORN_df = pd.read_csv(OLF_PATH / par_act2.file_tbl, sep=';',
                                 index_col=[0, 1, 2])
        # Changing the names from ['Odor', 'Exp_ID', 'Concentration']
        act_ORN_df.index.names = ['odor', 'exp_id', 'conc']
        # concentrations become positive dilution exponents (1e-5 -> 5)
        concs_new = -np.log10(act_ORN_df.index.levels[2]).astype(int)
        act_ORN_df.index = act_ORN_df.index.set_levels(concs_new, 'conc')
        ORNs = np.array(act_ORN_df.columns)
        # the next line removes the columns.name, so it needs to come after
        act_ORN_df.columns = FG.replace_substr_np(ORNs,
                                                  {'_': '/', 'Or': 'ORN '})
        # correcting small typos in the original dataset
        act_ORN_df.rename(inplace=True,
                          index={'4-pheny-2-butanol': '4-phenyl-2-butanol',
                                 '4-methylcyclohexane':
                                     '4-methylcyclohexanol'})
    # whether or not to drop the rows that have NA; instead of dropping,
    # unavailable values could also be combined from different trials,
    # but it is left as is for now
    if dropna:
        act_ORN_df = act_ORN_df.dropna()
    act_ORN_df.columns.name = 'cell'
    print(act_ORN_df.columns.name)
    print(act_ORN_df.index.names)
    return act_ORN_df
def get_ORN_act_data_3(dropna: bool = False, fast: bool = True)\
        -> pd.DataFrame:
    """Build dataset 3: dataset 2 with its missing values filled in.

    Starts from the raw dataset 2 (NaNs kept), computes the per-(odor,
    conc) mean, fills the known gaps of that mean, and then uses the
    mean values to populate the NaNs of the raw dataset. The result is
    somewhat redundant because mean values are repeated several times.

    Parameters
    ----------
    dropna:
        not used here
    fast:
        if True: reads the stored copy of the activity, which is quick
        if False: reads the data from scratch from the csv file, then
        calculates the mean and fills all the missing values

    Returns
    -------
    act: pd.DataFrame
        activity of dataset 3
    """
    if fast:
        act = pd.read_hdf(OLF_PATH / par_act3.file_hdf)
        # the object stored is a DataFrame. Must be a clean way to check
        # that it is indeed the case i imagine
        return act
    # this code is for fast==False
    act = get_ORN_act_data_2(dropna=False, fast=False)
    # raw dataset, nothing averaged;
    # removing the 9, 10, 11 concentrations, as they are not used later
    act = act.loc[(slice(None), slice(None), [4, 5, 6, 7, 8]), :]
    # using the mean dataset based on dataset 2.
    # NOTE(review): `mean(level=...)` was removed in pandas 2.0; this
    # branch requires an older pandas (groupby(level=...) is the
    # modern equivalent)
    act_mean = act.mean(level=('odor', 'conc'))
    act_mean = ORN_act_populate_missing_values(act_mean)
    act_mean = act_mean.dropna(axis=0)  # removes any row that contains nan
    # now we have the nice averages; populate all the missing values of
    # the raw dataset act with them
    cells = list(act.columns)
    # iterating through all the cells and odors and concentrations and
    # trials; maybe there is a faster way, but this is the most
    # straightforward
    for cel in cells:
        for i in range(len(act.index)):
            if pd.isnull(act.loc[:, cel].iloc[i]):
                # (odor, conc) of the current row, skipping exp_id
                idx = act.iloc[i].name[0: 3: 2]
                # NOTE(review): chained-indexing assignment -- relies on
                # older pandas semantics and may silently do nothing
                # under copy-on-write; verify before upgrading pandas
                act.loc[:, cel].iloc[i] = act_mean.loc[idx, cel]
    return act
# dispatch table: dataset number -> loader function
# (dataset 1 is no longer maintained; it was a subset of dataset 2)
ORN_data_func_dict = {#1: get_ORN_act_data_1,
                      2: get_ORN_act_data_2,
                      3: get_ORN_act_data_3}
def get_ORN_act_data(dataset: int, dropna: bool = True, fast: bool = True) \
        -> pd.DataFrame:
    """Load the ORN activity for the given dataset number.

    Parameters
    ----------
    dataset:
        which dataset to load; must be a key of ORN_data_func_dict
        (currently 2 or 3)
    dropna:
        remove the stimuli with unavailable data
    fast:
        if True, read the cached copy of the activity instead of
        rebuilding it from the raw files

    Returns
    -------
    act: pd.DataFrame
        activity of the requested dataset
    """
    loader = ORN_data_func_dict[dataset]
    return loader(dropna=dropna, fast=fast)
def get_cell_names_act(sheet, n_begin, n, n_row):
    """Read the ORN cell names from a row of an activity sheet and
    normalize them ('Or42a_b' -> 'ORN 42a/b')."""
    raw_names = np.array(FG.get_labels_row(sheet, n_begin, n, n_row),
                         dtype='U30')
    return FG.replace_substr_np(raw_names, {'_': '/', 'Or': 'ORN '})
def remove_inactive_neurons(act_df: pd.DataFrame) -> pd.DataFrame:
    """Drop every cell (column) whose summed activity is exactly zero."""
    # summing the underlying ndarray is faster than DataFrame.sum
    keep = act_df.values.sum(axis=0) != 0
    return act_df.loc[:, keep]
def get_normalized_act_per_trial(act_df: pd.DataFrame) -> pd.DataFrame:
    """Normalize each trial by its own maximum response.

    A trial is one (odor, exp_id) pair; the maximum is taken over all
    cells and all concentrations of that trial, so relative responses
    across concentrations within a trial are preserved.

    Parameters
    ----------
    act_df:
        DataFrame indexed by ('odor', 'exp_id', 'conc'), one column
        per cell.

    Returns
    -------
    DataFrame of the same layout, each value divided by its trial's
    maximum.
    """
    # maximum over cells, then over concentrations, for each (odor, exp_id)
    # (`Series.max(level=...)` was removed in pandas 2.0, hence groupby)
    max_per_exp = act_df.max(axis=1).groupby(level=['odor', 'exp_id']).max()
    # move conc into the columns so the division aligns on (odor, exp_id)
    data_normalized = act_df.unstack(level='conc')
    data_normalized = data_normalized.divide(max_per_exp, axis=0).stack()
    return data_normalized
def get_ORN_act_pps(dataset: int, pps1: str, pps2: str, dropna: bool = True,
                    fast: bool = True) -> pd.DataFrame:
    """Load the ORN activity and apply two preprocessing steps.

    pps1 (first step):
        'tn'  : trial-normalized, each trial scaled by its maximum
                (as in the paper)
        'raw' : unchanged, only the 'conc' and 'exp_id' levels are swapped

    pps2 (second step):
        'mean': average over trial repetitions
        'raw' : unchanged
        'l2'  : each response normalized to unit L2 norm
                (all-zero responses are dropped, as they give NaN)

    Usual combinations: ('tn', 'mean') as in the paper; ('raw', 'raw');
    ('raw', 'mean').

    Raises
    ------
    ValueError
        if pps1 or pps2 is not a supported option. (The previous code
        printed a message and called sys.exit(0), which masked the
        failure with a success exit code; it is also validated before
        the expensive data load now.)
    """
    if pps1 not in ('tn', 'raw'):
        raise ValueError(f'act_pps1 is not well defined: {pps1!r}')
    if pps2 not in ('mean', 'raw', 'l2'):
        raise ValueError(f'act_pps2 is not well defined: {pps2!r}')
    act0 = get_ORN_act_data(dataset, dropna=dropna, fast=fast)
    if pps1 == 'tn':
        # normalizing each trial by the maximum response
        act1 = get_normalized_act_per_trial(act0)
    else:  # 'raw'
        act1 = act0.copy()
        act1 = act1.swaplevel('conc', 'exp_id')
    # elif pps1 == 'model':
    #     # generating data using the hill function and EC50 data;
    #     # in this case there are no trials, only conc and odors
    #     act1 = get_ORN_act_from_EC50(dataset)
    if pps2 == 'mean':
        # averaging data between the repetitions
        # (`mean(level=...)` was removed in pandas 2.0, hence groupby)
        act2 = act1.groupby(level=['odor', 'conc']).mean()
    elif pps2 == 'raw':
        act2 = act1.copy()
    else:  # 'l2'
        act2 = act1.copy()
        act2 = (act2.T / LA.norm(act2, axis=1)).T
        # removing all the stimuli where the activity was 0,
        # as the division by a 0 norm produces NaN
        act2 = act2.dropna(axis=0, how='any')
    return act2
def ORN_act_populate_missing_values(act: pd.DataFrame) -> pd.DataFrame:
    """Fill the known gaps of averaged dataset 2 with saturated values.

    For the (odor, cell) pairs below, the responses at concentrations
    6, 5, 4 are missing; they are filled with the saturated (maximum
    available) response of that pair. The very low concentrations (down
    to 10^-11) are ignored. fill_values_act checks that only NaN entries
    are written, so nothing is overwritten.
    """
    act_filled = act.copy()
    # (odor, cell) pairs whose high-concentration responses are missing;
    # it would be nicer if the cell to fill were found automatically
    to_fill = [('2-heptanone', 'ORN 85c'),
               ('methyl salicylate', 'ORN 22c')]
    missing_concs = [4, 5, 6]  # concentrations that need to be filled
    for odor, cell in to_fill:
        saturated = np.nanmax(act_filled.loc[odor, cell].values)
        act_filled = fill_values_act(act_filled, odor, cell,
                                     missing_concs, saturated)
    # when all odors are present, an updated odor list (and possibly a
    # new ORN order) might be needed in a new parameter file
    return act_filled
def fill_values_act(act, odor, cell, concs, val) -> pd.DataFrame:
    """Fill act[(odor, concs), cell] with *val*, but only if every one of
    those entries is currently NaN, so that nothing is overwritten."""
    target = act.loc[(odor, concs), cell]
    if target.isnull().all():
        act.loc[(odor, concs), cell] = val
    return act
# maybe would make it even easier to fill automatically by making a recurrent
# function:
# find a nan, in the data. look at the previous concentration. If it exists
# use it, if not go to a concentration lower. If there are not concentration
# lower, put 0. During the process, tell which cells and odors you are filling
# ################## ACTIVITY SVD ANALYSIS FUNCTIONS ##########################
def get_SVD_cells(data_sets, PCs=(1, 2)) -> pd.DataFrame:
    """
    Compute the SVD of every dataset and collect the selected left
    singular (loading) vectors in a single DataFrame.

    Parameters
    ----------
    data_sets : dict
        Nested dictionary ``data_sets[conc][pps]`` of activity DataFrames.
    PCs : iterable of int, optional (default=(1, 2))
        1-based indices of the principal components to keep. An immutable
        tuple replaces the previous ``np.arange(1, 3)`` default (mutable
        default argument anti-pattern); the iterated values are unchanged.

    Returns
    -------
    pd.DataFrame
        Columns are a MultiIndex ('decomp', 'conc', 'pps', 'n'); each
        column holds one loading vector.
    """
    SVDs = {k1: {k2: FG.get_svd_df(v2.T) for k2, v2 in v1.items()}
            for k1, v1 in data_sets.items()}
    # here before we had the sorting, i don't think we need it now...
    # putting in a single frame all the principal vectors we are going to use
    mi = pd.MultiIndex(levels=[[], [], [], []], codes=[[], [], [], []],
                       names=['decomp', 'conc', 'pps', 'n'])
    U_all = pd.DataFrame(columns=mi)
    for k1, v1 in SVDs.items():
        for k2, v2 in v1.items():
            for PC in PCs:
                U_all['PCA', k1, k2, PC] = v2['U'][PC]
    return U_all
# ########################## NMF ANALYSIS ###################################
# this is not very flexible, especially if one want to have different
# preprocessing for different betas and different initializations for
# differnet betas...
def get_NMF_set(data_sets, N=2, betas=(0.01, 2, 3),
                inits=('nndsvd', 'nndsvda'), rnd=None) -> pd.DataFrame:
    """
    Compute NMFs of all datasets for several beta values and
    initializations and collect the (normalized) H vectors in one frame.

    data_sets should be a dictionary of the following form:
    data_sets['concentration/dataset']['preprocessing']

    Parameters
    ----------
    data_sets : dict
        Nested dictionary ``data_sets[conc][pps]`` of activity DataFrames.
    N : int, optional (default=2)
        Rank of the NMF.
    betas : iterable of float, optional (default=(0.01, 2, 3))
        Beta-divergence parameters to try. Tuples replace the previous
        mutable list defaults (mutable default argument anti-pattern).
    inits : iterable of str, optional (default=('nndsvd', 'nndsvda'))
        NMF initialization schemes to try.
    rnd : int or None, optional
        Random seed forwarded to the NMF.

    Returns
    -------
    pd.DataFrame
        Columns are a MultiIndex ('decomp', 'conc', 'pps', 'n').
    """
    Ns = np.arange(N)+1
    mi = pd.MultiIndex(levels=[[], [], [], []], codes=[[], [], [], []],
                       names=['decomp', 'conc', 'pps', 'n'])
    U_all = pd.DataFrame(columns=mi)
    for (beta, init) in itertools.product(betas, inits):
        for conc, datas in data_sets.items():
            for pps, data in datas.items():
                # NMF needs non-negative data, hence the rectification
                data1 = FG.rectify(data)
                _, H, _, _ = nmf.get_nmf_df(data1, k=N, beta=beta,
                                            init=init, rnd=rnd)
                # for easier comparison, center-normalize the components
                H = FG.get_ctr_norm(H.T, opt=2)[1]
                for n in Ns:
                    U_all['NMF_' + str(round(beta)) + '_' + init,
                          conc, pps, n] = H.iloc[:, n-1]
    return U_all
# ########################## PLOTTING ACTIVITY DATA #########################
def plot_activity_old(act_df, cell_order, odor_order, norm=True, title=''):
    """
    Plot the activity, one panel per concentration.

    Rows are stimuli (odors), columns are ORNs (as in the paper figure).

    Parameters
    ----------
    act_df : pd.DataFrame
        Activity with an (odor, conc) MultiIndex on the rows.
    cell_order : list
        Column (cell) ordering.
    odor_order : list
        Odor ordering.
    norm : bool, optional (default=True)
        If True the same color scale is used for every concentration and
        a single shared colorbar is drawn on an extra axis; if False each
        concentration panel gets its own colorbar.
    title : str, optional
        Figure title.

    Returns
    -------
    (matplotlib figure, array of axes)
    """
    print(cell_order)
    print(odor_order)
    act_df = act_df[cell_order].reindex(odor_order, level='odor')
    conc_list = act_df.index.get_level_values('conc').unique()
    odor_list = act_df.index.get_level_values('odor').unique()
    n_conc = len(conc_list)
    cell_list = list(act_df)
    # per-concentration color limits; groupby(level=...) replaces the
    # DataFrame.max(axis=0, level=...) keyword, which was removed in
    # pandas 2.0; reindex keeps the limits aligned with the panel order
    vmax = act_df.groupby(level='conc').max().max(axis=1).reindex(conc_list)
    vmin = act_df.groupby(level='conc').min().min(axis=1).reindex(conc_list)
    if norm is True:
        vmax[:] = vmax.max()
        vmin[:] = vmin.min()
        n_plots = n_conc+1
    else:
        n_plots = n_conc
    f, axx = plt.subplots(1, n_plots, figsize=(n_plots*2, len(odor_list)/10+1))
    for i in range(n_conc):
        ax = axx[i]
        cp = ax.imshow(act_df.xs(conc_list[i], level='conc'),
                       vmin=vmin.iloc[i], vmax=vmax.iloc[i], cmap='jet')
        plt.sca(ax)
        plt.xticks(np.arange(len(cell_list)), cell_list, rotation='vertical')
        # putting the odor labels only on the most left graph
        if i == 0:
            ax.set_yticks(np.arange(len(odor_list)))
            ax.set_yticklabels(odor_list)
        else:
            ax.set_yticks([])
        ax.set_title('concentration 1e-' + str(conc_list[i]))
        if norm is False:
            f.colorbar(cp, ax=ax)
    if norm is True:
        # the extra (last) axis only hosts the shared colorbar
        ax = axx[-1]
        ax.axis('off')
        f.colorbar(cp, ax=ax)
    plt.tight_layout()
    plt.suptitle(title)
    # plt.show()
    return f, axx
def plot_activity(act_df, cell_order, odor_order, norm=True, title='',
                  cmap=plt.cm.viridis):
    """
    Plot the activity, one panel per concentration.

    Rows are stimuli (odors), columns are ORNs (as in the paper figure).

    Parameters
    ----------
    act_df : pd.DataFrame
        Activity with an (odor, conc) MultiIndex on the rows.
    cell_order : list
        Column (cell) ordering.
    odor_order : list
        Odor ordering.
    norm : bool, optional (default=True)
        If True the same color scale (and a single shared colorbar on the
        right) is used for every concentration; if False each panel has
        its own colorbar.
    title : str, optional
        Figure title.
    cmap : matplotlib colormap, optional
        Colormap passed to ``FP.imshow_df`` and the shared colorbar.

    Returns
    -------
    (matplotlib figure, array of axes)
    """
    if not norm:
        plt.rc('font', size=5)
    else:
        plt.rc('font', size=6)
    print(cell_order)
    print(odor_order)
    act_df = act_df[cell_order].reindex(odor_order, level='odor')
    conc_list = act_df.index.get_level_values('conc').unique()
    print(conc_list)
    odor_list = act_df.index.get_level_values('odor').unique()
    n_conc = len(conc_list)
    # per-concentration color limits; groupby(level=...) replaces the
    # DataFrame.max(axis=0, level=...) keyword, which was removed in
    # pandas 2.0; reindex keeps the limits aligned with the panel order
    vmax = act_df.groupby(level='conc').max().max(axis=1).reindex(conc_list)
    vmin = act_df.groupby(level='conc').min().min(axis=1).reindex(conc_list)
    if norm is True:
        vmax[:] = vmax.max()
        vmin[:] = vmin.min()
    n_plots = n_conc
    f_w = n_plots*1.7
    f_h = len(odor_list)/12+0.4
    f, axx = plt.subplots(1, n_plots, figsize=(f_w, f_h))
    for i in range(n_conc):
        if n_conc > 1:
            ax = axx[i]
        else:
            ax = axx
        FP.imshow_df(act_df.xs(conc_list[i], level='conc'), ax=ax,
                     cmap=cmap, vlim=[vmin.iloc[i], vmax.iloc[i]],
                     title='concentration 1e-' + str(conc_list[i]),
                     cb=not norm, cb_frac=0.066)
        ax.set_xlabel('')
        if i > 0:
            ax.set_yticks([])
            ax.set_ylabel('')
    if norm is True:
        # shared colorbar drawn in its own axes on the right
        ri = 1 - 0.3/f_w
        bot = 0.7/f_h
        top = 1-0.4/f_h
        plt.subplots_adjust(left=1.3/f_w, bottom=bot,
                            right=ri, top=top, wspace=0.01/f_w)
        cbaxes = f.add_axes([ri + 0.1/f_w, bot, 0.1/f_w, top-bot])
        scale = mpl.colors.Normalize(vmin=vmin.iloc[0],
                                     vmax=vmax.iloc[0])
        clb = mpl.colorbar.ColorbarBase(cbaxes, norm=scale, cmap=cmap)
        clb.outline.set_linewidth(0.00)
        clb.ax.tick_params(size=2, direction='in', pad=1.5)
    else:
        plt.subplots_adjust(left=1.3/f_w, bottom=0.2,
                            right=1 - 0.3/f_w, top=0.9, wspace=1.4/f_w)
        plt.rc('font', size=5)
    plt.suptitle(title)
    plt.rc('font', size=6)
    return f, axx
def plot_activity2(act_df, cell_order, odor_order, title=''):
    """
    Plot all responses in a single figure with two panels.

    Rows are cells, columns are stimuli. The top panel orders the stimuli
    by odor; the bottom panel orders them first by concentration, so each
    concentration block looks like a flipped version of the
    per-concentration plots.
    """
    plt.rc('font', size=6)
    fig, axes = plt.subplots(2, 1, figsize=(11, 6))
    ordered = act_df[cell_order].reindex(odor_order, level='odor')
    # top panel: stimuli grouped by odor
    FP.imshow_df(ordered.T, ax=axes[0], cmap=plt.cm.jet)
    # bottom panel: stimuli grouped first by concentration
    by_conc = ordered.sort_index(axis=0, level='conc')
    FP.imshow_df(by_conc.T, ax=axes[1], cmap=plt.cm.jet)
    plt.tight_layout()
    plt.suptitle(title)
    return fig, axes
# #############################################################################
# #############################################################################
# ########################### CLASSES DEFINITIONS ##########################
# #############################################################################
# ############################### LN ANALYSIS ##############################
def plot_hist_cor_pred_vs_fake(pred, fake, verbose=True):
    """
    Compare predicted correlation coefficients with those from shuffled
    ("fake") data.

    ``pred`` and ``fake`` must be matrices with the same number of
    columns. Two histograms are plotted in one figure: the distribution
    of all fake correlation coefficients with the predicted ones on top,
    and the distribution of the row-wise maxima. A 2-sample t-test
    p-value is shown on each panel.

    Parameters
    ----------
    pred : np.ndarray
        Correlation coefficients from the prediction.
    fake : np.ndarray
        Correlation coefficients from shuffled data.
    verbose : bool, optional (default=True)
        If True the t-test results are also printed.

    Returns
    -------
    (matplotlib figure, array of axes)

    Raises
    ------
    ValueError
        If the inputs do not have the same number of columns. (The
        original version only printed a message and implicitly returned
        None, which made callers crash at tuple unpacking.)
    """
    if pred.shape[1] != fake.shape[1]:
        raise ValueError("pred and fake must have the same number of "
                         f"columns, got {pred.shape[1]} and {fake.shape[1]}")
    pred_max = pred.max(axis=1)
    fake_max = fake.max(axis=1)
    ttest_all = sps.ttest_ind(pred.flatten(), fake.flatten())
    ttest_max = sps.ttest_ind(pred_max.flatten(), fake_max.flatten())
    f, axx = plt.subplots(1, 2, figsize=(5, 2.5))
    ax = axx[0]
    ax.hist(fake.flatten(), density=True, bins=100,
            label='from shuffled data')
    ax.hist(pred.flatten(), density=True, histtype='step', bins=20,
            label='from prediction')
    ax.set_title('all corr. coef.')
    ax.set_ylabel('density')
    ax.legend(prop={'size': 8}, loc=3)
    ax.text(0.03, 0.8, 'p-value: \n' + '{:.0e}'.format(ttest_all.pvalue),
            transform=ax.transAxes)
    ax = axx[1]
    ax.hist(fake_max, density=True, bins=100)
    ax.hist(pred_max, density=True, histtype='step',
            bins=10)
    ax.set_title('best corr. coef.')
    ax.text(0.03, 0.80, 'p-value: \n' + '{:.0e}'.format(ttest_max.pvalue),
            transform=ax.transAxes)
    if verbose is True:
        print('ttest all:')
        print(ttest_all)
        print('ttest max:')
        print(ttest_max)
    return f, axx
def ctr_divide(x):
    """Center-normalize x (FG option 1) and divide by its squared norm."""
    centered = FG.get_ctr_norm(x, opt=1)[0]
    return centered / (centered ** 2).sum()
# ##################### ORN/PN ACT VS ACT ###################################
# =============================================================================
# as i will now do everything in a class, i think it makes sense to separate
# some stuff of what was done before in a single instance.
# before i was trying to analyse all at once, but it was creating
# some difficulties. for example analyzing ORNs and uPNs at once was not easy
# to manage, so maybe it would make sense to have 2 instances of this in a
# class
# i don't remember if it makes any sense at all to analyse the 2 cell types
# at the same time. Is there any analysis that is at all in common?
# i guess since things are getting a bit more complicated, it would be good
# to have a plan/description of what we want to achieve, so that we can
# code in a better way.
class NeurActConAnalysis:
'''
this class is relevant for ORNs and for uPNs
'''
sides = ['L', 'R', 'M']
    def __init__(self, dataset: int, cell_type: str, strms: List[int],
                 con_pps_k: str, save_plots: bool, plot_plots: bool,
                 act_sel_ks: List[str], act_pps_k1: str, act_pps_k2: str,
                 neur_order: List[str] = None, odor_order: List[str] = None,
                 odor_sel: List[Tuple[str, str]] = None,
                 path_plots: str = None, reduce: bool = False,
                 subfolder: str = 'act_ORN_vs_con_ORN-LN/') -> Any:
        """
        Initialize the activity-vs-connectivity analysis for one cell type.

        After the initialization con_strms2, con_strms3 and act will all
        have the same ordering of neurons, imposed by neur_order; if
        neur_order is empty, the order is the one already present in the
        activity matrix.

        The reduce option is for the case when the 2 act_pps options are
        'raw', and we are taking the same number of points (trials)
        for each odor.

        Parameters
        ----------
        dataset : int
            Which activity dataset to load (forwarded to get_ORN_act_pps).
        cell_type : str
            'ORN' is the only supported value here (the 'PN' branch is
            commented out; any other value raises ValueError).
        strms : List[int]
            Connectivity streams to load (0: ORNs to LNs, 1: LNs to ORNs,
            2: LNs to ORNs with in-degree scaled -- see comments below).
        con_pps_k : str
            Key of the connectivity preprocessing to use.
        save_plots : bool
            Whether figures are written to disk (under path_plots).
        plot_plots : bool
            Whether figures are displayed (forwarded to FP.set_plotting).
        act_sel_ks : List[str]
            Keys of the concentration selections to keep; must be a subset
            of the internally defined selections ('all', '45', '678',
            '4'...'8').
        act_pps_k1 : str
            First preprocessing key for the activity data.
        act_pps_k2 : str
            Second preprocessing key for the activity data.
        neur_order : List[str], optional
            Neuron ordering; must be a subset of the activity columns.
        odor_order : List[str], optional
            Odor ordering; must be a subset of the activity odors.
        odor_sel : List[Tuple[str, str]], optional
            If given, restrict the activity to these rows.
        path_plots : str, optional
            Where to save plots; derived from subfolder when None.
        reduce : bool, optional
            If True and both act_pps keys are 'raw', keep the same number
            of trials for every odor.
        subfolder : str, optional
            Subfolder of the plots directory used when path_plots is None.
        """
        # plotting default
        plt.rc('font', size=6)
        self.dataset = dataset
        print('dataset used: ', str(self.dataset))
        self.save_plots = save_plots
        self.plot_plots = plot_plots
        self.path_plots = path_plots
        if self.save_plots is True:
            if path_plots is None:
                self.path_plots = FG.create_path(OLF_PATH / f'plots/{subfolder}')
            print(f'plots will be saved in {self.path_plots}')
        else:
            print('plots will not be saved')
        FP.set_plotting(self.plot_plots)
        self.cell_type = cell_type
        self.strms = strms  # stream, i.e., connectivity directions
        self.con_pps_k = con_pps_k
        self.fs = (16, 12)  # default figure size
        print(f'cell type: {self.cell_type}\n'
              f'streams: {self.strms}\n'
              f'connectivity data pps: {self.con_pps_k}')
        # #####################################################################
        # ##############  INITIALIZING THE ACTIVITY DATA  #####################
        # #####################################################################
        # here we will need to have some options about choosing the ORN dataset
        self.act_pps_k1 = act_pps_k1
        self.act_pps_k2 = act_pps_k2
        if self.cell_type == 'ORN':
            self.act = get_ORN_act_pps(self.dataset, act_pps_k1, act_pps_k2,
                                       fast=True)
            # the fast option means that it is read directly from a hdf
            # file
        # elif self.cell_type == 'PN':
        #     self.act = get_uPN_act_pps(self.dataset, act_pps_k1, act_pps_k2,
        #                                bin_con=True, side='S')
        else:
            raise ValueError(f'no cell type {self.cell_type}, exiting.')
        # print('EC50 data: retrieving')
        # self.EC50 = get_ORN_EC50(dataset=dataset)
        # self.EC50.fillna(0, inplace=True)  # puts 0 instead of nan
        # print('EC50 data: done retrieving')
        # there should be a check here that the names we chose are related to
        # the names in the activity
        # these are mainly used as sets, not so much for their order
        self.neur_act = np.array(self.act.columns)
        self.odor_act = np.array(self.act.index
                                 .get_level_values('odor').unique())
        # setting the internal variables neur_order and odor_order
        # if neur_order is empty or not compatible with what is in the activity
        # dataset, the order will be imposed by what is in the activity data
        self.set_neur_order(neur_order)
        self.set_odor_order(odor_order)
        self.odor_sel = odor_sel
        if odor_sel is not None:
            self.act = self.act.loc[odor_sel]
        # this puts in the activity the order that is in neur_order
        self.act = self.act.loc[:, self.neur_order]
        # this is reducing the activity (same number of trials per odor)
        if act_pps_k1 == 'raw' and act_pps_k2 == 'raw' and reduce is True:
            print(f'shape of act: {self.act.shape}')
            print(self.act.groupby('odor').size())
            min_trials = self.act.groupby('odor').size().min()
            print(f'min # of trials per odor: {min_trials}')
            # so that there is the same number of samples for each odor
            self.act = self.act.groupby('odor').head(min_trials)
        # this could be very much simplified and streamed if the ec50 was
        # not there, not sure what to do about it
        # for the moment we are not using ec50 anyway
        self.act_sels = {#'ec50': -self.EC50,
                         'all': self.act,
                         '45': self.act.loc(axis=0)[:, [4, 5]],
                         '678': self.act.loc(axis=0)[:, [6, 7, 8]],
                         '4': self.act.loc(axis=0)[:, 4],
                         '5': self.act.loc(axis=0)[:, 5],
                         '6': self.act.loc(axis=0)[:, 6],
                         '7': self.act.loc(axis=0)[:, 7],
                         '8': self.act.loc(axis=0)[:, 8]}
        self.act_sel_ks = act_sel_ks
        # keeping only those selections that are indicated by the input
        # and adding a layer in the dictionnary with key 'o', as later
        # we will be adding new pps for the act.
        if not set(act_sel_ks) <= self.act_sels.keys():
            raise ValueError(f'keys {act_sel_ks} are not available.\n'
                             f'Available keys are {list(self.act_sels.keys())}')
        self.act_sels = {k: {'o': self.act_sels[k]} for k in self.act_sel_ks}
        print('Done initializing main activity data.')
        # #####################################################################
        # ##############  IMPORTING GRAPE APPLE ACT DATA  #####################
        # #####################################################################
        # print('importing grape and apple activity data')
        # # at a later point, we might explore the raw responses, for now
        # # let's just work with the means
        # self.act_apl_gr_raw = pd.read_hdf(par_act3.file_apple_grape_hdf)
        # self.act_apl_gr = self.act_apl_gr_raw.mean(level=('odor', 'conc'))
        #
        # # this is shifting the concentrations so that it is in the same
        # # range as all the rest
        # self.act_apl_gr.rename(index={0: 4, 1: 5, 2: 6, 3: 7, 4: 8},
        #                        inplace=True)
        # print('Done importing grape and apple activity data')
        # #####################################################################
        # #############  INITIALIZING CONNECTIVITY DATA  ######################
        # #####################################################################
        print('Initializing connectivity data')
        # there is one more thing to be careful about here, it is that
        # the activity data might not have the same neurons as the
        # connectivity data, so you should actually only look at the neurons
        # that are present in the activity
        # also i wonder if i should be careful about the order here...
        path = f'{PATH}results/cons/'
        self.con_strms2 = {}
        for s in self.strms:
            # this is the old version, where only a subset of LN is there
# =============================================================================
#             self.con_strms2[s] = pd.read_hdf(f'../results/cons/cons_'
#                                              f'{cell_type}_{s}.hdf')
# =============================================================================
            self.con_strms2[s] = pd.read_hdf(f'{path}cons_'
                                             f'{cell_type}_{s}_all.hdf')
            # restrict and reorder the neurons to match the activity
            self.con_strms2[s] = self.con_strms2[s].loc[self.neur_order]
            self.con_strms2[s] = FG.get_pps(self.con_strms2[s],
                                            ['o', 'cn', 'n'])
        # self.con_strms3 = pd.read_hdf(f'{path}cons_{cell_type}.hdf')
        self.con_strms3 = pd.read_hdf(f'{path}cons_{cell_type}_all.hdf')
        self.con_strms3 = self.con_strms3.loc[self.neur_order]
        self.con_strms3 = FG.get_pps(self.con_strms3, ['o', 'cn', 'n'])
        # if one wants to, one could rearrange the LNs in con_stream3:
        # LN_names = ['Broad T1 L', 'Broad T2 L', 'Broad T3 L']
        # (con_strms3.swaplevel(axis=1).stack().loc[:, LN_names]
        #  .unstack().swaplevel(axis=1))
        # i don't remember what is used where and what is needed where
        # i guess i will just debug when the time will be right
        self.LNs = ['Broad T1 L', 'Broad T2 L', 'Broad T3 L', 'Broad T M L',
                    'Broad T1 R', 'Broad T2 R', 'Broad T3 R', 'Broad T M R',
                    'Broad T M M',
                    'Broad D1 L', 'Broad D2 L', 'Broad D M L',
                    'Broad D1 R', 'Broad D2 R', 'Broad D M R',
                    'Broad D M M',
                    'Keystone L L', 'Keystone R L',
                    'Keystone L R', 'Keystone R R',
                    'Keystone M M',
                    'Picky 0 [dend] L', 'Picky 0 [dend] R', 'Picky 0 [dend] M',
                    'Picky 0 [axon] L', 'Picky 0 [axon] R', 'Picky 0 [axon] M']
        # boundaries between the LN families above (Broad T / Broad D /
        # Keystone / Picky), used when plotting
        self.splits_LN = [9, 9 + 7, 9 + 7 + 5]
        # these are the pps that are outputted by get_con_pps
        self.pps = ['o', 'n', 'cn']
        # glomeruli data that is not used in the paper
        # self.con_gloms = pd.read_hdf(f'{path}cons_gloms_M.hdf')
        # idx = self.con_gloms.index
        # idx = [f'{cell_type} {name}' for name in idx]
        # self.con_gloms.index = idx
        # self.con_gloms = self.con_gloms.loc[self.neur_order]
        # self.con_gloms = FG.get_pps(self.con_gloms, ['o', 'cn'])
        print('done initializing connectivity data.')
        # #####################################################################
        # #############  DATABASES THAT WILL BE USED  #########################
        # #####################################################################
        # containers for the activity decompositions (PCA/NMF/SNMF/NNC)
        mi6 = pd.MultiIndex(levels=[[], [], [], [], [], []],
                            codes=[[], [], [], [], [], []],
                            names=['conc', 'pps', 'decomp', 'par1', 'par2', 'n'])
        self.act_W = pd.DataFrame(columns=mi6)
        self.act_W_cn = pd.DataFrame(columns=mi6)
        self.act_H = pd.DataFrame(columns=mi6)
        self.act_Y = pd.DataFrame(columns=mi6)
        self.act_Z = pd.DataFrame(columns=mi6)
        self.act_M = {}
        mi3 = pd.MultiIndex(levels=[[], [], []], codes=[[], [], []],
                            names=['conc', 'decomp', 'k'])
        # final objective values of the decompositions
        self.errors = pd.Series(index=mi3)
        # the additional parameter i here is the identifier for the shuffling
        mi7 = pd.MultiIndex(levels=[[], [], [], [], [], [], []],
                            codes=[[], [], [], [], [], [], []],
                            names=['conc', 'pps', 'i', 'decomp', 'par1', 'par2', 'n'])
        # container for the PCA/NMF calculated on the shuffled/fake data
        self.act_fk_U = pd.DataFrame(columns=mi7)
        self.act_fk_U_cn = pd.DataFrame(columns=mi7)
        # the way these dictionaries will be organized is the following way:
        # keys are the streams of the connectivity:
        # 0 for ORNs to LNs
        # 1 for LNs to ORNs
        # 2 for LNs to ORNs with in-degree scaled
        # the elements of the dictionary are DataFrames, with the index
        # being different methods to analyse activity, and the columns
        # being different LNs, i.e., the connection vectors
        self.cc = {}  # cc is correlation coefficient
        self.cc_pv = {}  # pv is p-value
        # cosine similarity
        self.CS = {}
        self.CS_pv = {}
        self.subspc_cc = {}
        self.subspc_cc_pv = {}
        self.subspc_CS = {}
        self.subspc_CS_pv = {}
        # subspace overlap
        self.subspc_OL = {}
        self.subspc_OL_pv = {}
        print('Done creating instance of class NeurActConAnalysis')
# #########################################################################
# ###################### ADMIN FUNCTIONS ################################
# #########################################################################
def set_neur_order(self, neur_order):
"""
setting the internal variable neur_order and checking that all those
cells are indeed present in the activity data
"""
self.neur_order = self.neur_act if neur_order is None else neur_order
if not set(self.neur_order).issubset(set(self.neur_act)):
print('ERROR: provided list of neuron is not contained in the'
+ ' activity data')
self.neur_order = self.neur_act
def set_odor_order(self, odor_order):
"""
setting the internal variable odor_order and checking that all those
odors are indeed present in the activity data
"""
self.odor_order = self.odor_act if odor_order is None else odor_order
if not set(self.odor_order).issubset(set(self.odor_act)):
print('ERROR: provided list of odors is not contained in the'
+ ' activity data')
self.odor_order = self.odor_act
# #########################################################################
# ###################### PLOTTING ACTIVITY ##############################
# #########################################################################
    def plot_act(self, ps_str, act=None, neur_order=None, odor_order=None):
        """
        Plot the activity data in several layouts and save the figures.

        Via ``plot_activity``: one figure where each concentration panel
        is scaled to its own max (norm=False) and one where all
        concentrations share the same scale (norm=True); via
        ``plot_activity2``: one figure with all responses in a single
        plot. Figures are saved when ``self.save_plots`` is True.

        Parameters
        ----------
        ps_str : str
            Suffix appended to the saved file names.
        act : pd.DataFrame, optional
            Activity to plot; defaults to a copy of ``self.act``.
        neur_order : list, optional
            Neuron ordering; defaults to ``self.neur_order``.
        odor_order : list, optional
            Odor ordering; defaults to ``self.odor_order``.
        """
        # because in the raw case odors will have duplicates, before
        # plotting we need to clean up the data by removing the duplicates:
        if act is None:
            act = self.act.copy()
        act = act[~act.index.duplicated(keep='first')]
        if neur_order is None:
            neur_order = self.neur_order
        if odor_order is None:
            odor_order = self.odor_order
        for norm in [False, True]:
            f, _ = plot_activity(act, neur_order, odor_order,
                                 norm=bool(norm), title=f'{self.cell_type} activity')
            file_str = (self.path_plots / f'{self.cell_type}_act'
                                          f'{self.dataset}_scale{int(norm)}'
                                          f'{ps_str}.png')
            FP.save_plot(f, file_str, self.save_plots, dpi=300)
        f, _ = plot_activity2(act, neur_order, odor_order,
                              title=f'{self.cell_type} activity')
        file_str = (self.path_plots / f'{self.cell_type}_act'
                                      f'{self.dataset}{ps_str}.png')
        FP.save_plot(f, file_str, self.save_plots)
    def plot_cor(self, conc, pps, ps_str):
        """
        Plot the neuron-neuron and the odor-odor covariance matrices of
        the activity for the chosen concentration selection.

        Parameters
        ----------
        conc : str
            Key of the concentration selection (e.g. 'all', '45').
        pps : str
            Key of the activity preprocessing; 'cn' additionally
            center-normalizes the data for the odor covariances.
        ps_str : str
            Suffix appended to the saved file names.
        """
        # correlation between cells
        f, _, _ = plot_cov(self.act_sels[conc][pps], self.neur_order)
        file_str = (self.path_plots / f'{self.cell_type}_act'
                                      f'{self.dataset}_{conc}_{pps}_neur-cov{ps_str}.png')
        FP.save_plot(f, file_str, self.save_plots, dpi=300)
        # correlation between odors
        # using the unstack makes that we average between concentrations
        # if we want to have correlations for the odors, then we need to
        # ctr and normalize in the other direction than for the cells
        # because in the raw case odors will have duplicates, before
        # plotting we need to clean up the data by removing the duplicates:
        act = self.act_sels[conc]['o'].copy()
        act = act[~act.index.duplicated(keep='first')]
        # i don't think that removing duplicates has any effect here
        act = act.unstack().T
        if pps == 'cn':
            act = FG.get_ctr_norm(act)
        f, _, _ = plot_cov(act, self.odor_order, norm=False)
        file_str = (self.path_plots / f'{self.cell_type}_act'
                                      f'{self.dataset}_{conc}_{pps}_odor-cov{ps_str}.png')
        FP.save_plot(f, file_str, self.save_plots, dpi=300)
# #########################################################################
# ################## SAVING THE CONNECTIVITY IN HDF5 ####################
# #########################################################################
# exporting connectivity data
# there is already a function in con_preprocess_2.py that does the same
# thing. So not sure if this is not redundant...
def export_con_data(self):
"""
this exports the con_stream2 dataset into 3 hdf5 files
"""
path = f'{PATH}results/'
ct = self.cell_type
self.con_strms2[0]['o'].to_hdf(f'{path}con_{ct}2LN.hdf', f'{ct}2LN')
self.con_strms2[1]['o'].to_hdf(f'{path}con_LN2{ct}.hdf', f'LN2{ct}')
self.con_strms2[2]['o'].to_hdf(f'{path}con_LN2{ct}_indeg.hdf',
f'LN2{ct}2_indeg')
# #########################################################################
# ###################### PLOTTING CONNECTIVITY ##########################
# #########################################################################
    def plot_con(self, LNs, pps):
        """
        Plot the connectivity between ORNs/uPNs and the given LNs, one
        panel per connectivity stream, and save the figure.

        Parameters
        ----------
        LNs : list
            LN columns to show.
        pps : str
            Key of the connectivity preprocessing to plot.
        """
        ct = self.cell_type
        titles = [f'{ct}s to LNs',
                  f'LNs to {ct}s',
                  f'LNs to {ct}s, in-degree scaled',
                  f'{ct}s with LNs, average ff and fb' ]
        # print('hello')
        f, axx = plt.subplots(1, len(self.strms), figsize=(7, 2.5))
        for i in self.strms:
            ax = axx[i]
            data = self.con_strms2[i][pps].loc[:, LNs].copy()
            # ordering the ORNs or uPNs as implied by the internal variable
            data = data.loc[self.neur_order]
            FP.imshow_df(data, ax=ax, splits_x=self.splits_LN, cb_frac=0.042)
            ax.set_xlabel('')
            ax.set_ylabel('')
            ax.set_title(titles[i])
            # y labels only on the leftmost panel
            if i > 0:
                ax.set_yticks([])
        # plt.tight_layout()
        plt.subplots_adjust(bottom=0.22, top=0.95, left=0.1, right=0.95)
        FP.save_plot(f, self.path_plots / f'con_{self.cell_type}-LN_{pps}.png',
                     self.save_plots, dpi=300)
# #########################################################################
# ############## CALCULATING DIFFERENT ANALYSIS OF ACTIVITY #############
# #########################################################################
# here we have to decide on what data do we exactly calculate the PCA
# of the data. Here are some choices that we have:
# - which concentration selection
# - which preprocessing of the data
# actually we could be quite open and just decide here which
# combinations of these things we want. That want we don't need to make
# anything complicated. It will just be added to a global structure
# which contains all the processing (PCA, NMF) of the activity
# And then at a later stage, you can still choose what from all these
# processing you want to correlate/plot
# So at this stage we are just calculating some pps of the act
# and we are choosing which conc selection and pps
# Actually the thing is that we don't have at this point all the pps
# of all the different activity data
# actually seeing what i did before, i see that i need some home-made
# ppsing of the data before doing the analysis
# so maybe a useful function would be to create a pps of the data
# which would iterate over all the concentrations and just add
def add_act_pps(self, key, func):
if key in self.act_sels[next(iter(self.act_sels))].keys():
print('the key ' + key + ' exists already! Exiting.')
return
for k in self.act_sels.keys():
self.act_sels[k][key] = func(self.act_sels[k]['o'])
def calc_act_PCA(self, concs, act_pps, k: int = 3):
"""
calculates the PCA loading vectors for the selected concentratiosn
and the selected pps of the activity data
If a loading vector has all the values of the same size, then
that vector will be made positive
Parameters
----------
"""
print(f'data selections for which PCA will be calculated: {concs}')
for conc in concs:
SVD = FG.get_svd_df(self.act_sels[conc][act_pps].T)
W = SVD['U']
H = SVD['Vh']
for n in range(1, k + 1):
# making the eigenvector positive if all the values are <=0
# it is usually only something that would happen to the
# first component
sign = -1 if np.sum(W[n] <= 0) == len(W[n]) else 1
self.act_W[conc, act_pps, 'SVD', '', '', n] = sign * W[n]
self.act_H[conc, act_pps, 'SVD', '', '', n] = H.T[n]
def calc_act_NMF(self, concs, act_pps, k: int = 2, beta=2,
n_rnd: int = 10):
"""
N is the order of the NMF
beta is the parameter in the optimization function of the NMF
n_rnd is the number of random initialization in addition on the
deterministic initializations to try in order to get the best NMF
(i.e., with the smallest error according to the objective function)
"""
meth = 'NMF_' + str(k)
print(f'data selections for which NMF will be calculated: {concs}')
for conc in concs:
data = self.act_sels[conc][act_pps].T
# this is the usual way of having data: columns odors, rows: ORNs
W, H, e, init = nmf.get_nmf_best_df(data, k=k, beta=beta,
n_rnd=n_rnd)
self.errors[conc, 'NMF', k] = e
for n in range(1, k + 1):
self.act_W[conc, act_pps, meth, beta, init, n] = W[n]
self.act_H[conc, act_pps, meth, beta, init, n] = H.T[n]
print(f'finished conc: {conc}, error: {e}')
    def calc_act_SNMF(self, concs, act_pps, k: int = 3):
        """
        Calculate the factors arising from semi-NMF (SNMF) of rank ``k``
        for the selected concentration selections.

        For each selection the factor Z is stored in ``act_H``, the
        readout W = data @ Z.T / n_samples in ``act_W``, the Gram matrix
        M = Z @ Z.T / n_samples in ``act_M``, and the final objective
        value in ``self.errors``.
        """
        meth = 'SNMF_' + str(k)
        print(f'data selections for which SNMF will be calculated: {concs}')
        ks = np.arange(k) + 1
        for conc in concs:
            print(f'working on conc: {conc}')
            data = self.act_sels[conc][act_pps].T
            Z, e, m = nmf.get_snmf_best_df(data, k=k, rtol=1e-6,
                                           max_iter=int(1e6))
            # print(m)
            self.errors[conc, 'SNMF', k] = e
            # W: projection of the data on Z; M: scaled Gram matrix of Z
            W = data @ Z.T / data.shape[1]
            M = Z @ Z.T / data.shape[1]
            M = pd.DataFrame(M, index=ks, columns=ks)
            for n in range(1, k + 1):
                self.act_W[conc, act_pps, meth, '', '', n] = W[n]
                self.act_H[conc, act_pps, meth, '', '', n] = Z.T[n]
            self.act_M.setdefault(conc, {}).setdefault(act_pps, {})[meth] = M
            print(f'finished conc: {conc}, error: {e}')
def calc_act_NNC(self, concs, act_pps, k: int = 3, alpha=50, cycle=500,
scaling=1, rtol=1e-7, rectY: bool = True,
rectZ: bool = True, beta=0.2):
"""
Calculating the W that would arise from the non-negative olfactory
circuit
adding the scaling as it also influences the W
Parameters
----------
alpha:
50 usually works well, that the beginning value for parameter
in the gradient
"""
if rectY and rectZ:
meth = 'NNC'
elif rectY:
meth = 'NNYC'
elif rectZ:
meth = 'NNZC'
else:
meth = 'LOC'
meth = f'{meth}_{k}'
print(f'data for which {meth} will be calculated: conc: {concs},'
f'scaling: {scaling}, act_pps: {act_pps}')
ks = np.arange(k) + 1
for conc in concs:
print(f'working on conc: {conc}')
data = self.act_sels[conc][act_pps].T
# here data is oriented as the usual X
pps = f'{scaling}{act_pps}'
Y, Z, costs = FOC.olf_gd_offline(data.values, k, max_iter=10000,
rectY=rectY, rectZ=rectZ,
rtol=rtol, rho=scaling,
alpha=alpha, cycle=cycle,
beta=beta)
self.errors[conc, 'NNC', k] = costs[-1]
W = Y @ Z.T / data.shape[1]
W = | pd.DataFrame(W, index=data.index, columns=ks) | pandas.DataFrame |
"""Class to process full HydReSGeo dataset.
Note: If IRUtils.py is not available, you need to download it before the
installation of the package into the `hprocessing/` folder:
.. code:: bash
wget -P hprocessing/ https://raw.githubusercontent.com/felixriese/thermal
-image-processing/master/tiprocessing/IRUtils.py
"""
import configparser
import glob
import itertools
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from .ProcessEnviFile import (ProcessEnviFile, getEnviFile, getEnviHeader,
readEnviHeader)
from .IRUtils import getIRDataFromMultipleZones
class ProcessFullDataset():
"""
Class to process the full HydReSGeo dataset.
Parameters
----------
    hyp_hdr_path : str
        Path to ENVI header file (low resolution)
meas_name : str
Name of measurement
positions_hyp : dict
Dictionary with information of the positions config file for the
hyperspectral camera
positions_lwir : dict
Dictionary with information of the positions config file for the
lwir camera
zone_list : list
List of measurement zones in the image. That does not include the
spectralon (white reference). If a zone needs to be ignored, it needs
to be removed from this list.
lwir_path : str
Path to long-wave infrared (LWIR) data
    soilmoisture_path : str
        Path to soil moisture data
    masks : pd.DataFrame or None
        Masks for hyperspectral images
    grid : tuple, optional (default=(1, 1))
        Layout of the measurement grid in the image — TODO confirm the
        (rows, columns) semantics against the implementation
    soilmode : str
        Mode of the soil measurements (e.g. KW33, Lysimeter); note that
        this is documented here but is not currently an ``__init__``
        parameter
imageshape : tuple, optional (default= (50, 50))
Height and width of the image
time_window_width : int, optional (default=6)
Time window width to match the hyperspectral image to the soil moisture
data. The unit of the time window width is minutes.
hyp_stat_mode : str
Mode for calculating the "mean spectrum" of a hyperspectral image.
Possible values: median, mean, max, max10 (= maximum of the top 10
pixels), std.
hyp_spectralon_factor : float, optional (default=0.95)
Factor of how much solar radiation the spectralon reflects.
verbose : int, optional (default=0)
Controls the verbosity.
Todo
-----
- Add attributes to class docstring.
- Remove self.date and self.time, only use self.datetime. Remove all
unnecessary functions of self.date and self.time.
"""
def __init__(self,
hyp_hdr_path: str,
meas_name: str,
positions_hyp: dict,
positions_lwir: dict,
zone_list: list,
lwir_path: str,
soilmoisture_path: str,
masks: pd.DataFrame,
grid: tuple = (1, 1),
imageshape: tuple = (50, 50),
time_window_width: int = 6,
hyp_stat_mode: str = "median",
hyp_spectralon_factor: float = 0.95,
verbose=0):
"""Initialize ProcessDataset instance."""
self.hyp_hdr_path = hyp_hdr_path
self.meas_name = meas_name
self.positions_hyp = positions_hyp
self.positions_lwir = positions_lwir
self.zone_list = zone_list
self.lwir_path = lwir_path
self.soilmoisture_path = soilmoisture_path
self.masks = masks
self.grid = grid
self.imageshape = imageshape
self.time_window_width = time_window_width
self.hyp_stat_mode = hyp_stat_mode
self.hyp_spectralon_factor = hyp_spectralon_factor
self.verbose = verbose
# get Envi files
self.envi_hdr_highres_path = self.hyp_hdr_path[:-4] + "_highres.hdr"
self.hdr, self.envi_img = getEnviFile(self.hyp_hdr_path)
self.hdr_highres = getEnviHeader(self.envi_hdr_highres_path)
self.date, self.time = readEnviHeader(self.hdr_highres)
# set datetime TODO: remove hard-coded timezone
self.datetime = pd.to_datetime(self.date+" "+self.time+"+02:00",
utc=True)
# read out header file
self.wavelengths = self.hdr_highres["Wavelength"]
self.bbl = self.hdr_highres["bbl"]
# get measurement index
self.index_of_meas = int(np.argwhere(
positions_hyp["measurement"].values == meas_name))
self.mask = None
# improvised solution to translate between zone1-8 to A1-D2
self.zone_dict = {
"A1": "zone1", "A2": "zone2", "B1": "zone3", "B2": "zone4",
"C1": "zone5", "C2": "zone6", "D1": "zone7", "D2": "zone8"}
def process(self) -> pd.DataFrame:
"""
Process a full dataset.
Returns
-------
pd.DataFrame
Dataframe with hyperspectral, LWIR, and soil moisture data for
one image.
"""
# set mask
if self.masks is not None:
mask_index = self.masks.index[
self.masks["measurement"] == self.meas_name].tolist()[0]
if self.index_of_meas != mask_index:
raise IOError(("positions.csv and mask.csv don't have the"
"same sequence of dates."))
self.mask = getMask(
masks=self.masks,
index_of_meas=self.index_of_meas,
imageshape=self.imageshape)
# random check if hyperspectral image is empty
if np.sum(self.envi_img[:, :, 5]) == 0:
if self.verbose:
print("Error: The hyperspectral image is empty.")
return None
# process
envi_processor = ProcessEnviFile(
image=self.envi_img,
wavelengths=self.wavelengths,
bbl=self.bbl,
zone_list=self.zone_list,
positions=self.positions_hyp,
index_of_meas=self.index_of_meas,
mask=self.mask,
grid=self.grid,
stat_mode=self.hyp_stat_mode,
spectralon_factor=self.hyp_spectralon_factor)
df_hyp = envi_processor.getMultipleSpectra()
# add datetime as column
df_hyp["datetime"] = self.datetime
# add soil moisture data
df_hyd = self.getSoilMoistureData()
df_hyd = df_hyd.drop(labels=["zone"], axis=1)
# add IR data
df_lwir = self.getLwirData()
df_lwir = df_lwir.drop(labels=["zone"], axis=1)
return pd.concat([df_hyp, df_hyd, df_lwir], axis=1)
def getSoilMoistureData(self):
"""
Get soil moisture data.
To match the dates of the soil moisture measurements and the
hyperspectral image, the timezones are converted to UTC.
Returns
-------
pd.Dataframe
Dataframe of soil moisture measurements which correspond to the
hyperspectral image of this instance.
Todo
----
- Move the CSV file read out into process-function outside this file
- Add an optional time shift correction between soil moisture data and
the hyperspectral data.
"""
soilmoisture_sensors = getUppermostSoilMoistureSensors()
# read out soil moisture data
df_sm = pd.read_csv(self.soilmoisture_path)
df_sm["timestamp"] = pd.to_datetime(df_sm["timestamp"], utc=True)
sm_dict = {"zone": [], "volSM_vol%": [], "T_C": []}
for i, sensor in enumerate(soilmoisture_sensors["number"]):
# only consider sensors in zone_list
zone = soilmoisture_sensors["zone"].iloc[i]
if self.zone_dict[zone] not in self.zone_list:
continue
# find nearest date
nearest_date, time_delta = findNearestDate(
df_sm[df_sm["sensorID"] == "T"+str(sensor)].timestamp,
self.datetime)
if time_delta > self.time_window_width / 2:
if self.verbose:
print("Warning: Could not find a soil moisture measurement"
"for sensor {0}".format(sensor))
continue
nearest_row = df_sm[(df_sm["sensorID"] == "T"+str(sensor)) &
(df_sm["timestamp"] == nearest_date)]
sm_dict["zone"].append(self.zone_dict[zone])
sm_dict["volSM_vol%"].append(nearest_row["volSM_vol%"].values[0])
sm_dict["T_C"].append(nearest_row["T_C"].values[0])
return pd.DataFrame(sm_dict)
def getLwirData(self):
"""
Get LWIR data from one of the CSV export files.
This function is based on code from another repository by the authors:
https://github.com/felixriese/thermal-image-processing
Parameters
----------
date : str
Date formatted as yyyymmdd, e.g. 20170816
time : str
Time formatted as hh-mm-ss, e.g. 13-31-40.
Returns
-------
pd.DataFrame
IR data of the current datapoint (matched to date and time)
Todo
-----
- Implement grid-wise LWIR data extraction. (For now, only zone-wise
data extraction is implemented.)
"""
# find LWIR file within the correct time window
lwir_datetime_list = []
for csvfile in glob.glob(self.lwir_path+"/ir_export_*.csv"):
csvfile_list = csvfile.split("/")[-1].split("_")
lwir_datetime = pd.to_datetime(
csvfile_list[2]+" "+csvfile_list[5][:-4].replace("-", ":") +
"+02:00", utc=True)
lwir_datetime_list.append(lwir_datetime)
nearest_date, time_delta = findNearestDate(
lwir_datetime_list, self.datetime)
# check if the nearest datetime is close enough
if time_delta > self.time_window_width / 2:
if self.verbose:
print("Warning: Did not find LWIR data.")
return pd.DataFrame({"zone": [np.nan], "mean": [np.nan],
"med": [np.nan], "std": [np.nan]})
# load LWIR CSV file
csvfile = glob.glob(self.lwir_path+"ir_export_" +
nearest_date.strftime("%Y%m%d")+"_*" +
nearest_date.tz_convert("Europe/Berlin").strftime(
"%H-%M-%S")+".csv")[0]
# get data from different zones
df_lwir_original = getIRDataFromMultipleZones(
csvpath=csvfile,
positions=self.positions_lwir.to_dict('list'),
zone_list=self.zone_list)
# The `df_lwir_original` results in one row and column names such as
# "ir_zone1_med". In the next step, one row per zone needs to be
# generated.
lwir_dict = {"zone": [], "mean": [], "med": [], "std": []}
for zone in self.zone_list:
lwir_dict["zone"].append(zone)
lwir_dict["mean"].append(
df_lwir_original["ir_"+str(zone)+"_mean"].values[0])
lwir_dict["med"].append(
df_lwir_original["ir_"+str(zone)+"_med"].values[0])
lwir_dict["std"].append(
df_lwir_original["ir_"+str(zone)+"_std"].values[0])
return | pd.DataFrame(lwir_dict) | pandas.DataFrame |
import pandas as pd
import numpy as np
import spacy
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors
# Load the spaCy model used to embed song/artist names as word vectors.
nlp = spacy.load('my_model')

# Load the Spotify dataset and keep only the first 100 rows.
df = pd.read_csv('Spotify/data.csv')
df = df[:100]
# Strip the surrounding brackets and quotes from the artists list string,
# e.g. "['Queen']" -> "Queen".
df['artists'] = df['artists'].apply(lambda x: x[1:-1].replace("'", ''))
# Drop columns not used as features.
df_slim = df.drop(['id', 'release_date', 'year', 'mode', 'key'], axis=1)
def standardizer(data):
    """Return the column-wise maxima and minima of *data*.

    Parameters
    ----------
    data : pd.DataFrame
        Numeric feature frame.

    Returns
    -------
    tuple of (pd.Series, pd.Series)
        Per-column maxima and per-column minima.

    Notes
    -----
    The original implementation computed ``data / d_maxes`` and discarded the
    result (a no-op); that dead expression has been removed. The function
    never standardized in place — callers divide by the maxima themselves.
    """
    d_maxes = data.max()
    d_mins = data.min()
    return d_maxes, d_mins
# Scale features into roughly [0, 1].
# NOTE(review): the divisors look like hard-coded dataset maxima
# (max duration 5,403,500 ms; max tempo 244.091 bpm; loudness floor -60 dB)
# — confirm against the full dataset before reuse on other data.
df_slim['duration_ms'] = df_slim['duration_ms']/5403500
df_slim['popularity'] = df_slim['popularity']/100
df_slim['tempo'] = df_slim['tempo']/244.091
df_slim['loudness'] = abs(df_slim['loudness']/60)
# Keep at most the first 2000 rows (df was already cut to 100 above).
ndf_slim = df_slim[:2000]
def get_word_vectors(w):
    """Return the spaCy embedding vector for every word in *w*."""
    vectors = []
    for word in w:
        vectors.append(nlp(word).vector)
    return vectors
# --- Stage 1: nearest neighbours in song-NAME embedding space -------------
df_name=ndf_slim['name']
# Reduce name word-vectors to 2 dimensions.
s1_PCA_song_names = PCA(n_components=2)
s1_PCA_song_names.fit(get_word_vectors(df_name))
s1_name_vect = s1_PCA_song_names.transform(get_word_vectors(df_name))
# 10-NN index over the 2-D name vectors.
nn1 = NearestNeighbors(n_neighbors=10, radius=0.5, algorithm='ball_tree', n_jobs=2)
nn1.fit(s1_name_vect)

# --- Stage 2: build a numeric feature matrix for audio-based neighbours ---
names = ndf_slim['name']
# 1-D PCA of song-name vectors (used as a single numeric feature).
s2_PCA_song_names = PCA(n_components=1)
s2_PCA_song_names.fit(get_word_vectors(names))
name_vect = s2_PCA_song_names.transform(get_word_vectors(names))
# 1-D PCA of artist-name vectors.
artists = ndf_slim['artists']
PCA_artist_names = PCA(n_components=1)
PCA_artist_names.fit(get_word_vectors(artists))
artist_vect = PCA_artist_names.transform(get_word_vectors(artists))
# Replace the text columns with their 1-D vectorized counterparts.
fdf= ndf_slim.drop(['name', 'artists'], axis=1)
fdf['1d_vectorized_name'] = name_vect
fdf['1d_vectorized_artist'] = artist_vect
# Reduce the combined feature frame to 2 dimensions and index it with 10-NN.
pca2 = PCA(n_components=2)
pca2.fit(fdf)
last_vector = pca2.transform(fdf)
names = ndf_slim['name']
nn2 = NearestNeighbors(n_neighbors=10, radius=0.5, algorithm='ball_tree', n_jobs=2)
X = last_vector
nn2.fit(X)
# NOTE(review): `names` is assigned three times with the same value and
# never used afterwards in this visible chunk.
names = ndf_slim['name']
def song_suggestion(song):
    """Suggest up to 10 songs whose titles are nearest to *song*.

    The title is embedded via spaCy, projected with the fitted 2-D PCA
    (``s1_PCA_song_names``), and looked up in the fitted name-vector
    nearest-neighbour index (``nn1``).
    """
    query_vec = s1_PCA_song_names.transform(get_word_vectors([song]))
    neighbour_rows = [df_name[idx] for idx in nn1.kneighbors(query_vec)[1]]
    return list(pd.DataFrame(neighbour_rows).values[0])
def get_prediced_songs(user_song):
    """Look up *user_song* by name and collect its nearest neighbours
    in the audio-feature space (``nn2``/``X``).

    NOTE(review): this function appears to be truncated — ``temp`` is built
    but never returned (compare with ``song_suggestion`` above, which
    flattens and returns the list). Confirm against the original file.
    """
    # Row index of the first song matching the given name.
    index = df[df['name'] == user_song].index[0]
    song_list = []
    for number in nn2.kneighbors([X[index]])[1]:
        song_list.append(df_name[number])
    temp = pd.DataFrame(song_list)
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
# Parametrized fixture yielding every (close, open) boolean combination:
# (True, True), (True, False), (False, True), (False, False).
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
    return request.param
class TestDataFrameTimeSeriesMethods:
    def test_pct_change(self, datetime_frame):
        """pct_change should match the manual shift/divide equivalents for
        various fill methods, periods, and a frequency offset."""
        # no filling: plain ratio against the 1-step shift
        rs = datetime_frame.pct_change(fill_method=None)
        tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)

        # default pad-fill, 2-period change
        rs = datetime_frame.pct_change(2)
        filled = datetime_frame.fillna(method="pad")
        tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)

        # backfill with a limit of one consecutive fill
        rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
        filled = datetime_frame.fillna(method="bfill", limit=1)
        tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)

        # frequency-based change: shift by a 5-day offset, then realign
        rs = datetime_frame.pct_change(freq="5D")
        filled = datetime_frame.fillna(method="pad")
        tm.assert_frame_equal(
            rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
        )
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
    @pytest.mark.parametrize(
        "freq, periods, fill_method, limit",
        [
            ("5B", 5, None, None),
            ("3B", 3, None, None),
            ("3B", 3, "bfill", None),
            ("7B", 7, "pad", 1),
            ("7B", 7, "bfill", 3),
            ("14B", 14, None, None),
        ],
    )
    def test_pct_change_periods_freq(
        self, datetime_frame, freq, periods, fill_method, limit
    ):
        """GH 7292: pct_change(freq="<n>B") must equal pct_change(<n>) on a
        business-day-indexed frame, for each fill method/limit combination."""
        rs_freq = datetime_frame.pct_change(
            freq=freq, fill_method=fill_method, limit=limit
        )
        rs_periods = datetime_frame.pct_change(
            periods, fill_method=fill_method, limit=limit
        )
        tm.assert_frame_equal(rs_freq, rs_periods)

        # the same equivalence must hold on an all-NaN (empty-valued) frame
        empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
        rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
        rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
        tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
    def test_frame_append_datetime64_col_other_units(self):
        """datetime64 columns of non-nanosecond units are converted to
        M8[ns] on assignment, both for new and for existing columns."""
        n = 100

        units = ["h", "m", "s", "ms", "D", "M", "Y"]

        ns_dtype = np.dtype("M8[ns]")

        # assigning a new column: values should be upcast to ns resolution
        for unit in units:
            dtype = np.dtype("M8[{unit}]".format(unit=unit))
            vals = np.arange(n, dtype=np.int64).view(dtype)

            df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
            df[unit] = vals

            # expected: round-trip through Python objects to ns timestamps
            ex_vals = to_datetime(vals.astype("O")).values

            assert df[unit].dtype == ns_dtype
            assert (df[unit].values == ex_vals).all()

        # Test insertion into existing datetime64 column
        df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
        df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)

        for unit in units:
            dtype = np.dtype("M8[{unit}]".format(unit=unit))
            vals = np.arange(n, dtype=np.int64).view(dtype)

            tmp = df.copy()

            tmp["dates"] = vals
            ex_vals = to_datetime(vals.astype("O")).values

            assert (tmp["dates"].values == ex_vals).all()
    def test_asfreq(self, datetime_frame):
        """asfreq via an offset object matches asfreq via its rule string,
        and asfreq on an empty frame returns a new (non-identical) object."""
        offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
        rule_monthly = datetime_frame.asfreq("BM")

        tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])

        filled = rule_monthly.asfreq("B", method="pad")  # noqa
        # TODO: actually check that this worked.

        # don't forget!
        filled_dep = rule_monthly.asfreq("B", method="pad")  # noqa

        # test does not blow up on length-0 DataFrame
        zero_length = datetime_frame.reindex([])
        result = zero_length.asfreq("BM")
        assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
    def test_asfreq_fillvalue(self):
        """GH 3715: fill_value in asfreq fills only the gaps created by
        upsampling, never pre-existing missing values."""
        # setup: 2-second data upsampled to 1-second frequency
        rng = pd.date_range("1/1/2016", periods=10, freq="2S")
        ts = pd.Series(np.arange(len(rng)), index=rng)
        df = pd.DataFrame({"one": ts})

        # insert pre-existing missing value
        df.loc["2016-01-01 00:00:08", "one"] = None

        actual_df = df.asfreq(freq="1S", fill_value=9.0)
        expected_df = df.asfreq(freq="1S").fillna(9.0)
        # the pre-existing NaN must remain NaN in the expectation
        expected_df.loc["2016-01-01 00:00:08", "one"] = None
        tm.assert_frame_equal(expected_df, actual_df)

        # same behavior for a bare Series
        expected_series = ts.asfreq(freq="1S").fillna(9.0)
        actual_series = ts.asfreq(freq="1S", fill_value=9.0)
        tm.assert_series_equal(expected_series, actual_series)
    @pytest.mark.parametrize(
        "data,idx,expected_first,expected_last",
        [
            ({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
            ({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
            ({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
            ({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
            ({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
            ({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
        ],
    )
    def test_first_last_valid(
        self, float_frame, data, idx, expected_first, expected_last
    ):
        """first/last_valid_index: NaN-padded frames, empty frames (GH12800),
        all-NaN frames (GH17400), freq preservation (GH20499), and
        duplicate/non-monotonic indexes (GH 21441)."""
        # frame with NaNs at both ends: first/last valid fall just inside
        N = len(float_frame.index)
        mat = np.random.randn(N)
        mat[:5] = np.nan
        mat[-5:] = np.nan

        frame = DataFrame({"foo": mat}, index=float_frame.index)
        index = frame.first_valid_index()

        assert index == frame.index[5]

        index = frame.last_valid_index()
        assert index == frame.index[-6]

        # GH12800
        empty = DataFrame()
        assert empty.last_valid_index() is None
        assert empty.first_valid_index() is None

        # GH17400: no valid entries
        frame[:] = np.nan
        assert frame.last_valid_index() is None
        assert frame.first_valid_index() is None

        # GH20499: its preserves freq with holes
        frame.index = date_range("20110101", periods=N, freq="B")
        frame.iloc[1] = 1
        frame.iloc[-2] = 1
        assert frame.first_valid_index() == frame.index[1]
        assert frame.last_valid_index() == frame.index[-2]
        assert frame.first_valid_index().freq == frame.index.freq
        assert frame.last_valid_index().freq == frame.index.freq

        # GH 21441
        df = DataFrame(data, index=idx)
        assert expected_first == df.first_valid_index()
        assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = | tm.makeTimeDataFrame(freq="12h") | pandas.util.testing.makeTimeDataFrame |
# <NAME>
# python 3.6
""" Input:
------
It reads the individual driver's correlation nc files
Also uses regional masks of SREX regions to find dominant drivers regionally
Output:
-------
* Timeseries of the percent distribution of dominant drivers at different lags
"""
from scipy import stats
from scipy import ndimage
import glob
import sys
import netCDF4 as nc4
import numpy as np
import datetime as dt
from calendar import monthrange
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
#importing my functions
from functions import time_dim_dates, index_and_dates_slicing, geo_idx, mpi_local_and_global_index, create_seq_mat, cumsum_lagged,patch_with_gaps_and_eventsize, norm
from timeit import default_timer as timer
from scipy.stats.stats import pearsonr
import pandas as pd
import argparse
import collections
import os
import xarray as xr
#1- Hack to fix missing PROJ4 env var for Basemaps Error
import os
"""
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
#-1 Hack end
from mpl_toolkits.basemap import Basemap
from matplotlib import cm
import matplotlib.patches as patches
"""
# Command-line interface: carbon-cycle variable, model, and ensemble member.
parser = argparse.ArgumentParser()
#parser.add_argument('--driver_ano' , '-dri_a' , help = "Driver anomalies" , type= str , default= 'pr' ) #pr
parser.add_argument('--variable' , '-var' , help = "Anomalies of carbon cycle variable" , type= str , default= 'gpp' )
parser.add_argument('--source' , '-src' , help = "Model (Source_Run)" , type= str , default= 'CESM2' ) # Model Name
parser.add_argument('--member_idx' , '-m_idx' , help = "Member Index" , type= int , default= 0 ) # Index of the member
#parser.add_argument ('--cum_lag' ,'-lag' , help = 'cum lag months? (multiple lag optional) use , to add multiple' , type = str , default = '01,02,03' )
args = parser.parse_args()

# run plot_dominant_climate_driver_correlation_tce_regional_graphs.py -var gpp -src CESM2
print (args)
variable = args.variable
#drivers_string = args.driver_ano
source_run = args.source
member_idx = args.member_idx

# List of the drivers that will be considered and their names for Plotting.
# CESM2 additionally provides a fire flux driver; other models have 3 drivers.
# drivers_code maps each driver to a numeric ID used in the correlation files.
# -----------
if source_run == 'CESM2':
    driver_consider = 4
    drivers = np.array(['pr','mrso','tas','fFireAll']) [:driver_consider]
    drivers_names = np.array(['Prcp','Soil Moisture', 'TAS','Fire']) [:driver_consider]
    drivers_code = np.array([ 10, 20, 30, 40]) [:driver_consider]
else:
    driver_consider = 3
    drivers = np.array(['pr','mrso','tas']) [:driver_consider]
    drivers_names = np.array(['Prcp','Soil Moisture', 'TAS']) [:driver_consider]
    drivers_code = np.array([ 10, 20, 30]) [:driver_consider]
# Paths for reading the main files (NERSC Cori scratch layout)
# --------------------------------
cori_scratch = '/global/cscratch1/sd/bharat/'
members_list = os.listdir(cori_scratch+"add_cmip6_data/%s/ssp585/"%source_run)
member_run = members_list[member_idx]

# Storing the file name and abr of the drivers to be considered
# -------------------------------------------------------------
features = {}
features['abr'] = drivers
features['filenames'] = {}
# The name with which the variables are stored in the nc files:
features['Names'] = {}
features['Names']['pr'] = 'pr'
features['Names']['mrso'] = 'mrso'
features['Names']['tas'] = 'tas'
if source_run == 'CESM2':
    features['Names']['fFireAll'] = 'Fire'
#features['Names']['tasmax'] = 'tasmax'

features['filenames'][variable] = {} # Creating a empty directory for storing multi members if needed
# NOTE(review): filename hard-codes the "CESM2_ssp585_" prefix even for other
# source_run values — confirm that is intended.
features['filenames'][variable][member_run] = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/CESM2_ssp585_%s_%s_anomalies_gC.nc"%(source_run,member_run, variable,member_run,variable)

# Reading the Correlations Data
# -----------------------------
exp= 'ssp585'
path_corr = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s/Correlations/'%(source_run,exp,member_run,variable)
nc_corr = nc4.Dataset(path_corr + 'dominant_driver_correlation_%s.nc'%(variable))

# Reading the variables from the variable (gpp) anomalies file
# ------------------------------------------------------------
nc_var = nc4.Dataset(features['filenames'][variable][member_run])
time = nc_var .variables['time']

# Reading the variables from the correlation file
# -----------------------------------------------
ranks = nc_corr .variables['rank' ]
wins = nc_corr .variables['win' ]
lags = nc_corr .variables['lag' ]
dom_dri_ids = nc_corr .variables['dri_id' ]
dom_dri_cc = nc_corr .variables['dri_coeff']

# Grids:
# -------
lat = nc_var .variables ['lat']
lon = nc_var .variables ['lon']
lat_bounds = nc_var .variables [nc_var.variables['lat'].bounds ]
lon_bounds = nc_var .variables [nc_var.variables['lon'].bounds ]
lon_edges = np.hstack (( lon_bounds[:,0], lon_bounds[-1,-1]))
lat_edges = np.hstack (( lat_bounds[:,0], lat_bounds[-1,-1]))

# Creating mask of the SREX regions based on the resolution of the model
import regionmask
srex_mask = regionmask.defined_regions.srex.mask(lon[...], lat[...]).values  # it has nans
srex_mask_ma= np.ma.masked_invalid(srex_mask) # got rid of nans; values from 1 to 26

# important regional information: abbreviations, names, numeric IDs,
# centroids, and polygons of the 26 SREX regions
srex_abr = regionmask.defined_regions.srex.abbrevs
srex_names = regionmask.defined_regions.srex.names
srex_nums = regionmask.defined_regions.srex.numbers
srex_centroids = regionmask.defined_regions.srex.centroids
srex_polygons = regionmask.defined_regions.srex.polygons
# Organizing time: split the monthly time axis into 25-year windows
# ---------------
window = 25 #years
win_len = 12 * window #number of months in window years
nwin = int(time.size/win_len) #number of windows
#wins = np.array([format(i,'02' ) for i in range(nwin)])
dates_ar = time_dim_dates ( base_date= dt.date(1850,1,1), total_timestamps=time.size)
start_dates = [dates_ar[i*win_len] for i in range(nwin)]#list of start dates of 25 year window
end_dates = [dates_ar[i*win_len+win_len -1] for i in range(nwin)]#list of end dates of the 25 year window

# Zero-padded string labels for windows, lags, and ranks ("00", "01", ...)
# ------
wins_str = [format(int(i),'02') for i in wins[...]]
lags_str = [format(int(i),'02') for i in lags[...]]
ranks_str = [format(int(i),'02') for i in ranks[...]]

# Regional masks
# --------------
import regionmask

# To store all the DataFrames of counts of dominant climate drivers in a dictionnary for every region
DataFrames_counts = {}
#format>>> DataFrames [regions] [wins] [lags] [ranks]
# range: regions: 26
#        wins : 10
#        lags : 1
#        ranks: 1
for region_abr in srex_abr:
    DataFrames_counts[region_abr] = {}
    for w in wins_str:
        DataFrames_counts[region_abr][w] = {}
        for l in lags_str[1:3]:
            DataFrames_counts[region_abr][w][l] = {}

# Output directory for the per-region CSV dataframes (created if missing)
save_path = "/global/cscratch1/sd/bharat/add_cmip6_data/%s/ssp585/%s/%s/Correlations/Regional/DataFrames/"%(
    source_run, member_run, variable)
if os.path.isdir(save_path) == False:
    os.makedirs(save_path)

# Storing the dataframes for regions,win,lag, rk
# ----------------------------------------------
dict_counts = {}
for region_abr in srex_abr:
    dict_counts[region_abr] = {}
    for win in np.asarray(wins[...], dtype =int):
        dict_counts[region_abr][win] = {}
        for lg in np.asarray(lags[...] [1:],dtype = int):
            dict_counts[region_abr][win][lg] = {}
            for rk in np.asarray(ranks[...][0:1], dtype = int):
                dict_counts[region_abr][win][lg][rk] = {}
# Computing the DataFrames: per region / window / lag / rank, count how often
# each driver is dominant, its mean/std correlation, and write one CSV each.
# ------------------------
for region_abr in srex_abr: #testing for AMZ only
    srex_idxs = np.arange(len(srex_names))
    filter_region = np.array(srex_abr) == region_abr
    region_idx = srex_idxs[filter_region][0]
    region_number = np.array(srex_nums)[filter_region][0]
    region_name = np.array(srex_names)[filter_region][0]
    # NOTE(review): the loop variable `region_abr` is reassigned here; value
    # is identical, but shadowing the iterator is fragile.
    region_abr = np.array(srex_abr)[filter_region][0]
    region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask  # Masked everthing but the region
    region_mask = ~region_mask_not  # Only the regions is masked

    for win in np.asarray(wins[...], dtype =int):
        for lg in np.asarray(lags[...] [1:3],dtype = int):
            for rk in np.asarray(ranks[...][0:1], dtype = int):
                # Count occurrences of each driver ID inside the region;
                # nans and 0's in the array have to be masked first.
                counts = np.unique( np.ma.masked_equal( np.ma.masked_invalid(
                    dom_dri_ids[rk,win,lg,:,:][region_mask]),0),
                    return_counts=True)
                counts_drivers = np.array([counts[1][i] for i in range(counts[1].size)])
                #since many drivers were not dominant for most part so only limiting the plot to the relevant ones
                print ("counts for dom rank %s and lag %s...:"%(format(rk,'002'), format(lg,'002')))
                # tmp_drivers_code: mask marks the driver codes actually
                # present in this region/window/lag
                tmp_drivers_code = np.copy(drivers_code)
                for d in counts[0].data:
                    tmp_drivers_code = np.ma.masked_equal (tmp_drivers_code, d)
                df_counts = pd.DataFrame({'Counts':counts_drivers[:-1]}) #the last value corresponds to the masks
                df_counts.index = drivers [tmp_drivers_code.mask]
                perc = [round(i*100./sum(df_counts['Counts'].values),2) for i in df_counts['Counts'].values]
                df_counts['percentage']=perc

                #Calculating the mean and std of the correlation of each driver
                mean_cc = []
                std_cc = []
                for code_id in drivers_code[tmp_drivers_code.mask]:
                    #print "code_ID...", code_id
                    mean_cc.append(np.ma.mean(dom_dri_cc[rk,win,lg,:,:][~np.ma.masked_not_equal(dom_dri_ids[rk,win,lg,:,:],code_id).mask]))
                    std_cc.append(np.ma.std(dom_dri_cc[rk,win,lg,:,:][~np.ma.masked_not_equal(dom_dri_ids[rk,win,lg,:,:],code_id).mask]))
                df_counts['mean_coeff'] = mean_cc
                df_counts['std_coeff'] = std_cc

                # Saving the Data Frame in a dic:
                DataFrames_counts[ region_abr] [wins_str[win]] [lags_str[lg]] [ranks_str[rk]] = df_counts #since the numbers are indexs are same
                print ('dataframe_win_%s_lag_%s_and_rank_%s.csv'%(format(win,'02'),format(lg,'02'),format(rk,'02')))
                df_counts .to_csv(save_path + 'df_reg_%s_win_%s_lag_%s_and_rank_%s.csv'%(region_abr, format(win,'02'),format(lg,'02'),format(rk,'02')),sep=',')

                # Regional dominant driver (highest percentage) to dictionary
                # --------------
                df_counts_t = df_counts[df_counts.loc[:,'percentage'] == df_counts.loc[:,'percentage'].max()]
                if df_counts_t.size > 0:
                    dict_counts[region_abr][win][lg][rk] ['Dri_Name'] = df_counts_t.index[0]
                    dict_counts[region_abr][win][lg][rk] ['Corr_Coeff'] = df_counts_t['mean_coeff'][0]
                    dict_counts[region_abr][win][lg][rk] ['Dri_Code'] = drivers_code[drivers == df_counts_t.index[0]][0]
                elif df_counts_t.size == 0:
                    # no driver dominant anywhere in this region/window/lag
                    dict_counts[region_abr][win][lg][rk] ['Dri_Name'] = np.nan
                    dict_counts[region_abr][win][lg][rk] ['Corr_Coeff'] = np.nan
                    dict_counts[region_abr][win][lg][rk] ['Dri_Code'] = np.nan
                #df_counts .to_csv(path_corr + 'dataframes/dataframe_win_%s_lag_%s_and_rank_%s_np2.csv'%(format(win,'02'),format(lg,'02'),format(rk,'02')),sep=',') # [Changed] No pvalue filter
                #print(breakit)
"""
# =============================================================
# based on " ecp_triggers_percent_distribution_dom_drivers.py "
# =============================================================
# Plotting the timeseries of dominant drivers
# -------------------------------------------
in_yr = 1850
win_yr = [str(in_yr+i*25) + '-'+str(in_yr +(i+1)*25-1)[2:] for i in range(wins.size)]
plot_lags = ['01','02','03']
data_percent = np.zeros((len(win_yr), len(drivers_names)))
print ("data_percent shape: ", data_percent.shape)
data_lag = {}
for LAG in plot_lags :
data_percent = np.zeros((len(win_yr), len(drivers_names)))
print ("data_percent shape: ", data_percent.shape)
print ("data shape", np.transpose(DataFrames_counts[w] [LAG] [ranks_str[rk]]['percentage']).shape)
df = pd.DataFrame( data_percent , index = win_yr, columns = drivers_names) #main dataframe
for w in wins_str:
data = DataFrames_counts [w] [LAG] [ranks_str[rk]]
drivers = data.iloc[:,0]
data_df = pd.DataFrame( DataFrames_counts[w] [LAG] [ranks_str[rk]]['percentage'].values.reshape(1,len(drivers)),index = [win_yr[int(w)]], columns = drivers) # dataframe for a particuar window
for idx,dr in enumerate (drivers):
df.loc[data_df.index,drivers_names[idx]] = data_df[dr]
data_lag [LAG] = df
# Plotting Subplots
# -----------------
if source_run == 'CESM2':
color_list = ['b','b','g','r']
linestyle_list = ['--','-','-','-']
else:
color_list = ['b','g','r']
linestyle_list = ['-','-','-']
fig,ax = plt.subplots(nrows=3,ncols= 1,gridspec_kw = {'wspace':0, 'hspace':0.02},tight_layout = True, figsize = (7,8.5), dpi = 400)
ax = ax.ravel()
for lag_idx,LAG in enumerate(plot_lags):
for dri_idx in range(len(drivers_names)):
ax[lag_idx].plot( range(wins.size), data_lag[LAG].iloc[:,dri_idx], label = drivers_names[dri_idx], color=color_list[dri_idx],linestyle=linestyle_list[dri_idx], linewidth = 1)
ax[lag_idx].set_xticks(range(wins.size))
ax[lag_idx].tick_params(axis="x",direction="in")
ax[lag_idx].set_xticklabels([])
ax[lag_idx].set_ylabel("Lag: %s"%(LAG))
ax[lag_idx].set_ylim([0,50])
ax[lag_idx].grid(which='major', linestyle=':', linewidth='0.3', color='gray')
ax[lag_idx].set_xticklabels(df.index,fontsize =9)
for tick in ax[lag_idx].get_xticklabels():
tick.set_rotation(90)
ax[lag_idx].set_xlabel('Time ($25-yr)$ wins',fontsize =14)
fig.text(0.03, 0.5, 'Percent Distribution of Climate Drivers', va='center', ha='center', rotation='vertical', fontsize=14)
ax[0].legend(loc = 'upper center',ncol=len(drivers_names), bbox_to_anchor=(.44,1.15),frameon =False,fontsize=9,handletextpad=0.1)
plt.gcf().subplots_adjust(bottom=0.1)
fig.savefig(path_corr + 'per_dom/percent_dominance_multilag_123_rank_%s.pdf'%(ranks_str[rk]))
#fig.savefig(path_corr + 'per_dom/percent_dominance_multilag_123_rank_%s_np2.pdf'%(ranks_str[rk])) # [changed] no p-value filter
"""
# Plotting Subplots: per-driver colors/linestyles (CESM2 has 4 drivers)
# -----------------
if source_run == 'CESM2':
    color_list = ['b','b','g','r']
    linestyle_list = ['--','-','-','-']
else:
    color_list = ['b','g','r']
    linestyle_list = ['-','-','-']

web_path = '/global/homes/b/bharat/results/web/Regional/Attribution/'
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/"%(source_run,member_run, variable)

# x - axis: window labels "1850-74", "1875-99", ...
in_yr = 1850
win_yr = [str(in_yr+i*25) + '-'+str(in_yr +(i+1)*25-1)[2:] for i in range(wins.size)]

# Initializing the dataframe (windows x drivers, filled with zeros)
data_percent = np.zeros((len(win_yr), len(drivers_names)))

# Choose the lag
# -------------
lg =1

# Creating an empty dict for storing the per-region dataframes:
# NOTE(review): `rk` below is the leftover value from the computation loop
# above (last rank iterated, i.e. 0) — an implicit dependency worth making
# explicit.
# ------------------------------------------------
dict_dataframe = {}
for r_idx, region_abr in enumerate(srex_abr):
    df = pd.DataFrame( data_percent , index = win_yr, columns = drivers) #main dataframe
    for w_idx, w in enumerate (wins_str):
        data = DataFrames_counts[region_abr][w][lags_str[lg]] [ranks_str[rk]]
        drivers_tmp = data.iloc[:,0]
        for col in df.columns :
            # NOTE(review): bare except silently writes 0 when a driver is
            # absent for this window — consider catching KeyError only.
            try:
                df .loc[win_yr[w_idx],col] = data.loc[col,'percentage']
            except:
                df .loc[win_yr[w_idx],col] = 0
    dict_dataframe[region_abr] = df.copy(deep = True)
    del df
# Plotting the dominant driver distribution for all the regions:
# 9x3 grid of subplots, one per SREX region, drivers as colored lines.
# --------------------------------------------------------------
import pylab as plot
params = {'legend.fontsize': 6,
          'legend.handlelength': 1,
          'legend.frameon': 'False',
          'axes.labelsize':'small',
          'ytick.labelsize': 'small',
          'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
                        gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
plt.suptitle ("TS of dominant drivers during TCEs (lag:%d)"%lg, fontsize = 14)
txt ="The left y-axis represents the percent count of drivers in that region"
axs = axs.ravel()
for k_idx, key in enumerate(dict_dataframe.keys()):
    df = dict_dataframe[key]
    for dri_idx in range(len(drivers)):
        axs[k_idx].plot( range(wins.size), df.iloc[:,dri_idx], label = drivers_names[dri_idx], color=color_list[dri_idx],linestyle=linestyle_list[dri_idx], linewidth = 0.8)
    #axs[k_idx].set_xticks(range(wins.size))
    axs[k_idx].set_ylabel("%s"%key)
    axs[k_idx].grid(which='major', linestyle=':', linewidth='0.3', color='gray')
# rotate the x tick labels of the bottom row only
for tick in axs[-3].get_xticklabels():
    tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
    tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
    tick.set_rotation(45)
#axs[1].legend(loc = 'upper center',ncol=len(drivers_names), bbox_to_anchor=(.5,1.15),frameon =False,fontsize=9,handletextpad=0.1)
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12) #Caption
fig.savefig(web_path + 'percent_dom_%s_%s_lag_%s_regions_%s.pdf'%(source_run, member_run, format(lg,'02'),variable.upper()) )
# Common Information for spatial plots
# ====================================
# Six selected windows (panel labels a-f); Wins_to_Plot_idxs are the matching
# indices into the 25-yr window axis of the data arrays.
sub_fig_text = ['(a)', '(b)', '(c)',
                '(d)', '(e)', '(f)']
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
Wins_to_Plot_idxs = [0,2,4,6,8,9]
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
# Both data and output use PlateCarree (lat/lon) coordinates.
proj_trans = ccrs.PlateCarree()
proj_output = ccrs.PlateCarree()
# Plotting individual drivers
# ===========================
# Spatial plot of individual driver correlatons
# for idx, dri in enumerate (drivers_names):
# One figure per driver: 2x3 panels of the rank-mean correlation coefficient
# of that driver with the extremes, for six windows, at lag `plag`.
sub_fig_text = ['(a)', '(b)', '(c)',
                '(d)', '(e)', '(f)']
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
Wins_to_Plot_idxs = [0,2,4,6,8,9]
# Correlation coefficients lie in [-1, 1]; used as colorbar limits.
ymax = 1
ymin = -1
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
proj_output = ccrs.PlateCarree()
for dri_idx, dri in enumerate (drivers_names):
    fig = plt.figure(figsize = (12,9), dpi = 200)
    #pwin = Wins_to_Plot_idxs[0]
    plag = 1
    ax = {}
    gl = {}
    for plot_idx, win_idx in enumerate(Wins_to_Plot_idxs):
        #plot_idx = 0 #
        gl[plot_idx] = 0
        if plot_idx == 0:
            ax[plot_idx] = fig.add_subplot(
                2, 3, plot_idx+1, projection= proj_output
            )
            # Mean Correlation Coefficient of the Selected climate Drivers at any rank
            # (masked wherever this driver's code does not appear among the ranks).
            plot_data = np.ma.mean(np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
                                   mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
                                          drivers_code[dri_idx]) .mask),axis = 0)
            h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
                                        transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap='PuOr')
            for srex_idx,abr in enumerate (srex_abr):
                ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
        elif plot_idx>0:
            # Subsequent panels share axes with the first one.
            ax[plot_idx] = fig.add_subplot(
                2, 3, plot_idx+1, projection= proj_output,
                sharex=ax[0], sharey=ax[0]
            )
            # Mean Correlation Coefficient of the Selected climate Drivers at any rank
            plot_data = np.ma.mean(np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
                                   mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
                                          drivers_code[dri_idx]) .mask),axis = 0)
            h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
                                        transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap='PuOr')
            for srex_idx,abr in enumerate (srex_abr):
                ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
    # Decorations shared by all six panels.
    for plot_idx in range(len(Wins_to_Plot)):
        ax[plot_idx].coastlines(alpha=0.75)
        ax[plot_idx].text(-85, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
                          horizontalalignment="right",
                          verticalalignment='center',
                          fontsize = 9)
        gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
                                              linewidth=.5, color='gray', alpha=0.5, linestyle='--')
    # Labels only on the bottom row (panels 3-5) and left column (panels 0, 3).
    # NOTE(review): gl.xlabels_bottom/ylabels_left are the old cartopy Gridliner
    # attributes (deprecated in newer cartopy) — confirm the installed version.
    gl[3].xlabels_bottom = True
    gl[4].xlabels_bottom = True
    gl[5].xlabels_bottom = True
    gl[3].xformatter = LONGITUDE_FORMATTER
    gl[4].xformatter = LONGITUDE_FORMATTER
    gl[5].xformatter = LONGITUDE_FORMATTER
    gl[0].ylabels_left = True
    gl[3].ylabels_left = True
    gl[0].yformatter = LATITUDE_FORMATTER
    gl[3].yformatter = LATITUDE_FORMATTER
    plt.subplots_adjust(wspace=0.02,hspace=-.695)
    cax = plt.axes([0.92, 0.335, 0.015, 0.34])
    plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
    ax[1].set_title("Correlation Coefficient of %s with %s extremes"%(dri,variable.upper()), fontsize=14)
    fig.savefig(web_path + "Spatial_Corr_%s_%s_lag_%d.pdf"%(variable,dri,plag),
                bbox_inches = "tight", edgecolor="w")
    fig.savefig(web_path + "Spatial_Corr_%s_%s_lag_%d.png"%(variable,dri,plag),
                bbox_inches = "tight", edgecolor="w")
    fig.savefig(path_save + "Correlations/Spatial_Maps/Spatial_Corr_%s_%s_lag_%d.pdf"%(variable,dri,plag),
                bbox_inches = "tight", edgecolor="w")
    del fig
# Dominant Driver spatial plot at lag =1 month
# ===========================================
# Spatial plot of Dominant driver correlatons
# for idx, dri in enumerate (drivers_names):
# 2x3 panels showing the *code* of the rank-0 (dominant) driver per grid cell.
# Colorbar limits bracket the driver codes (presumably 10..40 in steps of 10 —
# TODO confirm against drivers_code).
ymax = 45
ymin = 5
rk = 0 #Dominant driver
plag = 1 # lag =1 month
fig = plt.figure(figsize = (12,9), dpi = 200)
ax = {}
gl = {}
for plot_idx, win_idx in enumerate(Wins_to_Plot_idxs):
    #plot_idx = 0 #
    gl[plot_idx] = 0
    if plot_idx == 0:
        ax[plot_idx] = fig.add_subplot(
            2, 3, plot_idx+1, projection= proj_output
        )
        # Mask invalid and zero-coded cells; what remains is the dominant driver id.
        plot_data = np.ma.masked_equal(np.ma.masked_invalid(dom_dri_ids[rk,win_idx,plag,:,:]),0)
        cmap = plt.get_cmap('rainbow', drivers_code.size)
        h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
                                    transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap=cmap)
        for srex_idx,abr in enumerate (srex_abr):
            ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
    elif plot_idx>0:
        ax[plot_idx] = fig.add_subplot(
            2, 3, plot_idx+1, projection= proj_output,
            sharex=ax[0], sharey=ax[0]
        )
        plot_data = np.ma.masked_equal(np.ma.masked_invalid(dom_dri_ids[rk,win_idx,plag,:,:]),0)
        h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
                                    transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap= cmap)
        for srex_idx,abr in enumerate (srex_abr):
            ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
    ax[plot_idx].coastlines(alpha=0.75)
    ax[plot_idx].text(-85, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
                      horizontalalignment="right",
                      verticalalignment='center',
                      fontsize = 9)
    gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
                                          linewidth=.5, color='gray', alpha=0.5, linestyle='--')
# Bottom-row / left-column axis labels only.
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
# Discrete colorbar: one tick per driver code, labelled with driver names.
cbar = plt.colorbar(h, cax=cax, ticks = range(drivers_code[0],drivers_code[-1]+1,10))
cbar .ax.set_yticklabels(drivers_names)
#plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
ax[1].set_title("Dominant Drivers of %s extremes"%(variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
            bbox_inches = "tight", edgecolor="w")
fig.savefig(web_path + "Spatial_Dominant_Driver_%s_lag_%d.png"%(variable,plag),
            bbox_inches = "tight", edgecolor="w")
fig.savefig(path_save + "Correlations/Spatial_Maps/Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
            bbox_inches = "tight", edgecolor="w")
del fig
# Plotting of "Regional Dominance"
# =====================================
#dict_counts[region_abr][win][lg][rk] ['Dri_Name']
#dict_counts[region_abr][win][lg][rk] ['Corr_Coeff']
# For every region/window, record the sign ('+', Unicode minus, or blank) of the
# dominant driver's correlation coefficient; also collect all values to print
# the overall range for a sanity check.
rk=0
lg=1
plag=1
values_range = []
sign = {}
for r in srex_abr:
    sign[r] = {}
    for win_idx, wi in enumerate(Wins_to_Plot):
        values_range.append(dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'])
        #print(win_idx,dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] )
        if dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] > 0:
            sign[r][wi] = '+'
        elif dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] < 0:
            sign[r][wi] = u"\u2212"
        else:
            sign[r][wi] = ' '
print ("To check for the range of values")
print (np.array(values_range).min())
print (np.array(values_range).max())
# Colorbar limits bracketing the driver codes for the regional-dominance map.
ymax = 45
ymin = 5
# Creating the NBP Values for 1850-74 for all regions for NBP du Ext
# For each window: paint every SREX region's grid cells with the code of the
# region's dominant driver, then mask cells outside any region.
ploting_stats = {}
for win_idx, wi in enumerate(Wins_to_Plot):
    ploting_stats[wi] = {}
    all_masked = np.ma.masked_equal(np.ma.zeros(srex_mask_ma.shape),0)
    for s_idx in srex_idxs:
        tmp = np.ma.masked_equal(srex_mask_ma,s_idx+ 1).mask # +1 because srex_idxs start from 1
        all_masked[tmp] = dict_counts[srex_abr[s_idx]][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Dri_Code']
        del tmp
    all_masked = np.ma.masked_array(all_masked, mask = srex_mask_ma.mask)
    ploting_stats[wi] ['Dri_Codes'] = np.ma.masked_equal(np.ma.masked_invalid(all_masked),0)
# test plot
# 2x3 panels: per-window map of each SREX region filled with its dominant
# driver's code; the sign of that driver's correlation is overlaid as text at
# the region centroid.
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
#proj_output = ccrs.Robinson(central_longitude=0)
proj_output = ccrs.PlateCarree()
fig = plt.figure(figsize = (12,9), dpi = 400)
plt.style.use("classic")
ax = {}
gl = {}
for plot_idx in range(len(Wins_to_Plot)):
    gl[plot_idx] = 0
    if plot_idx == 0 :
        ax[plot_idx] = fig.add_subplot(
            2, 3, plot_idx+1, projection= proj_output
        )
        plot_data = np.ma.masked_equal(np.ma.masked_invalid(ploting_stats[Wins_to_Plot[plot_idx]]['Dri_Codes']),0)
        cmap = plt.get_cmap('rainbow', drivers_code.size)
        h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
                                    transform=ccrs.PlateCarree(),vmax=ymax, vmin=ymin,cmap= cmap)
        for srex_idx,abr in enumerate (srex_abr):
            # Sign of the dominant driver's correlation, at the region centroid.
            ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1], sign[abr][Wins_to_Plot[plot_idx]],
                                horizontalalignment='center',
                                color = 'white', fontweight = 'bold',fontsize=10,
                                transform = proj_trans)
            ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
    elif plot_idx>0:
        ax[plot_idx] = fig.add_subplot(
            2, 3, plot_idx+1, projection= proj_output,
            sharex=ax[0], sharey=ax[0]
        )
        plot_data = np.ma.masked_equal(np.ma.masked_invalid(ploting_stats[Wins_to_Plot[plot_idx]]['Dri_Codes']),0)
        h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
                                    transform=ccrs.PlateCarree(),vmax=ymax,vmin=ymin,cmap= cmap)
        for srex_idx,abr in enumerate (srex_abr):
            ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1],
                                sign[abr][Wins_to_Plot[plot_idx]],
                                horizontalalignment='center',
                                color = 'white', fontweight = 'bold',fontsize=10,
                                transform = proj_trans)
            ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
    ax[plot_idx].coastlines(alpha=0.75)
    ax[plot_idx].text(80, -60, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
                      horizontalalignment="right",
                      verticalalignment='center',
                      fontsize = 12)
    gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
                                          linewidth=.5, color='gray', alpha=0.5, linestyle='--')
# Bottom-row / left-column axis labels only.
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
cbar = plt.colorbar(h, cax=cax, ticks = range(drivers_code[0],drivers_code[-1]+1,10))
# Short display names for the colorbar (order must match drivers_code).
drivers_names_plotting = np.array(['Prcp', 'SM','TAS','Fire'])
cbar .ax.set_yticklabels(drivers_names_plotting)
# cbar .ax.set_yticklabels(drivers_names)
#plt.colorbar(h, orientation='horizontal', pad=0.04);
ax[1].set_title("Regional Distribution of Dominant Drivers of %s extremes \n"%(variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Regional_Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
            edgecolor = "w", bbox_inches = "tight")
fig.savefig(web_path + "Spatial_Regional_Dominant_Driver_%s_lag_%d.png"%(variable,plag),
            bbox_inches = "tight")
fig.savefig(path_save + "Correlations/Spatial_Maps/Dominant_Regional_Driver_%s_lag_%d.pdf"%(variable,plag),
            edgecolor = "w", bbox_inches = "tight")
# Calculation of the count of pixels of different regions...
# ...with positive and negative correlation coefficients!
# ========================================================
# For MRSO
# --------
dri_idx = 1 #for MRSO
plag = 1
# Dict to store the counts of pos/neg extremes
# --------------------------------------------
dict_mrso_cc_count = {}
for region_abr in srex_abr:
    dict_mrso_cc_count[region_abr] = {}
    for win_idx, win_str in enumerate(win_yr):
        dict_mrso_cc_count[region_abr][win_str] = {}
del region_abr,win_idx, win_str
# Calculation of counts:
# For each region/window: keep only cc values where MRSO appears among the
# ranked drivers, restrict to the region's grid cells, and count signs.
for region_abr in srex_abr:
    for win_idx, win_str in enumerate(win_yr):
        driver_cc_win_tmp = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
                            mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
                                   drivers_code[dri_idx]) .mask)
        filter_region = np.array(srex_abr) == region_abr
        region_idx = srex_idxs[filter_region][0]
        region_number = np.array(srex_nums)[filter_region][0]
        region_name = np.array(srex_names)[filter_region][0]
        # NOTE(review): this rebinds the outer loop variable `region_abr` to a
        # numpy str — same value, so behavior is unchanged.
        region_abr = np.array(srex_abr)[filter_region][0]
        region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
        region_mask = ~region_mask_not # Only the regions is masked
        # [region_mask]*4: one copy of the region mask per rank (assumes 4 ranks
        # along axis 0 of dom_dri_cc — TODO confirm).
        cc_values_tmp = driver_cc_win_tmp[np.array([region_mask]*4)][driver_cc_win_tmp[np.array([region_mask]*4)].mask ==False]
        dict_mrso_cc_count[region_abr][win_str]['pos'] = (cc_values_tmp > 0).sum()
        dict_mrso_cc_count[region_abr][win_str]['neg'] = (cc_values_tmp < 0).sum()
del region_abr,win_idx, win_str,cc_values_tmp,region_mask
# For TAS
# --------
# Same per-region/per-window sign counting as the MRSO section above, but for
# the TAS driver (dri_idx = 2).
dri_idx = 2 #for TAS
plag = 1
# Dict to store the counts of pos/neg extremes
# --------------------------------------------
dict_tas_cc_count = {}
for region_abr in srex_abr:
    dict_tas_cc_count[region_abr] = {}
    for win_idx, win_str in enumerate(win_yr):
        dict_tas_cc_count[region_abr][win_str] = {}
del region_abr,win_idx, win_str
# Calculation of counts:
for region_abr in srex_abr:
    for win_idx, win_str in enumerate(win_yr):
        driver_cc_win_tmp = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
                            mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
                                   drivers_code[dri_idx]) .mask)
        filter_region = np.array(srex_abr) == region_abr
        region_idx = srex_idxs[filter_region][0]
        region_number = np.array(srex_nums)[filter_region][0]
        region_name = np.array(srex_names)[filter_region][0]
        region_abr = np.array(srex_abr)[filter_region][0]
        region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
        region_mask = ~region_mask_not # Only the regions is masked
        # [region_mask]*4: one copy per rank (assumes 4 ranks — TODO confirm).
        cc_values_tmp = driver_cc_win_tmp[np.array([region_mask]*4)][driver_cc_win_tmp[np.array([region_mask]*4)].mask ==False]
        dict_tas_cc_count[region_abr][win_str]['pos'] = (cc_values_tmp > 0).sum()
        dict_tas_cc_count[region_abr][win_str]['neg'] = (cc_values_tmp < 0).sum()
del region_abr,win_idx, win_str,cc_values_tmp,region_mask
# Analysis and presentation of data on correlation coefficient:
# -------------------------------------------------------------
# Convert the raw pos/neg pixel counts per region into DataFrames (windows as
# rows) and append "%pos"/"%neg" percentage-share columns, rounded to 1 decimal.
# MRSO
df_mrso_cc = {}
for region_abr in srex_abr:
    counts_tbl = pd.DataFrame.from_dict(dict_mrso_cc_count[region_abr], orient='index')
    sign_total = counts_tbl.loc[:, "pos"] + counts_tbl.loc[:, "neg"]
    counts_tbl.loc[:, "%pos"] = (counts_tbl.loc[:, "pos"] * 100 / sign_total).round(decimals=1)
    counts_tbl.loc[:, "%neg"] = (counts_tbl.loc[:, "neg"] * 100 / sign_total).round(decimals=1)
    df_mrso_cc[region_abr] = counts_tbl
del region_abr
#TAS
df_tas_cc = {}
for region_abr in srex_abr:
    counts_tbl = pd.DataFrame.from_dict(dict_tas_cc_count[region_abr], orient='index')
    sign_total = counts_tbl.loc[:, "pos"] + counts_tbl.loc[:, "neg"]
    counts_tbl.loc[:, "%pos"] = (counts_tbl.loc[:, "pos"] * 100 / sign_total).round(decimals=1)
    counts_tbl.loc[:, "%neg"] = (counts_tbl.loc[:, "neg"] * 100 / sign_total).round(decimals=1)
    df_tas_cc[region_abr] = counts_tbl
del region_abr
# Ploting in Jupyter Notebook
# ---------------------------
# Percent count of pixels that are positively...
# ...or negatively correlated with MRSO
# Bar plot of the "%pos"/"%neg" columns (cols 2:) of the MRSO counts table for
# one region, skipping the first two windows (rows 2:); raw counts are written
# as white text inside the bars.
region_abr = srex_abr[2]
import pylab as plot
params = {'legend.fontsize': 20,
          'legend.handlelength': 2}
plot.rcParams.update(params)
df_mrso_cc[region_abr].iloc[2:,2:].plot.bar(stacked =False,
                                            figsize=(9,4),
                                            fontsize = 14,
                                            grid='--')
plt.legend(loc='upper right', bbox_to_anchor=(1.25,.6), fontsize=14, ncol=1)
plt.ylim([0,100])
# BUG FIX: the data plotted here comes from df_mrso_cc (soil moisture), but the
# title previously said "TAS" — a copy-paste error; corrected to MRSO.
plt.title(f"Percent count of the pixel with pos/neg correlation with MRSO for {region_abr}",
          loc='left',fontsize =15)
#plt.text(0,18,"Total Regions: 26", fontsize=14, fontweight='bold', color='brown')
# The number 10 or y axis represents the number of pixels
for w_idx,w_str in enumerate(win_yr[2:]):
    plt.text(w_idx,10,f"{int(np.round(df_mrso_cc[region_abr].loc[w_str,'pos']))}",
             ha='right', va='top',color='white',rotation=90,fontsize=10,weight='bold')
    plt.text(w_idx,10,f"{int(np.round(df_mrso_cc[region_abr].loc[w_str,'neg']))}",
             ha='left', va='top',color='white',rotation=90,fontsize=10,weight='bold')
# Percent count of pixels that are positively...
# ...or negatively correlated with TAS
# The Srex_index for NAS is 17
# NOTE(review): later code uses srex_abr[5] with a "# for NAS" comment — one of
# the two indices is mislabelled; confirm which element of srex_abr is NAS.
region_abr = srex_abr[17]
#fig1 = plt.figure(figsize = (9,5), dpi = 400)
import pylab as plot
params = {'legend.fontsize': 20,
          'legend.handlelength': 2}
plot.rcParams.update(params)
plt.style.use("classic")
# Bar plot of "%pos"/"%neg" (cols 2:) from the 3rd window onward (rows 2:).
df_tas_cc[region_abr].iloc[2:,2:].plot.bar(stacked =False,
                                           figsize=(9,4),
                                           fontsize = 14,
                                           color = ['royalblue','darkorange'])
plt.legend(['Positive Correlation', 'Negative Correlation'],
           loc='upper right', fontsize=12, ncol=1)
plt.ylim([0,100])
plt.title(f"Correlation of {variable.upper()} Extremes with TAS for {region_abr}",
          fontsize =16)
#plt.text(0,18,"Total Regions: 26", fontsize=14, fontweight='bold', color='brown')
plt.ylabel ("Percent Count of Grid-cells", fontsize=14)
plt.xlabel ("Time", fontsize=14)
plt.yticks (fontsize=12)
plt.xticks (fontsize=12, rotation=60)
plt.grid (which='both', ls='--', lw='.5', alpha=.4 )
# The number 10 or y axis represents the number of pixels
for w_idx,w_str in enumerate(win_yr[2:]):
    plt.text(w_idx+.04,10,f"{int(np.round(df_tas_cc[region_abr].loc[w_str,'pos']))}",
             ha='right', va='top',color='white',rotation=90,fontsize=12)
    plt.text(w_idx+.04,10,f"{int(np.round(df_tas_cc[region_abr].loc[w_str,'neg']))}",
             ha='left', va='top',color='white',rotation=90,fontsize=12)
plt.savefig(web_path + f"Change_in_Corr_of_{variable}_for_{region_abr}_lag_{plag}.pdf",
            edgecolor = "w", bbox_inches = "tight")
plt.savefig(web_path + f"Change_in_Corr_of_{variable}_for_{region_abr}_lag_{plag}.png",
            edgecolor = "w", bbox_inches = "tight")
plt.savefig(path_save + f"Change_in_Corr_of_{variable}_for_{region_abr}_lag_{plag}.pdf",
            edgecolor = "w", bbox_inches = "tight")
# Finding the locations of the extremes TCEs and correlations with TAS in NAS
# Disabled by default ('n'): when enabled, writes per-pixel TAS correlation
# coefficients (and their dominance masks) for the NAS region to text files,
# echoing each line to the terminal as well.
save_txt_tas_nas = 'n'
if save_txt_tas_nas in ['y','Y','yes']:
    import sys
    stdout_fileno = sys.stdout
    # Redirect sys.stdout to the file
    cli_var = 'tas'
    path_tas_nas = f"{cori_scratch}add_cmip6_data/{source_run}/ssp585/{member_run}/{cli_var}/Correlations/Region_NAS/"
    if os.path.isdir(path_tas_nas) == False:
        os.makedirs(path_tas_nas)
    sys.stdout = open(path_tas_nas+'loc_nbp_tas_nas.txt', 'w')
    sys.stdout.write (f"win_idx,lt_idx,ln_idx,lag1,lag2,lag3,lag4],Dom-T/F,Dom-T/F,Dom-T/F,Dom-T/F" + '\n')
    # Dom-T/F: if True indicates the Dominant Drivers
    # TAS is often dominant at lag 2
    locs_tas_nas = {}
    list_all_wins_tas_nas = []
    for win_idx in range(len(win_yr)):
        # Correlation coefficients for lag = 1 for 'NAS'
        # (uses `plag`, `dri_idx` and `region_mask` left over from the TAS
        # counting section above — order-dependent.)
        CC_TAS_all_ranks = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
                           mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
                                  drivers_code[dri_idx]) .mask)
        # figure out how to read only the non masked lat_lon for NAS
        tas_mask_true = (np.max(abs(CC_TAS_all_ranks),0).mask )
        lt_ln_mat = create_seq_mat(nlat=lat.size, nlon=lon.size)
        # list of all location_ids in the global with a valid cc of TAS :
        tas_global_1d_locs = lt_ln_mat[~tas_mask_true]
        # list of all location_ids in the SREX region:
        region_1d_locs = lt_ln_mat[region_mask]
        # list of location_ids in a region with a valid tas cc
        tas_region_1d_locs = np.intersect1d ( tas_global_1d_locs,region_1d_locs )
        list_locs_tmp = []
        for pixel in tas_region_1d_locs:
            lt,ln = np.argwhere(lt_ln_mat == pixel)[0]
            #print (win_idx, lt,ln, CC_TAS_all_ranks[:,lt,ln].data,CC_TAS_all_ranks[:,lt,ln].mask)
            tmp_text= (f"{win_idx},{lt},{ln},{CC_TAS_all_ranks[:,lt,ln].data[0]},"
                       f"{CC_TAS_all_ranks[:,lt,ln].data[1]},{CC_TAS_all_ranks[:,lt,ln].data[2]},"
                       + f"{CC_TAS_all_ranks[:,lt,ln].data[3]},{CC_TAS_all_ranks[:,lt,ln].mask[0]},"
                       + f"{CC_TAS_all_ranks[:,lt,ln].mask[1]},{CC_TAS_all_ranks[:,lt,ln].mask[2]},"
                       + f"{CC_TAS_all_ranks[:,lt,ln].mask[3]}")
            list_locs_tmp.append(f"{lt}_{ln}")
            list_all_wins_tas_nas.append(f"{lt}_{ln}")
            # Prints to the redirected stdout (Output.txt)
            sys.stdout.write(tmp_text + '\n')
            # Prints to the actual saved stdout handler
            stdout_fileno.write(tmp_text + '\n')
        locs_tas_nas[win_idx] = np.array(list_locs_tmp)
    # List and count of the locations with correlation coefficients
    # NOTE(review): the np.unique call below is executed twice — the second call
    # is redundant (left as-is; this branch is disabled).
    tas_nas_unique_locs,tas_nas_counts= np.unique(np.array(list_all_wins_tas_nas), return_counts=1)
    # Saving the Common locationa and count of the occurance for all wins
    tas_nas_unique_locs,tas_nas_counts= np.unique(np.array(list_all_wins_tas_nas), return_counts=1)
    # NOTE(review): at this point sys.stdout is still the first output file, so
    # `stdout_fileno` now echoes into that file rather than the terminal —
    # likely unintended; verify before enabling this branch.
    stdout_fileno = sys.stdout
    sys.stdout = open(path_tas_nas+'locs_count_nbp_tas_nas.txt', 'w')
    sys.stdout.write (f"locs, count" + '\n')
    for idx in range(len(tas_nas_unique_locs)):
        tmp_text = f"{tas_nas_unique_locs[idx]},{tas_nas_counts[idx]}"
        sys.stdout.write(tmp_text + '\n')
        stdout_fileno.write(tmp_text + '\n')
    # Analysis of the locations are done in an Excel sheet in the Document/cmip6/Region_NAS
# Calculating the change in TAS at different quantiles
# ====================================================
# Disabled by default ('n'): when enabled, reads the TAS netCDF, computes
# per-window (25-yr, 300-month) temperature quantiles for one pixel and for the
# NAS region, and plots their time series with trend slopes.
Calculate_quantile = 'n'
if Calculate_quantile in ['y','Y','yes']:
    # Calculating the quantiles of climate variable at pixel levels
    import xarray as xr
    cli_var = 'tas'
    path_cli_var = f"{cori_scratch}add_cmip6_data/{source_run}/ssp585/{member_run}/{cli_var}"
    file_cli_var = f"{path_cli_var}/{source_run}_ssp585_{member_run}_{cli_var}.nc"
    # Reading the tas
    nc_cli = xr.open_dataset(file_cli_var) # reading nc file
    tas = nc_cli.tas # tas as object
    # extracting data for a location
    # -------------------------------
    lat_idx = 167
    lon_idx = 90
    # cli variable for a pixel
    tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data
    # extracting data for a location
    # -------------------------------
    # NOTE(review): this second assignment overwrites the (167, 90) pixel above;
    # only (157, 52) is actually analyzed.
    lat_idx = 157
    lon_idx = 52
    # cli variable for a pixel
    tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data
    # Deciles 0.1 .. 0.9
    Quantiles = np.arange(0.1,1,.1)
    # Saving the quantiles and tas in Celsius
    tas_quant_px = {}
    for Quant in Quantiles:
        tas_quant_px[Quant] = {}
    # Finding the lowest and highest temperatures of tas:
    tas_low = 0
    tas_high = 0
    for Quant in Quantiles:
        for w_idx in range(10):
            # 300 months = one 25-yr window; converted from Kelvin to Celsius.
            tas_px_win = tas_px[w_idx*300:(w_idx+1)*300] - 273.15
            tas_q_px = np.quantile(tas_px_win,Quant)
            tas_quant_px[Quant][w_idx] = tas_q_px
            if tas_q_px < tas_low:
                tas_low = tas_q_px
            if tas_q_px > tas_high:
                tas_high = tas_q_px
    # Dataframe from dict of quantiles of a pixel
    df_quant_px = pd.DataFrame.from_dict(tas_quant_px)
    # the columns are the quantiles and the rows are the window index
    # rate of increase of Tas per window
    slope_px = []
    for Quant in Quantiles:
        slope_px.append(stats.linregress(range(10),list(tas_quant_px[Quant].values())))
    df_quant_px = pd.DataFrame.from_dict(tas_quant_px)
    #q = .1
    fig = plt.figure()
    ax = plt.subplot(111)
    for q in Quantiles:
        ax.plot(range(10),df_quant_px.loc[:,q], label= f"{q:.1f}")
    # text of slope of rise in temperature per window
    ax.text(8,df_quant_px.loc[7,.1], f"{slope_px[0][0]:.2f}" ) # Quant = .1
    ax.text(8,df_quant_px.loc[7,.5], f"{slope_px[4][0]:.2f}" ) # Quant = .5
    ax.text(8,df_quant_px.loc[7,.9], f"{slope_px[-1][0]:.2f}" ) # Quant = .9
    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * .9])
    # Put a legend below current axis
    ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5),
              fancybox=True, shadow=True, ncol=1,
              title='Quantiles')
    #ax.set_ylim(np.floor(tas_low)-1,np.ceil(tas_high)+1)
    # Show duplicate y-axis:
    plt.tick_params(labeltop=False, labelright=True)
    # Show grid
    ax.grid (which='both', ls='--', lw='.5', alpha=.4 )
    ax.set_ylabel ("Temperature (Celsius)", fontsize=14)
    #ax.set_yticklabels(fontsize= 10)
    ax.set_xticklabels(win_yr)
    for tick in ax.get_xticklabels():
        tick.set_rotation(60)
    ax.set_xlabel ("Time", fontsize=14)
    ax = plt.gca()
    ax.tick_params(axis = 'both', which = 'major', labelsize = 12)
    ax.tick_params(axis = 'both', which = 'minor', labelsize = 12)
    plt.title (f"TAS at lat={lat_idx},lon={lon_idx}", fontsize = 14)
    # Area weighted mean and quantile tas distribution of the region of TAS
    # =====================================================================
    # To calculate the area-weighted average of temperature:
    # Reading files/data
    tas = nc_cli.tas # tas as object
    area = nc_cli.areacella # area as object
    #mask of the region
    region_abr = srex_abr[5] # for NAS
    filter_region = np.array(srex_abr) == region_abr # for NAS
    region_number = np.array(srex_nums)[filter_region][0]
    region_name = np.array(srex_names)[filter_region][0]
    region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
    region_mask = ~region_mask_not # Only the regions is True or 1s
    # Masking the area to only the region of interest
    area_region= np.ma.masked_array(area,mask=region_mask_not)
    # mean area weighted average for every window
    print (f"Region: {srex_abr[5]}")
    print ("Area Weighted Mean Temperature")
    print ("---------")
    for w_idx in range(10):
        tas_awm_region = np.ma.average(np.mean(tas[w_idx*300:(w_idx+1)*300].data,axis=0), weights=area_region) - 273.15
        print (f"for window {win_yr[w_idx]}, AWM: {tas_awm_region:.2f} Celsius")
    # Quantiles for the region of NAS
    # Saving the quantiles and tas in Celsius
    tas_quant_reg = {}
    for Quant in Quantiles:
        tas_quant_reg[Quant] = {}
    # Zero out all cells outside the region; zeros are excluded below.
    tas_reg = tas.data * np.array([region_mask]*tas.shape[0])
    # Finding the lowest and highest temperatures of tas:
    tas_low_reg = 0
    tas_high_reg = 0
    for Quant in Quantiles:
        for w_idx in range(10):
            tas_reg_win = tas_reg[w_idx*300:(w_idx+1)*300]
            tas_q_reg = np.quantile(tas_reg_win[tas_reg_win!=0],Quant) # finding quantiles for non-zero outside the domain values
            tas_quant_reg[Quant][w_idx] = tas_q_reg - 273.15
            if tas_q_reg < tas_low_reg:
                tas_low_reg = tas_q_reg
            if tas_q_reg > tas_high_reg:
                tas_high_reg = tas_q_reg
    # Dataframe from dict of quantiles of a region
    df_quant_reg = pd.DataFrame.from_dict(tas_quant_reg)
    # the columns are the quantiles and the rows are the window index
    # rate of increase of Tas per window
    slope_reg = []
    for Quant in Quantiles:
        slope_reg.append(stats.linregress(range(10),list(tas_quant_reg[Quant].values())))
    # Plot of Quantilies of regions
    fig1 = plt.figure()
    ax = plt.subplot(111)
    # NOTE(review): this shadows the module-level `color_list` used by the TS
    # plot earlier — harmless in sequential execution, but fragile.
    color_list = []
    for q in Quantiles:
        p = ax.plot(range(10),df_quant_reg.loc[:,q], label= f"{q:.1f}")
        color_list.append(p[0].get_color())
    # text of slope of rise in temperature per window
    ax.text(6.5,df_quant_reg.loc[7,.1], f"{slope_reg[0][0]:.2f}", color=color_list[0] ) # Quant = .1
    ax.text(6.5,df_quant_reg.loc[7,.5], f"{slope_reg[4][0]:.2f}", color=color_list[4] ) # Quant = .5
    ax.text(6.5,df_quant_reg.loc[7,.9], f"{slope_reg[-1][0]:.2f}", color=color_list[-1] ) # Quant = .9
    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * .9])
    # Put a legend below current axis
    ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5),
              fancybox=True, shadow=True, ncol=1,
              title='Quantiles')
    #ax.set_ylim(np.floor(tas_low)-1,np.ceil(tas_high)+1)
    # Show duplicate y-axis:
    plt.tick_params(labeltop=False, labelright=True)
    # Show grid
    ax.grid (which='both', ls='--', lw='.5', alpha=.4 )
    ax.set_ylabel ("Temperature (Celsius)", fontsize=14)
    #ax.set_yticklabels(fontsize= 10)
    ax.set_xticklabels(win_yr)
    for tick in ax.get_xticklabels():
        tick.set_rotation(60)
    ax.set_xlabel ("Time", fontsize=14)
    ax = plt.gca()
    ax.tick_params(axis = 'both', which = 'major', labelsize = 12)
    ax.tick_params(axis = 'both', which = 'minor', labelsize = 12)
    #plt.title (f"TAS at {region_abr}", fontsize = 14)
    plt.savefig(web_path + f"TAS_quantiles_at_{region_abr}.pdf",
                edgecolor = "w", bbox_inches = "tight")
    plt.savefig(web_path + f"TAS_quantiles_at_{region_abr}.png",
                edgecolor = "w", bbox_inches = "tight")
# Senisitivity test: First order of TAS on NBP (regional) Similar to Pan 2020
# ===========================================================================
# Disabled by default ('n'): when enabled, regresses detrended NBP anomalies on
# detrended TAS anomalies (OLS, per 10-yr step) for the NAS region and for one
# pixel. NOTE(review): this branch uses `tas`, `area` and `xr` created inside
# the (also disabled) Calculate_quantile branch above — it will NameError if
# enabled alone; confirm both flags are switched together.
Sensitivity_test = 'n'
if Sensitivity_test in ['y','Y','yes']:
    file_nbp = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/CESM2_ssp585_%s_%s_gC.nc"%(
               source_run,member_run, variable,member_run,variable)
    nc_nbp = xr.open_dataset(file_nbp)
    nbp = nc_nbp.nbp # nbp
    lf = nc_nbp.sftlf # land fraction
    #mask of the region
    region_abr = srex_abr[5] # for NAS
    filter_region = np.array(srex_abr) == region_abr # for NAS
    region_number = np.array(srex_nums)[filter_region][0]
    region_name = np.array(srex_names)[filter_region][0]
    region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
    region_mask = ~region_mask_not # Only the regions is masked, True: region
    # area of the regions
    area_region= np.ma.masked_array(area, mask=region_mask_not)
    # mean area weighted average of TAS,NBP for NAS
    nas_awm_tas = []
    nas_awm_nbp = []
    nas_90q_tas = []
    nas_10q_tas = []
    for idx in range(tas.shape[0]):
        # Per-month regional aggregates: area-weighted means (NBP additionally
        # weighted by land fraction) and 10th/90th percentiles of TAS.
        tas_awm_region = np.ma.average(tas[idx].data, weights=area_region) - 273.15
        nbp_awm_region = np.ma.average(np.ma.masked_invalid(nbp[idx].data), weights=area_region * lf)
        nas_90q_region = np.quantile(tas[idx].data,.9) - 273.15
        nas_10q_region = np.quantile(tas[idx].data,.1) - 273.15
        nas_awm_tas.append(tas_awm_region)
        nas_awm_nbp.append(nbp_awm_region)
        nas_90q_tas.append(nas_90q_region)
        nas_10q_tas.append(nas_10q_region)
        del tas_awm_region, nbp_awm_region,nas_90q_region
        # print (f"for window {win_yr[w_idx]}, AWM: {tas_awm_region:.2f} Celsius")
    # rolling mean of a month of NBP (10 years)
    #pd_con_var_global_yr_tot_awm = pd.Series (con_var_global_yr_tot_awm)
    #con_var_global_rm5yr_tot_awm = pd_con_var_global_yr_tot_awm.rolling(window=5,center = False).mean()# 5 year rolling mean
    # Centered 120-month (10-yr) rolling means used as trend estimates.
    pd_nas_tas_awm = pd.Series (nas_awm_tas)
    pd_nas_tas_awm_rm_10yrs = pd_nas_tas_awm.rolling(window=10*12,center = True).mean()
    pd_nas_nbp_awm = pd.Series (nas_awm_nbp)
    pd_nas_nbp_awm_rm_10yrs = pd_nas_nbp_awm.rolling(window=10*12,center = True).mean()
    pd_nas_tas_90q = pd.Series (nas_90q_tas)
    pd_nas_tas_90q_rm_10yrs = pd_nas_tas_90q.rolling(window=10*12,center = True).mean()
    pd_nas_tas_10q = pd.Series (nas_10q_tas)
    pd_nas_tas_10q_rm_10yrs = pd_nas_tas_10q.rolling(window=10*12,center = True).mean()
    # Detrended anomalies of TAS and NBP ignoring the first 5 and last 5 years
    nas_awm_tas_detrend = nas_awm_tas [60:-59] - pd_nas_tas_awm_rm_10yrs[60:-59]
    nas_awm_nbp_detrend = nas_awm_nbp [60:-59] - pd_nas_nbp_awm_rm_10yrs[60:-59]
    nas_90q_tas_detrend = nas_90q_tas [60:-59] - pd_nas_tas_90q_rm_10yrs[60:-59]
    nas_10q_tas_detrend = nas_10q_tas [60:-59] - pd_nas_tas_10q_rm_10yrs[60:-59]
    # Sensitivity of the variable_ detrended
    step_size = 120 # 10 years
    Sensitivity_detrend = [region_abr]
    for i in range (int(nas_awm_nbp_detrend.size/step_size)):
        frame = { 'nbp': nas_awm_nbp_detrend[i*step_size:(i+1)*step_size], 'tas': nas_awm_tas_detrend[i*step_size:(i+1)*step_size] }
        df_nbp_tas_detrend = pd.DataFrame(frame)
        # OLS slope of NBP on TAS for this 10-yr chunk.
        lm = smf.ols(formula='nbp ~ tas', data=df_nbp_tas_detrend).fit()
        #print(lm.params)
        Sensitivity_detrend.append(np.format_float_scientific(lm.params[-1], exp_digits=2))
    print ("Sensitivity Test Result: ",Sensitivity_detrend )
    # Sensitivity at a pixel
    # ----------------------
    nas_awm_tas = []
    nas_awm_nbp = []
    nas_90q_tas = []
    lat_idx = 150
    lon_idx = 91
    tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data - 273.15
    nbp_px = nbp.isel(lat=lat_idx,lon=lon_idx).data
    pd_nas_tas_px = pd.Series(tas_px)
    pd_nas_tas_px_rm_10yrs = pd_nas_tas_px.rolling(window=10*12,center = True).mean()
    pd_nas_nbp_px = pd.Series(nbp_px)
    pd_nas_nbp_px_rm_10yrs = pd_nas_nbp_px.rolling(window=10*12,center = True).mean()
    # Detrended anomalies of TAS and NBP ignoring the first 5 and last 5 years
    nas_tas_px_detrend = tas_px [60:-59] - pd_nas_tas_px_rm_10yrs[60:-59]
    nas_nbp_px_detrend = nbp_px [60:-59] - pd_nas_nbp_px_rm_10yrs[60:-59]
    # Sensitivity of the variable_ detrended
    # NOTE(review): comment says "10 years" but step_size=60 months is 5 years
    # here (the regional pass above used 120) — confirm which was intended.
    step_size = 60 # 10 years
    Sensitivity_detrend_px = [f"{lat_idx}_{lon_idx}"]
    for i in range (int(nas_nbp_px_detrend.size/step_size)):
        frame = { 'nbp': nas_nbp_px_detrend[i*step_size:(i+1)*step_size], 'tas': nas_tas_px_detrend[i*step_size:(i+1)*step_size] }
        df_nbp_tas_detrend = pd.DataFrame(frame)
        lm = smf.ols(formula='nbp ~ tas', data=df_nbp_tas_detrend).fit()
        #print(lm.params)
        Sensitivity_detrend_px.append(np.format_float_scientific(lm.params[-1], exp_digits=2))
    print ("Sensitivity Test Result: ",Sensitivity_detrend_px )
Concurrency_test = "no"
if Concurrency_test in ['y','Y','yes']:
# To find the concurrency of the higher quantile Tas and NBP (lower quantile) at a pixel
# ======================================================================================
w_idx = 7
lat_idx = 150
lon_idx = 91
per_tas = .9 #percentile of tas
per_nbp = .1 # percentile of nbp
lag = 3 # lag months for lag>1
tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data - 273.15
nbp_px = nbp.isel(lat=lat_idx,lon=lon_idx).data
pd_nas_tas_px = pd.Series(tas_px)
pd_nas_tas_px_rm_10yrs = pd_nas_tas_px.rolling(window=10*12,center = True).mean()
pd_nas_nbp_px = pd.Series(nbp_px)
pd_nas_nbp_px_rm_10yrs = pd_nas_nbp_px.rolling(window=10*12,center = True).mean()
# Detrended anomalies of TAS and NBP ignoring the first 5 and last 5 years
nas_tas_px_detrend = tas_px [60:-59] - pd_nas_tas_px_rm_10yrs[60:-59]
nas_nbp_px_detrend = nbp_px [60:-59] - pd_nas_nbp_px_rm_10yrs[60:-59]
tas_win_px = tas_px[w_idx*300:(w_idx+1)*300]
nbp_win_px = nbp_px[w_idx*300:(w_idx+1)*300]
nas_tas_win_px_detrend = nas_tas_px_detrend[w_idx*300:(w_idx+1)*300]
nas_nbp_win_px_detrend = nas_nbp_px_detrend[w_idx*300:(w_idx+1)*300]
print ("What is the concurrency of higher quantile of TAS and lower quantile of NBP?")
print ("or checking the concurrency of hot temperatures and negative NBPs")
print ("-----------------")
print (f"Window: {win_yr[w_idx]}")
print(f"At location Lat: {round(nbp.lat.data[lat_idx],2)}, Lon: {round(nbp.lon.data[lon_idx],2)}")
print(f"Lag: {lag} months")
print (f"Quantile of TAS: {per_tas}")
print (f"Quantile of NBP: {per_nbp}")
print (f"Total Pixels : {round(tas_win_px.size*(1-per_tas))}")
print (f"Concurrency : {((tas_win_px>np.quantile(tas_win_px,per_tas))[:-lag] * (nbp_win_px<np.quantile(nbp_win_px,per_nbp))[lag:]).sum()}")
print ("Hot Months :", np.array(['1:Jan','2:Feb','3:Mar','4:Apr','5:May','6:Jun','7:July','8:Aug','9:Sep','10:Oct','11:Nov','12:Dec']*25)[
(tas_win_px>np.quantile(tas_win_px,per_tas))])
# Sensitivity Analysis
# Senisitivity test: First order of TAS on NBP (regional) Similar to Pan 2020
Sensivity_test_reg = 'no'
if Sensivity_test_reg in ['yes', 'y', 'Y']:
file_nbp = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/CESM2_ssp585_%s_%s_gC.nc"%(
source_run,member_run, variable,member_run,variable)
nc_nbp = xr.open_dataset(file_nbp)
nbp = nc_nbp.nbp # nbp
lf = nc_nbp.sftlf # land fraction
#mask of the region
region_abr = srex_abr[5] # for NAS
filter_region = np.array(srex_abr) == region_abr # for NAS
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
region_mask = ~region_mask_not # Only the regions is masked, True: region
# area of the regions
area_region= np.ma.masked_array(area, mask=region_mask_not)
# mean area weighted average of TAS,NBP for NAS
nas_awm_tas = []
nas_awm_nbp = []
nas_90q_tas = []
nas_10q_tas = []
for idx in range(tas.shape[0]):
tas_awm_region = np.ma.average(tas[idx].data, weights=area_region) - 273.15
nbp_awm_region = np.ma.average(np.ma.masked_invalid(nbp[idx].data), weights=area_region * lf)
nas_90q_region = np.quantile(tas[idx].data,.9) - 273.15
nas_10q_region = np.quantile(tas[idx].data,.1) - 273.15
nas_awm_tas.append(tas_awm_region)
nas_awm_nbp.append(nbp_awm_region)
nas_90q_tas.append(nas_90q_region)
nas_10q_tas.append(nas_10q_region)
del tas_awm_region, nbp_awm_region,nas_90q_region
# print (f"for window {win_yr[w_idx]}, AWM: {tas_awm_region:.2f} Celsius")
# rolling mean of a month of NBP (10 years)
#pd_con_var_global_yr_tot_awm = pd.Series (con_var_global_yr_tot_awm)
#con_var_global_rm5yr_tot_awm = pd_con_var_global_yr_tot_awm.rolling(window=5,center = False).mean()# 5 year rolling mean
pd_nas_tas_awm = pd.Series (nas_awm_tas)
pd_nas_tas_awm_rm_10yrs = pd_nas_tas_awm.rolling(window=10*12,center = True).mean()
pd_nas_nbp_awm = pd.Series (nas_awm_nbp)
pd_nas_nbp_awm_rm_10yrs = pd_nas_nbp_awm.rolling(window=10*12,center = True).mean()
pd_nas_tas_90q = pd.Series (nas_90q_tas)
pd_nas_tas_90q_rm_10yrs = pd_nas_tas_90q.rolling(window=10*12,center = True).mean()
pd_nas_tas_10q = pd.Series (nas_10q_tas)
pd_nas_tas_10q_rm_10yrs = pd_nas_tas_10q.rolling(window=10*12,center = True).mean()
# Detrended anomalies of TAS and NBP ignoring the first 5 and last 5 years
nas_awm_tas_detrend = nas_awm_tas [60:-59] - pd_nas_tas_awm_rm_10yrs[60:-59]
nas_awm_nbp_detrend = nas_awm_nbp [60:-59] - pd_nas_nbp_awm_rm_10yrs[60:-59]
nas_90q_tas_detrend = nas_90q_tas [60:-59] - pd_nas_tas_90q_rm_10yrs[60:-59]
nas_10q_tas_detrend = nas_10q_tas [60:-59] - pd_nas_tas_10q_rm_10yrs[60:-59]
# Sensitivity of the variable
import statsmodels.formula.api as smf
step_size = 120 # 10 years
Sensitivity = [region_abr]
for i in range (int(nas_awm_nbp_detrend.size/step_size)):
frame = { 'nbp': nas_awm_nbp[i*step_size:(i+1)*step_size], 'tas': nas_awm_tas[i*step_size:(i+1)*step_size] }
df_nbp_tas_detrend = pd.DataFrame(frame)
lm = smf.ols(formula='nbp ~ tas', data=df_nbp_tas_detrend).fit()
#print(lm.params)
Sensitivity.append(np.format_float_scientific(lm.params[-1], exp_digits=2))
print (Sensitivity)
# Sensitivity of the variable_ detrended
step_size = 120 # 10 years
Sensitivity_detrend = [region_abr]
for i in range (int(nas_awm_nbp_detrend.size/step_size)):
frame = { 'nbp': nas_awm_nbp_detrend[i*step_size:(i+1)*step_size], 'tas': nas_awm_tas_detrend[i*step_size:(i+1)*step_size] }
df_nbp_tas_detrend = pd.DataFrame(frame)
lm = smf.ols(formula='nbp ~ tas', data=df_nbp_tas_detrend).fit()
#print(lm.params)
Sensitivity_detrend.append(np.format_float_scientific(lm.params[-1], exp_digits=2))
print (Sensitivity_detrend)
# Sensitivity at a pixel
nas_awm_tas = []
nas_awm_nbp = []
nas_90q_tas = []
lat_idx = 150
lon_idx = 91
tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data - 273.15
nbp_px = nbp.isel(lat=lat_idx,lon=lon_idx).data
pd_nas_tas_px = | pd.Series(tas_px) | pandas.Series |
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import pandas as pd
import numpy as np
from os.path import isfile, join
from os import listdir
import os
from collections import OrderedDict
from hrate.data_handling.selfloops import read_selfloops_file
import logging
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
app = dash.Dash(__name__)
server = app.server
server.secret_key = os.environ.get('SECRET_KEY', 'my-secret-key')
# LOADING data
DATADIR = 'data/selfloops'
file_paths = [join(DATADIR, f) for f in listdir(DATADIR) if isfile(join(DATADIR, f)) if f.endswith('.txt')]
data = OrderedDict()
for f in file_paths:
data[f.split('/')[-1]] = read_selfloops_file(f)
available_files = list(data.keys())
app.layout = html.Div([
html.H4('Select File'),
html.Div([
dcc.Dropdown(
id='file_name',
options=[{'label': i, 'value': i} for i in available_files],
value=available_files[0]
)
]),
html.H3('Heart Rate'),
dcc.Markdown(
'''
- To zoom in, simply use your mouse
- To visualise RR intervals in the bottom plot, use the __Box Select__ option.
- The light pink area is the selected one, and on the histogram you see the HR distribution in that range
- The grey area is the one which is visualised in the bottom plot. It is restricted to 5 minutes due to rendering problems.
'''
),
html.Div([
html.Div([dcc.Graph(id='HR_plot')], style={'width': '60%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([dcc.Graph(id='HR_summary')], style={'width': '35%', 'display': 'inline-block', 'padding': '20 0'}),
]),
html.H4('Beat-by-beat Data'),
html.Div(
[
html.Div([dcc.Graph(id='RR_plot')], style={'width': '60%', 'display': 'inline-block', 'padding': '0'}),
html.H4('Summary RR events', style={'width': '35%', 'padding': '0'}),
html.Div([html.Pre(id='RR_summary')], style={'width': '35%', 'padding': '0'}),
]
),
html.Div(
[
html.H4('Summary HR events', style={'width': '35%', 'padding': '0'}),
html.Div([html.Pre(id='HR_summary_stats')], style={'width': '35%', 'padding': '0'}),
]
)
])
@app.callback(
dash.dependencies.Output('HR_plot', 'figure'),
[dash.dependencies.Input('file_name', 'value'),
dash.dependencies.Input('RR_plot', 'figure'),
dash.dependencies.Input('HR_plot', 'selectedData')])
def update_HR_graph(file_name, rr_plot, hr_selected_data):
dff = data[file_name]
# get range of RR, to be drawn here
plot_data = []
if hr_selected_data is not None:
x_stamp_min = pd.to_datetime(hr_selected_data['range']['x'][0])
x_stamp_max = pd.to_datetime(hr_selected_data['range']['x'][1])
y_max = dff['HR'].max()
RR_range_plot = go.Scatter(
x=[x_stamp_min, x_stamp_max],
y=[y_max, y_max],
fill='tozeroy',
mode='lines',
line=dict(
color='#EB89B5',
),
showlegend=False,
)
plot_data.append(RR_range_plot)
if 'range' in rr_plot['layout']['xaxis'].keys():
RR_plot_range = rr_plot['layout']['xaxis']['range']
y_max = dff['HR'].max()
y_min = dff['HR'].min()
RR_range_plot = go.Scatter(
x=RR_plot_range,
y=[y_max, y_max],
fill='tozeroy',
mode='lines',
line=dict(
color='rgb(80, 80, 80)',
),
showlegend=False,
)
plot_data.append(RR_range_plot)
dff = resample_df(dff)
HR_data = go.Scatter(
x=dff['Time_stamp'],
y=dff['HR'],
mode='lines+markers',
line={
'color': ('rgb(205, 12, 24)'),
'width': 2
},
marker={
'size': 4
},
showlegend=False
)
plot_data.append(HR_data)
figure_hr_plot = {
'data': plot_data,
'layout': go.Layout(
xaxis={
"rangeselector": {
"buttons": [
{
"count": 5,
"step": "minute",
"stepmode": "backward",
"label": "5m"
},
{
"count": 30,
"step": "minute",
"stepmode": "backward",
"label": "30m"
},
{
"count": 1,
"step": "hour",
"stepmode": "backward",
"label": "1h"
},
{
"count": 4,
"step": "hour",
"stepmode": "backward",
"label": "4h"
},
{
"step": "all"
}
]
},
"rangeslider": {},
"type": "date"
},
yaxis={
'title': 'Heart Rate (bpm)',
},
margin={'l': 40, 'b': 40, 't': 10, 'r': 0},
hovermode='closest'
)}
return figure_hr_plot
@app.callback(
dash.dependencies.Output('HR_summary', 'figure'),
[dash.dependencies.Input('file_name', 'value'),
dash.dependencies.Input('HR_plot', 'selectedData'),
])
def update_hr_summary_figure(file_name, HR_plot_selected):
# TODO: do create and update!
# TODO: histogram is wrong, it needs to have resampled uniform data on the x-axis!!
dff = data[file_name]
plot_data = [go.Histogram(
y=dff['HR'].values,
marker=dict(
color=('rgb(205, 12, 24)')
),
histnorm='probability',
name='All data',
opacity=0.4
)]
# TODO: bug! this is called rarely and does not work, because the range is not the xaxis lim!!!
if HR_plot_selected is not None:
x_stamp_min = pd.to_datetime(HR_plot_selected['range']['x'][0])
x_stamp_max = | pd.to_datetime(HR_plot_selected['range']['x'][1]) | pandas.to_datetime |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": | pd.date_range("20010101", periods=10) | pandas.date_range |
from unittest import TestCase, main
import datetime
import pandas as pd
import numpy as np
import numpy.testing as npt
import pandas.util.testing as pdt
from break4w.question import (Question,
_check_cmap
)
class QuestionTest(TestCase):
def setUp(self):
self.name = 'player_name'
self.description = '<NAME> Players'
self.dtype = str
self.map_ = pd.DataFrame([['Bitty', 'Ransom', 'Holster'],
['2', '4', '4'],
['False', 'True', 'True']],
index=['player_name', 'years_on_team',
'team_captain']).T
self.q = Question(name=self.name,
description=self.description,
dtype=self.dtype,
free_response=True,
)
def test_init_default(self):
self.assertEqual(self.name, self.q.name)
self.assertEqual(self.description, self.q.description)
self.assertEqual(self.dtype, self.q.dtype)
self.assertEqual('Player Name', self.q.clean_name)
self.assertEqual('Question', self.q.type)
self.assertTrue(self.q.free_response)
self.assertFalse(self.q.mimarks)
self.assertFalse(self.q.ontology)
self.assertEqual(self.q.missing,
{'not applicable', 'missing: not provided',
'missing: not collected', 'missing: restricted',
'not provided', 'not collected', 'restricted'})
self.assertEqual(self.q.colormap, None)
self.assertEqual(self.q.blanks, None)
self.assertEqual(self.q.log, [])
self.assertEqual(self.q.source_columns, [])
self.assertEqual(self.q.derivative_columns, [])
self.assertEqual(self.q.notes, None)
def test_init_source_derivative_list(self):
q = Question(name=self.name,
description=self.description,
dtype=self.dtype,
source_columns=['SMH'],
derivative_columns=['next_step'],
school='Samwell',
)
self.assertEqual(q.source_columns, ['SMH'])
self.assertEqual(q.derivative_columns, ['next_step'])
self.assertTrue(hasattr(q, 'school'))
self.assertEqual(q.school, 'Samwell')
def test_init_error_name(self):
with self.assertRaises(TypeError):
Question(name=1,
description=self.description,
dtype=self.dtype,
)
def test_init_error_description(self):
with self.assertRaises(TypeError):
Question(name=self.name,
description=self.dtype,
dtype=self.dtype,
)
def test_init_error_description_length(self):
d = ('Check, Please! is a 2013 webcomic written and '
'illustrated by <NAME>. The webcomic follows '
'vlogger and figure-turned-ice hockey skater Eric '
'"Bitty" Bittle as he deals with hockey culture in '
'college, as well as his identity as a gay man.')
with self.assertRaises(ValueError):
Question(name=self.name,
description=d,
dtype=self.dtype,
)
def test_init_error_dtype(self):
with self.assertRaises(TypeError):
Question(name=self.name,
description=self.description,
dtype=self.description,
)
def test_init_error_clean_name(self):
with self.assertRaises(TypeError):
Question(name=self.name,
description=self.description,
dtype=self.dtype,
clean_name=self.dtype
)
def test_init_clean_name_missing_str(self):
q = Question(name=self.name,
description=self.description,
dtype=self.dtype,
clean_name='Player',
missing='Bitty')
self.assertEqual(q.clean_name, 'Player')
self.assertEqual(q.missing, set(['Bitty']))
def test_init_missing_list(self):
q = Question(name=self.name,
description=self.description,
dtype=self.dtype,
missing=['Bitty'])
self.assertEqual(q.missing, set(['Bitty']))
def test__str__(self):
known = """
------------------------------------------------------------------------------------
player_name (Question str)
<NAME> Players
------------------------------------------------------------------------------------
"""
test = self.q.__str__()
self.assertEqual(known, test)
def test_update_log(self):
self.assertEqual(len(self.q.log), 0)
self.q._update_log(
command='dibs',
transform_type='replace',
transformation='metaphysical goalie johnson > Bitty'
)
self.assertEqual(len(self.q.log), 1)
log_ = self.q.log[0]
self.assertTrue(isinstance(log_, dict))
self.assertEqual({'timestamp', 'column', 'command', 'transform_type',
'transformation'}, set(log_.keys()))
self.assertTrue(isinstance(log_['timestamp'], datetime.datetime))
self.assertEqual(log_['column'], 'player_name')
self.assertEqual(log_['command'], 'dibs')
self.assertEqual(log_['transform_type'], 'replace')
self.assertEqual(log_['transformation'],
'metaphysical goalie johnson > Bitty')
def test_write_provenance(self):
known_log = pd.DataFrame(
np.array([[datetime.datetime.now(), 'Write Log', 'team_captain',
'recording', '']]),
columns=['timestamp', 'command', 'column', 'transform_type',
'transformation']
)
q = Question(name='team_captain',
description='who is has the C or AC',
dtype=bool
)
log_ = q.write_provenance()
self.assertEqual(known_log.shape, log_.shape)
pdt.assert_index_equal(known_log.columns, log_.columns)
pdt.assert_series_equal(known_log['column'], log_['column'])
pdt.assert_series_equal(known_log['command'], log_['command'])
pdt.assert_series_equal(known_log['transform_type'],
log_['transform_type'])
pdt.assert_series_equal(known_log['transformation'],
log_['transformation'])
def test_read_provenance(self):
with self.assertRaises(NotImplementedError):
self.q._read_provenance('fp_')
def test_check_ontology(self):
with self.assertRaises(NotImplementedError):
self.q._check_ontology()
def test_identify_remap_function_bool_placeholder(self):
iseries = pd.Series(['True', 'true', 1, 'nope',
'False', 'false', 0, 0.0])
# Sets the know values
kseries = pd.Series([True, True, True, 'nope',
False, False, False, False])
f_ = self.q._identify_remap_function(bool, {'nope'})
tseries = iseries.apply(f_)
pdt.assert_series_equal(kseries, tseries)
def test_identify_remap_function_bool_no_placeholder(self):
iseries = pd.Series(['True', 'true', 1, 'nope',
'False', 'false', 0, 0.0])
# Sets the know values
kseries = pd.Series([True, True, True, 'error',
False, False, False, False])
# Gets the test values
f_ = self.q._identify_remap_function(bool, {'cool '})
tseries = iseries.apply(f_)
| pdt.assert_series_equal(kseries, tseries) | pandas.util.testing.assert_series_equal |
# coding: utf-8
# # Leave-One-Patient-Out classification of individual volumes
#
# Here, we train a classifier for each patient, based on the data of all the other patients except the current one (Leave One Out Cross-Validation). To this end, we treat each volume as an independent observation, so we have a very large sample of volumes which are used for training; and later, we do not classify the patient as a whole, but the classifier makes a decision for each of the held-out patient's 200 volumes. Therefore, at this stage, we have not made a decision on the patient level, but only at the volume-as-unit-of-observation level.
# ### import modules
# In[1]:
import os
import pickle
import numpy as np
import pandas as pd
from sklearn import svm, preprocessing, metrics
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
sns.set_context('poster')
# In[2]:
sns.set_context('poster')
# In[3]:
# after converstion to .py, we can use __file__ to get the module folder
try:
thisDir = os.path.realpath(__file__)
# in notebook form, we take the current working directory (we need to be in 'notebooks/' for this!)
except:
thisDir = '.'
# convert relative path into absolute path, so this will work with notebooks and py modules
supDir = os.path.abspath(os.path.join(os.path.dirname(thisDir), '..'))
supDir
# ### get meta df
# We need this e.g. to get information about conclusiveness
#
## In[4]:
#
#
#data_df = pd.read_csv(
# '../data/interim/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_df.csv',
# index_col=[0, 1],
# header=0)
#
#
#
## In[5]:
#
#
#data_df.tail()
#
#
# #### conclusiveness filters
#
## In[6]:
#
#
#is_conclusive = data_df.loc[:, 'pred'] != 'inconclusive'
#
#
#
## In[7]:
#
#
#is_conclusive.sum()
#
#
# ### get data
#
## In[8]:
#
#
#def make_group_df(data_df,metric='corr_df'):
# '''load correlation data of all patients'''
#
# group_df = pd.DataFrame()
#
# for p in data_df.index:
# # get data
# filename = data_df.loc[p, metric]
# this_df = pd.read_csv(filename, index_col=[0], header=0)
# # add patient infos to index
# this_df.index = [[p[0]], [p[1]]]
#
# group_df = pd.concat([group_df, this_df])
#
# # reorder the colums and make sure volumes are integer values
# group_df.columns = group_df.columns.astype(int)
#
# # sort across rows, then across columns, to make sure that volumes
# # are in the right order
# group_df = group_df.sort_index(axis=0)
# group_df = group_df.sort_index(axis=1)
#
# assert all(group_df.columns == range(200)), 'wrong order of volumes'
#
# return group_df
#
#
#
## In[9]:
#
#
#group_df = make_group_df(data_df)
#
#
#
## In[10]:
#
#
#group_df.tail()
#
#
# #### filter data
#
## In[11]:
#
#
## only conclusive cases
#conclusive_df = group_df[is_conclusive]
## only inconclusive cases
#inconclusive_df = group_df[is_conclusive == False]
## all cases unfiltered
#withinconclusive_df = group_df.copy()
#
#
#
## In[12]:
#
#
#print(conclusive_df.shape, inconclusive_df.shape, withinconclusive_df.shape)
#
#
# ### get design
# In[13]:
conds_file = os.path.join(supDir,'models','conds.p')
with open(conds_file, 'rb') as f:
conds = pickle.load(f)
#
## In[14]:
#
#
#print(conds)
#
#
# ### get colors
#
## In[15]:
#
#
#with open('../models/colors.p', 'rb') as f:
# color_dict = pickle.load(f)
#
#my_cols = {}
#for i, j in zip(['red', 'blue', 'yellow'], ['left', 'right', 'bilateral']):
# my_cols[j] = color_dict[i]
#
#
# ### invert the resting timepoints
#
## In[16]:
#
#
#inv_df = conclusive_df*conds
#
#
#
## In[17]:
#
#
#inv_df.tail()
#
#
# ### train the classifier
#
## In[18]:
#
#
#stack_df = pd.DataFrame(inv_df.stack())
#stack_df.tail()
#
#
#
## In[19]:
#
#
#stack_df.shape
#
#
#
## In[20]:
#
#
#my_groups = ['left','bilateral','right']
#
#
#
## In[21]:
#
#
#dynamite_df = stack_df.copy()
#dynamite_df.columns = ['correlation']
#dynamite_df['group'] = dynamite_df.index.get_level_values(0)
#sns.catplot(data=dynamite_df,y='group',x='correlation',kind='bar',orient='h',palette=my_cols,order=my_groups,aspect=1)
#plt.axvline(0,color='k',linewidth=3)
#plt.xlim(0.05,-0.05,-0.01)
#sns.despine(left=True,trim=True)
#plt.ylabel('')
#plt.savefig('../reports/figures/10-dynamite-plot.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
#
## In[22]:
#
#
#from scipy import stats
#
#
#
## In[23]:
#
#
#t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['left','correlation'])
#print('\nt=%.2f,p=%.64f'%(t,p))
#t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['right','correlation'])
#print('\nt=%.2f,p=%.38f'%(t,p))
#t,p = stats.ttest_ind(dynamite_df.loc['left','correlation'],dynamite_df.loc['right','correlation'])
#print('\nt=%.2f,p=%.248f'%(t,p))
#
#
# ### as histogram
#
## In[24]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(8,5))
#for group in my_groups:
# sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
#plt.legend()
#plt.xlim(0.4,-0.4,-0.2)
#sns.despine()
#plt.show()
#
#
# ### set up the classifier
#
## In[25]:
#
#
#clf = svm.SVC(kernel='linear',C=1.0,probability=False,class_weight='balanced')
#
#
# In[26]:
def scale_features(X):
'''z-transform the features before applying a SVC.
The scaler is also stored so it can later be re-used on test data'''
my_scaler = preprocessing.StandardScaler()
my_scaler.fit(X)
X_scaled = my_scaler.transform(X)
return X_scaled,my_scaler
# In[27]:
def encode_labels(y):
'''get from number labels to strings and back'''
my_labeler = preprocessing.LabelEncoder()
my_labeler.fit(np.unique(y))
y_labels = my_labeler.transform(y)
return y_labels, my_labeler
# In[28]:
def train_classifier(df):
'''get features and labels
* scale the features
* transform the labels
* apply the classifier
'''
X = df.values
y = df.index.get_level_values(0)
X_scaled,my_scaler = scale_features(X)
y_labels, my_labeler = encode_labels(y)
clf.fit(X_scaled,y_labels)
return clf,my_scaler,my_labeler
#
## In[29]:
#
#
#example_clf, example_scaler, example_labeler = train_classifier(stack_df)
#
#
#
## In[30]:
#
#
#example_clf
#
#
#
## In[31]:
#
#
#example_scaler
#
#
#
## In[32]:
#
#
#example_labeler.classes_
#
#
#
## In[33]:
#
#
#def get_boundaries(clf,my_scaler):
# '''find the point where the classifier changes its prediction;
# this is an ugly brute-force approach and probably there is a much
# easier way to do this
# '''
#
# d = {}
# for i in np.linspace(-1,1,10000):
# this_val = my_scaler.transform(np.array([i]).reshape(1,-1))
# this_predict = clf.predict(this_val)
# d[i] = this_predict[-1]
# df = pd.DataFrame(d,index=['pred']).T
# return df[(df-df.shift(1))!=0].dropna().index[1:]
#
#
#
## In[34]:
#
#
#from datetime import datetime
#
#
# ### get class boundaries of all folds
#
## In[35]:
#
#
#import tqdm
#
#
#
## In[36]:
#
#
#def get_all_boundaries(stack_df):
# '''for each fold, get the boundaries, by
# training on everybody but the held-out patient
# and storing the boundaries'''
#
# all_boundaries = {}
#
# conclusive_pats = np.unique(stack_df.index.get_level_values(1))
#
# for p in tqdm.tqdm(conclusive_pats):
#
# # in the current fold, we drop one patient
# df = stack_df.drop(p,level=1)
#
# # train on this fold's data
# clf,my_scaler,my_labeler = train_classifier(df)
#
# # get the classifier boundaries
# boundaries = get_boundaries(clf,my_scaler)
# all_boundaries[p] = boundaries
#
# return all_boundaries
#
#
# Compute the boundaries and store them for later re-use:
#
## In[37]:
#
#
#all_boundaries = get_all_boundaries(stack_df)
#bound_df = pd.DataFrame(all_boundaries).T
#bound_df.tail()
#
#
#
## In[38]:
#
#
#bound_df.to_csv('../data/processed/csv/bound_df.csv')
#
#
# To make things faster, we can re-load the computed boundaries here:
#
## In[39]:
#
#
#bound_df = pd.read_csv('../data/processed/csv/bound_df.csv',index_col=[0],header=0)
#bound_df.tail()
#
#
# rename so boundaries have meaningful descriptions:
#
## In[40]:
#
#
#bound_df = bound_df.rename(columns={'0':'B/R','1':'L/B'})
#bound_df.tail()
#
#
#
## In[41]:
#
#
#bound_df.describe()
#
#
# #### show the class boundaries overlaid on the data distribution
#
## In[42]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(8,5))
#for group in my_groups:
# sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
#
#for b in bound_df.values.flatten():
# plt.axvline(b,alpha=0.1,color=color_dict['black'])
#
#plt.legend()
#plt.xlabel('correlation')
#plt.ylabel('density')
#plt.xlim(0.4,-0.4,-0.2)
#plt.ylim(0,8)
#plt.legend(loc=(0.65,0.65))
#sns.despine(trim=True,offset=5)
#plt.savefig('../reports/figures/10-distribution-plot.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
# #### make swarm/factorplot with boundary values
#
## In[43]:
#
#
#sns_df = pd.DataFrame(bound_df.stack())
#sns_df.columns = ['correlation']
#sns_df.loc[:,'boundary'] = sns_df.index.get_level_values(1)
#sns_df.loc[:,'dummy'] = 0
#
#
#
## In[44]:
#
#
#sns_df.tail()
#
#
#
## In[45]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(4,5))
#sns.swarmplot(data=sns_df,
# x='correlation',
# y='dummy',
# hue='boundary',
# orient='h',
# palette={'L/B':my_cols['left'],'B/R':my_cols['right']},
# size=4,
# alpha=0.9,
# ax=ax
# )
#plt.xlim(0.04,-0.02,-0.02)
#ax.set_ylabel('')
#ax.set_yticks([])
#sns.despine(left=True,trim=True)
#plt.savefig('../reports/figures/10-boundary-swarm-plot.png',dpi=300,bbox_inches='tight')
#
#plt.show()
#
#
# ### combine above into one plot
#
## In[46]:
#
#
#sns.set_style('dark')
#
#
#
## In[47]:
#
#
#fig = plt.figure(figsize=(16,6))
#
#ax1 = fig.add_axes([0.36, .999, 1, .7], xticklabels=[], yticklabels=[])
#ax1.imshow(Image.open('../reports/figures/10-dynamite-plot.png'))
#
#ax2 = fig.add_axes([0, 1, 1, 0.8], xticklabels=[], yticklabels=[])
#ax2.imshow(Image.open('../reports/figures/10-distribution-plot.png'))
#
#ax3 = fig.add_axes([0.65, 1, 1, 0.8], xticklabels=[], yticklabels=[])
#ax3.imshow(Image.open('../reports/figures/10-boundary-swarm-plot.png'))
#
#plt.text(0,1, 'A',transform=ax2.transAxes, fontsize=32)
#plt.text(1.04,1, 'B',transform=ax2.transAxes, fontsize=32)
#plt.text(1.63,1, 'C',transform=ax2.transAxes, fontsize=32)
#
#plt.savefig('../reports/figures/10-training-overview.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
# ### make predictions for all patients (conc and inconc)
# #### invert
#
## In[48]:
#
#
#all_inv_df = group_df*conds
#
#
#
## In[49]:
#
#
#all_inv_df.tail()
#
#
# In[50]:
def make_preds(this_df,clf,my_scaler,my_labeler):
'''apply fitted classifier to the held-out patient;
based on what has been done during training, we
* scale the features using the stored scaler
* transform the labels using the stored labeler
* apply the classifier using the stored classfier
'''
scaled_features = my_scaler.transform(this_df.T)
predictions = clf.predict(scaled_features)
labeled_predictions = my_labeler.inverse_transform(predictions)
counts = pd.Series(labeled_predictions).value_counts()
counts_df = pd.DataFrame(counts).T
counts_df.index = | pd.MultiIndex.from_tuples(this_df.index) | pandas.MultiIndex.from_tuples |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Merge England data to GCP's Covid-19 Open Data format: https://github.com/GoogleCloudPlatform/covid-19-open-data."""
import re
from typing import Tuple
from dm_c19_modelling.england_data import constants
import pandas as pd
_DATE = "date"
_KEY = "key"
_COUNTRY_CODE = "country_code"
_SUBREGION1_CODE = "subregion1_code"
_SUBREGION1_NAME = "subregion1_name"
_SUBREGION2_CODE = "subregion2_code"
_SUBREGION2_NAME = "subregion2_name"
_LOCALITY_CODE = "locality_code"
_LOCALITY_NAME = "locality_name"
_INDEX_DEFAULT_COLUMNS = {
_KEY: None,
"wikidata": float("nan"),
"datacommons": float("nan"),
_COUNTRY_CODE: "GB",
"country_name": "United Kingdom",
_SUBREGION1_CODE: "ENG",
_SUBREGION1_NAME: "England",
_SUBREGION2_CODE: float("nan"),
_SUBREGION2_NAME: float("nan"),
_LOCALITY_CODE: float("nan"),
_LOCALITY_NAME: float("nan"),
"3166-1-alpha-2": "GB",
"3166-1-alpha-3": "GBR",
"aggregation_level": 3,
}
_COLUMNS = constants.Columns
def _create_index_row_for_trust(regions_row: pd.Series) -> pd.Series:
  """Builds one region-index row for an NHS trust, keyed GB_ENG_<trust code>."""
  row = dict(_INDEX_DEFAULT_COLUMNS)
  row[_LOCALITY_CODE] = regions_row[_COLUMNS.REG_TRUST_CODE.value]
  row[_LOCALITY_NAME] = regions_row[_COLUMNS.REG_TRUST_NAME.value]
  row[_KEY] = "{}_{}_{}".format(
      row[_COUNTRY_CODE], row[_SUBREGION1_CODE], row[_LOCALITY_CODE])
  return pd.Series(row)
def _create_index_row_for_ltla(regions_row: pd.Series) -> pd.Series:
  """Builds one region-index row for a lower-tier local authority (LTLA)."""
  row = dict(_INDEX_DEFAULT_COLUMNS)
  row[_LOCALITY_CODE] = regions_row[_COLUMNS.REG_LTLA_CODE.value]
  row[_LOCALITY_NAME] = regions_row[_COLUMNS.REG_LTLA_NAME.value]
  row[_KEY] = "{}_{}_{}".format(
      row[_COUNTRY_CODE], row[_SUBREGION1_CODE], row[_LOCALITY_CODE])
  return pd.Series(row)
def _create_index_row_for_ccg(regions_row: pd.Series) -> pd.Series:
  """Builds one region-index row for a clinical commissioning group (CCG)."""
  row = dict(_INDEX_DEFAULT_COLUMNS)
  row[_LOCALITY_CODE] = regions_row[_COLUMNS.REG_CCG_CODE.value]
  row[_LOCALITY_NAME] = regions_row[_COLUMNS.REG_CCG_NAME.value]
  row[_KEY] = "{}_{}_{}".format(
      row[_COUNTRY_CODE], row[_SUBREGION1_CODE], row[_LOCALITY_CODE])
  return pd.Series(row)
def _create_region_index(
    df: pd.DataFrame,
    data_type: constants.DataTypes) -> Tuple[pd.DataFrame, pd.DataFrame]:
  """Creates a region index dataframe from an input dataframe.

  Returns a (index, keyed_df) pair: `index` has one row per distinct region
  combination, and `keyed_df` is a copy of `df` where the region columns are
  replaced by the generated `key` column.
  """
  # Each data type identifies regions by a different locality column.
  row_builders = {
      constants.DataTypes.DAILY_DEATHS: _create_index_row_for_trust,
      constants.DataTypes.DAILY_CASES: _create_index_row_for_ltla,
      constants.DataTypes.ONLINE_111_AND_CALLS_111_999: _create_index_row_for_ccg,
      constants.DataTypes.POPULATION: _create_index_row_for_ccg,
  }
  if data_type not in row_builders:
    raise ValueError(f"Unknown data_type: '{data_type}'")
  build_row = row_builders[data_type]

  region_columns = [
      name for name in df.columns if name.startswith(constants.REGION_PREFIX)
  ]
  rows = []
  region_to_key = {}
  # One index row (and one key) per distinct region combination.
  for _, region_values in df[region_columns].drop_duplicates().iterrows():
    row = build_row(region_values)
    rows.append(row)
    region_to_key[tuple(region_values)] = row[_KEY]
  index = pd.DataFrame(rows)
  # New column of generated keys, one per input row.
  keys = df[region_columns].apply(
      lambda values: region_to_key[tuple(values)], axis=1)
  # Replace the region columns with the key (placed after the date if any).
  keyed_df = df.copy()
  insert_at = 1 if _DATE in keyed_df.columns else 0
  keyed_df.insert(insert_at, _KEY, keys)
  keyed_df.drop(region_columns, axis=1, inplace=True)
  return index, keyed_df
def _pool_population_data_to_match_cloud(df: pd.DataFrame) -> pd.DataFrame:
"""Pool our population data columns to match Cloud's."""
pooling = {
"obs_population": r"^obs_population_[mf]_\d{2}$",
"obs_population_male": r"^obs_population_m_\d{2}$",
"obs_population_female": r"^obs_population_f_\d{2}$",
"obs_population_age_00_09": r"^obs_population_[mf]_0\d$",
"obs_population_age_10_19": r"^obs_population_[mf]_1\d$",
"obs_population_age_20_29": r"^obs_population_[mf]_2\d$",
"obs_population_age_30_39": r"^obs_population_[mf]_3\d$",
"obs_population_age_40_49": r"^obs_population_[mf]_4\d$",
"obs_population_age_50_59": r"^obs_population_[mf]_5\d$",
"obs_population_age_60_69": r"^obs_population_[mf]_6\d$",
"obs_population_age_70_79": r"^obs_population_[mf]_7\d$",
"obs_population_age_80_89": r"^obs_population_[mf]_8\d$",
"obs_population_age_90_99": r"^obs_population_[mf]_9\d$",
"obs_population_age_80_and_older": r"^obs_population_[mf]_[89]\d$"
}
pooled = {"key": df["key"]}
for cloud_col, pat in pooling.items():
columns_to_pool = [col for col in df if re.match(pat, col) is not None]
pooled[cloud_col] = df[columns_to_pool].sum(axis=1)
return | pd.DataFrame(pooled) | pandas.DataFrame |
import os
import datetime
import logging
import json
import uuid
import pandas as pd
from collections import Counter
from installed_clients.GenomeAnnotationAPIClient import GenomeAnnotationAPI
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.GenomeFileUtilClient import GenomeFileUtil
from installed_clients.WorkspaceClient import Workspace as Workspace
from installed_clients.KBaseReportClient import KBaseReport
import MergeMetabolicAnnotations.utils.utils as mu
class MergeAnnotationsUtil:
workdir = 'tmp/work/'
staging_dir = "/staging/"
datadir = "/kb/module/data/"
    def __init__(self, config):
        """Set up KBase service clients and per-run state from the SDK config dict."""
        # Scratch directory for intermediate files produced by this run.
        os.makedirs(self.workdir, exist_ok=True)
        self.config = config
        # Timestamp used to tag output objects/files from this run.
        self.timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        self.callback_url = config['SDK_CALLBACK_URL']
        self.scratch = config['scratch']
        # KBase service clients, all reached through the SDK callback server.
        self.genome_api = GenomeAnnotationAPI(self.callback_url)
        self.dfu = DataFileUtil(self.callback_url)
        self.gfu = GenomeFileUtil(self.callback_url)
        self.kbr = KBaseReport(self.callback_url)
        self.ws_client = Workspace(config["workspace-url"])
        # Populated later by get_ontology_events():
        #   events  - ontology event index -> event metadata
        #   weights - ontology event index -> user-assigned weight
        self.events = {}
        self.weights = {}
        # Per-gene state accumulated during merging.
        self.genes = {}
    def get_ontology_events(self, params):
        """Select the genome's ontology events to merge and record their weights.

        Populates self.events (event index -> event metadata) and self.weights
        (event index -> weight). With an empty 'annotations_to_merge' list,
        every event is kept with weight 1; otherwise only events whose
        description appears in an entry's 'annotation_source' list are kept,
        using that entry's 'annotation_weight'. Assumes self.genome has been
        loaded before this is called -- TODO confirm against the caller.
        """
        if 'ontology_events' in self.genome:
            for event, ontology in enumerate(self.genome['ontology_events']):
                # fix some legacy problems (older events may lack a
                # description or carry an outdated ontology id)
                if 'description' not in ontology:
                    ontology['description'] = ontology['method']
                ontology["id"] = mu.legacy_fix(ontology["id"])
                if len(params['annotations_to_merge']) == 0:
                    # No explicit selection: keep every event with weight 1.
                    self.weights[event] = 1
                    self.events[event] = {}
                    for term in ontology:
                        self.events[event][term] = ontology[term]
                else:
                    for annotations_to_merge in params['annotations_to_merge']:
                        if ontology['description'] in annotations_to_merge['annotation_source']:
                            # Keep this event with the user-assigned weight.
                            self.events[event] = {}
                            self.weights[event] = annotations_to_merge['annotation_weight']
                            for term in ontology:
                                self.events[event][term] = ontology[term]
        else:
            logging.info("No ontology events in this genome!")
    def merge_annotations(self):
        """Collect, per gene, the reactions implied by the selected ontology events.

        Returns {gene_id: {rxn_id: {'events': [event indices]}}}. Ontology
        terms are standardized and translated to reaction ids through
        self.translations (assumed to be loaded beforehand -- TODO confirm);
        terms with no translation are skipped. Only events present in
        self.events (i.e. chosen by get_ontology_events) are considered.
        """
        merged_annotations = {}
        # add gene id to summary
        for feature in self.genome['features']:
            gene_id = feature['id']
            merged_annotations[gene_id] = {}
            # get ontology term
            if "ontology_terms" in feature:
                for type in feature['ontology_terms']:
                    term_dict = feature['ontology_terms'][type]
                    # fix potential legacy problems after getting feature
                    type = mu.legacy_fix(type)
                    for term in term_dict:
                        # logging.info(term)
                        # logging.info(mu.standardize_annotation(term, type))
                        # term_dict maps term -> list of ontology event indices
                        for ontology_event in term_dict[term]:
                            # is this ontology event in the user-selected list?
                            if ontology_event in self.events:
                                rxn = "none"
                                # convert terms to rxns
                                standardized_term = mu.standardize_annotation(term, type)
                                if standardized_term in self.translations[type]:
                                    rxn = self.translations[type][standardized_term]
                                if rxn != "none":
                                    if rxn in merged_annotations[gene_id]:
                                        merged_annotations[gene_id][rxn]['events'].append(
                                            ontology_event)
                                        # clean up duplicates... eg old versions of prokka added many of the same reaction
                                        merged_annotations[gene_id][rxn]['events'] = list(
                                            set(merged_annotations[gene_id][rxn]['events']))
                                    else:
                                        merged_annotations[gene_id][rxn] = {'events': []}
                                        merged_annotations[gene_id][rxn]['events'] = [
                                            ontology_event]
        return merged_annotations
def score_annotations(self, annotations, threshold, best_only):
'''
returns a pandas dataframe suitable for import annotations
'''
df = | pd.DataFrame(columns=['gene', 'term', 'events', 'score']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 15:41:39 2018
@author: elcok
"""
import geopandas as gpd
import pandas as pd
import os
import igraph as ig
import numpy as np
import sys
import subprocess
from shapely.geometry import Point
from geoalchemy2 import Geometry, WKTElement
from vtra.utils import load_config,extract_value_from_gdf,get_nearest_node,gdf_clip,gdf_geom_clip,count_points_in_polygon
from vtra.transport_network_creation import province_shapefile_to_network, add_igraph_time_costs_province_roads
def cropflows_edges(region_name, start_points, end_points, graph, excel_sheet, excel_writer=''):
    """Assign crop tonnage flows to the road edges of a province network.

    For every start point a shortest path (by generalised cost) is traced to
    its nearest commune centre, the resulting OD path table is written to an
    Excel sheet, and the tonnage moved over every network edge is accumulated.

    Parameters
    ----------
    region_name : str
        Province name (kept for signature compatibility; unused here).
    start_points : geopandas.GeoDataFrame
        Origins with 'NEAREST_G_NODE', 'NEAREST_C_CENTER' and 'tons' columns.
    end_points : geopandas.GeoDataFrame
        Destinations with 'OBJECTID' and 'NEAREST_G_NODE' columns.
    graph : igraph.Graph
        Road network whose edges carry 'edge_id', 'length', 'min_time' and
        'min_cost' attributes.
    excel_sheet : str
        Crop name; also used as the Excel sheet name and output column name.
    excel_writer : pandas.ExcelWriter
        Open writer the OD path table is appended to.

    Returns
    -------
    pandas.DataFrame
        One row per network edge with columns ['edge_id', excel_sheet].
    """
    # NOTE(review): `node_dict` (node id -> igraph vertex index) is read from
    # enclosing/global scope -- confirm it is populated before this is called.
    save_paths = []
    for iter_, place in start_points.iterrows():
        try:
            closest_center = end_points.loc[end_points['OBJECTID']
                == place['NEAREST_C_CENTER']]['NEAREST_G_NODE'].values[0]
            pos0_i = graph.vs[node_dict[place['NEAREST_G_NODE']]]
            pos1_i = graph.vs[node_dict[closest_center]]
            if pos0_i != pos1_i:
                path = graph.get_shortest_paths(pos0_i, pos1_i, weights='min_cost', output="epath")
                get_od_pair = (place['NEAREST_G_NODE'], closest_center)
                # `path` holds a single epath (list of edge indices).
                get_path = [graph.es[n]['edge_id'] for n in path][0]
                get_dist = sum([graph.es[n]['length'] for n in path][0])
                get_time = sum([graph.es[n]['min_time'] for n in path][0])
                get_travel_cost = sum([graph.es[n]['min_cost'] for n in path][0])
                save_paths.append((str(get_od_pair), str(get_path), get_travel_cost,
                                   get_dist, get_time, place['tons']))
        except Exception:
            # Best-effort: a start point without a routable OD pair is skipped,
            # but its row index is logged so missing flows can be investigated.
            print(iter_)

    save_paths_df = pd.DataFrame(
        save_paths,
        columns=['od_nodes', 'edge_path', 'travel_cost', 'distance', 'time', 'tons'])
    # Aggregate tonnage for identical OD paths before writing them out.
    save_paths_df = save_paths_df.groupby(
        ['od_nodes', 'edge_path', 'travel_cost', 'distance', 'time'])['tons'].sum().reset_index()
    save_paths_df.to_excel(excel_writer, excel_sheet, index=False)
    excel_writer.save()

    # Accumulate the tonnage of every OD path that traverses each edge.
    # NOTE(review): str.contains does substring/regex matching, so an edge id
    # that is a prefix of another (e.g. 'e_1' vs 'e_10') may over-count --
    # confirm edge id format rules this out.
    all_edges = [x['edge_id'] for x in graph.es]
    crop_tot = []
    for edge in all_edges:
        edge_path_index = list(set(
            save_paths_df.loc[save_paths_df['edge_path'].str.contains(edge)].index.tolist()))
        if edge_path_index:
            crop_tot.append(sum([save_paths_df.iloc[e]['tons'] for e in edge_path_index]))
        else:
            crop_tot.append(0)
    del save_paths_df

    gd_edges = pd.DataFrame(list(zip(all_edges, crop_tot)))
    gd_edges.columns = ['edge_id', excel_sheet]
    return gd_edges
if __name__ == '__main__':
data_path,calc_path,output_path = load_config()['paths']['data'],load_config()['paths']['calc'],load_config()['paths']['output']
# provinces to consider
province_list = ['Lao Cai','Binh Dinh','Thanh Hoa']
# district_committe_names = ['district_people_committee_points_thanh_hoa.shp','district_province_peoples_committee_point_binh_dinh.shp','district_people_committee_points_lao_cai.shp']
district_committe_names = ['district_people_committee_points_lao_cai.shp','district_province_peoples_committee_point_binh_dinh.shp','district_people_committee_points_thanh_hoa.shp']
shp_output_path = os.path.join(output_path,'flow_mapping_shapefiles')
province_path = os.path.join(data_path,'Vietnam_boundaries','who_boundaries','who_provinces.shp')
provinces = gpd.read_file(province_path)
provinces = provinces.to_crs({'init': 'epsg:4326'})
crop_data_path = os.path.join(data_path,'Agriculture_crops','crop_data')
crop_names = ['rice','cash','cass','teas','maiz','rubb','swpo','acof','rcof','pepp']
for prn in range(len(province_list)):
province = province_list[prn]
# set all paths for all input files we are going to use
province_name = province.replace(' ','').lower()
edges_in = os.path.join(data_path,'Roads','{}_roads'.format(province_name),'vietbando_{}_edges.shp'.format(province_name))
nodes_in = os.path.join(data_path,'Roads','{}_roads'.format(province_name),'vietbando_{}_nodes.shp'.format(province_name))
commune_center_in = os.path.join(data_path,'Points_of_interest',district_committe_names[prn])
flow_output_excel = os.path.join(output_path,'crop_flows','{}_province_roads_crop_flow_paths.xlsx'.format(province_name))
excl_wrtr = | pd.ExcelWriter(flow_output_excel) | pandas.ExcelWriter |
import argparse
import sys
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree
STEERING_ANGLE_RATIO = 14.7
def calculate_closed_loop_metrics(model_frames, expert_frames, fps=30, failure_rate_threshold=1.0):
    """Summarise a closed-loop drive against the expert trajectory.

    Produces lateral-error statistics (restricted to autonomous frames), the
    failure rate (share of lateral errors above the threshold, in percent),
    distance/intervention counts, and steering whiteness for the effective,
    commanded and expert steering signals (all converted to degrees).
    """
    lat_errors = calculate_lateral_errors(model_frames, expert_frames, True)

    autonomous = model_frames[model_frames.autonomous].reset_index(drop=True)
    # Radians -> degrees; commanded steering is additionally scaled from
    # wheel angle to steering-wheel angle.
    model_steering = autonomous.steering_angle.to_numpy() / np.pi * 180
    cmd_steering = (autonomous.cmd_steering_angle.to_numpy() / np.pi * 180) * STEERING_ANGLE_RATIO
    expert_steering = expert_frames.steering_angle.to_numpy() / np.pi * 180

    distance = calculate_distance(model_frames)
    interventions = calculate_interventions(model_frames)

    return {
        'mae': lat_errors.mean(),
        'rmse': np.sqrt((lat_errors ** 2).mean()),
        'max': lat_errors.max(),
        'failure_rate': len(lat_errors[lat_errors > failure_rate_threshold]) / float(len(lat_errors)) * 100,
        'distance': distance,
        'distance_per_intervention': distance / interventions,
        'interventions': interventions,
        'whiteness': calculate_whiteness(model_steering, fps),
        'cmd_whiteness': calculate_whiteness(cmd_steering, fps),
        'expert_whiteness': calculate_whiteness(expert_steering, fps),
    }
def calculate_open_loop_metrics(predicted_steering, true_steering, fps):
    """Frame-by-frame comparison of predicted vs ground-truth steering.

    Both inputs are in radians; errors and whiteness are reported in degrees.
    """
    predicted_deg = predicted_steering / np.pi * 180
    true_deg = true_steering / np.pi * 180
    abs_errors = np.abs(true_deg - predicted_deg)

    return {
        'mae': abs_errors.mean(),
        'rmse': np.sqrt((abs_errors ** 2).mean()),
        'max': abs_errors.max(),
        'whiteness': calculate_whiteness(predicted_deg, fps),
        'expert_whiteness': calculate_whiteness(true_deg, fps),
    }
def calculate_trajectory_open_loop_metrics(predicted_waypoints, true_waypoints, fps):
    """Open-loop errors for predicted trajectories.

    Waypoints are flattened as (x0, y0, x1, y1, ...), so waypoint k occupies
    columns 2k (x) and 2k+1 (y). Errors are euclidean distances between the
    predicted and ground-truth waypoint positions; whiteness is computed on
    the y (lateral) coordinate only. The sixth waypoint deliberately reports
    only MAE and whiteness (no rmse/max), unlike the first and last.
    """
    # Waypoint 0: columns 0 (x) and 1 (y).
    first_wp_error = np.hypot(predicted_waypoints[:, 0] - true_waypoints[:, 0],
                              predicted_waypoints[:, 1] - true_waypoints[:, 1])
    first_wp_whiteness = calculate_whiteness(predicted_waypoints[:, 1], fps=fps)
    first_wp_expert_whiteness = calculate_whiteness(true_waypoints[:, 1], fps=fps)
    # Waypoint 5 (the sixth): columns 10 (x) and 11 (y).
    sixth_wp_error = np.hypot(predicted_waypoints[:, 10] - true_waypoints[:, 10],
                              predicted_waypoints[:, 11] - true_waypoints[:, 11])
    sixth_wp_whiteness = calculate_whiteness(predicted_waypoints[:, 11], fps=fps)
    sixth_wp_expert_whiteness = calculate_whiteness(true_waypoints[:, 11], fps=fps)
    # number of predicted waypoints can be different, just take equal number of ground truth waypoints
    true_waypoints = true_waypoints[:, 0:predicted_waypoints.shape[1]]
    # After truncation, columns -2/-1 are the last predicted waypoint's x/y.
    last_wp_error = np.hypot(predicted_waypoints[:, -2] - true_waypoints[:, -2],
                             predicted_waypoints[:, -1] - true_waypoints[:, -1])
    last_wp_whiteness = calculate_whiteness(predicted_waypoints[:, -1], fps=fps)
    last_wp_expert_whiteness = calculate_whiteness(true_waypoints[:, -1], fps=fps)
    #zipped_waypoints = tqdm(zip(predicted_waypoints, true_waypoints), total=len(true_waypoints))
    #zipped_waypoints.set_description("Calculating frechet distances")
    #zipped_waypoints = zip(predicted_waypoints, true_waypoints)
    #frechet_distances = np.array(
    #    [frdist(z[0].reshape(-1, 2), z[1].reshape(-1, 2)) for z in zipped_waypoints])
    return {
        'first_wp_mae': first_wp_error.mean(),
        'first_wp_rmse': np.sqrt((first_wp_error ** 2).mean()),
        'first_wp_max': first_wp_error.max(),
        'first_wp_whiteness': first_wp_whiteness,
        'first_wp_expert_whiteness': first_wp_expert_whiteness,
        'sixth_wp_mae': sixth_wp_error.mean(),
        'sixth_wp_whiteness': sixth_wp_whiteness,
        'sixth_wp_expert_whiteness': sixth_wp_expert_whiteness,
        'last_wp_mae': last_wp_error.mean(),
        'last_wp_rmse': np.sqrt((last_wp_error ** 2).mean()),
        'last_wp_max': last_wp_error.max(),
        'last_wp_whiteness': last_wp_whiteness,
        'last_wp_expert_whiteness': last_wp_expert_whiteness,
        #'frechet_distance': frechet_distances.mean()
    }
def calculate_whiteness(steering_angles, fps=30):
    """Root-mean-square of the per-frame steering change rate (units/second)."""
    deltas = np.diff(steering_angles)
    return np.sqrt(np.mean((deltas * fps) ** 2))
def calculate_lateral_errors(model_frames, expert_frames, only_autonomous=True):
    """Lateral distance from each model position to the expert trajectory.

    For every (autonomous) model position, the two nearest expert positions
    within a 2 m radius are looked up and the perpendicular distance from the
    model point to the line through them is computed.

    :param model_frames: dataframe with position_x/position_y/autonomous columns
    :param expert_frames: dataframe with position_x/position_y/autonomous columns
    :param only_autonomous: restrict to autonomously driven frames
    :return: pandas Series of lateral errors; NaN where fewer than two expert
        neighbours were found, empty when no model point had two neighbours
    """
    model_trajectory_df = model_frames[["position_x", "position_y", "autonomous"]].rename(
        columns={"position_x": "X", "position_y": "Y"}).dropna()
    expert_trajectory_df = expert_frames[["position_x", "position_y", "autonomous"]].rename(
        columns={"position_x": "X", "position_y": "Y"}).dropna()

    if only_autonomous:
        model_trajectory_df = model_trajectory_df[model_trajectory_df.autonomous].reset_index(drop=True)

    # NOTE(review): the tree is built over (X, Y, autonomous), so the boolean
    # flag participates in the euclidean distance -- confirm this is intended.
    tree = BallTree(expert_trajectory_df.values)
    inds, _ = tree.query_radius(model_trajectory_df.values, r=2,
                                sort_results=True, return_distance=True)

    # Collect the two nearest expert points per model point, then build the
    # frame in one go (concatenating one-row frames in a loop is quadratic).
    records = {}
    for i, ind in enumerate(inds):
        if len(ind) >= 2:
            first = expert_trajectory_df.iloc[ind[0]]
            second = expert_trajectory_df.iloc[ind[1]]
            records[i] = {'X1': first.X, 'Y1': first.Y, 'X2': second.X, 'Y2': second.Y}

    if not records:
        # No model point had two expert neighbours within the radius.
        return pd.Series(dtype=float)

    closest_df = pd.DataFrame.from_dict(records, orient='index')
    f = model_trajectory_df.join(closest_df)

    # Perpendicular distance from point (X, Y) to the line (X1, Y1)-(X2, Y2).
    lat_errors = abs((f.X2 - f.X1) * (f.Y1 - f.Y) - (f.X1 - f.X) * (f.Y2 - f.Y1)) / np.sqrt(
        (f.X2 - f.X1) ** 2 + (f.Y2 - f.Y1) ** 2)

    return lat_errors
def calculate_interventions(frames):
    """Count safety-driver takeovers.

    A takeover is a frame where autonomy is on and off in the next frame.
    The last frame has no successor (NaN after the shift, and NaN != False),
    so it is never counted. Unlike a column-assignment approach, this does
    not mutate the caller's dataframe.
    """
    next_autonomous = frames['autonomous'].shift(-1)
    return int((frames['autonomous'] & (next_autonomous == False)).sum())
def calculate_distance(frames):
    """Total distance travelled while autonomous (in position units).

    Each frame's step is the euclidean distance to the next frame's position;
    steps starting from autonomous frames are summed. The last frame's step is
    NaN (no successor) and is skipped by sum(). The input dataframe is not
    mutated.
    """
    dx = frames['position_x'].shift(-1) - frames['position_x']
    dy = frames['position_y'].shift(-1) - frames['position_y']
    steps = np.hypot(dx, dy)
    return steps[frames['autonomous']].sum()
# Duplicated with read_frames_driving, should be removed
# when expert frames are re-extracted and have cmd_steering_angle column
def read_frames_expert(dataset_paths, filename):
    """Load and concatenate expert drive frames from several dataset folders,
    keeping only the position/steering columns and dropping incomplete rows."""
    combined = pd.concat(pd.read_csv(path / filename) for path in dataset_paths)
    return combined[['steering_angle', 'position_x', 'position_y', 'autonomous']].dropna()
def read_frames_driving(dataset_paths, filename="nvidia_frames.csv"):
datasets = [pd.read_csv(dataset_path / filename) for dataset_path in dataset_paths]
frames_df = | pd.concat(datasets) | pandas.concat |
"""
Author: <NAME>
Main class for Jaal network visualization dashboard
"""
# import
import dash
import visdcc
import pandas as pd
from dash import dcc, html
# import dash_core_components as dcc
# import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
from dash.dependencies import Input, Output, State
from .datasets.parse_dataframe import parse_dataframe
from .layout import get_app_layout, get_distinct_colors, create_color_legend, DEFAULT_COLOR, DEFAULT_NODE_SIZE, DEFAULT_EDGE_SIZE
# class
class Jaal:
"""The main visualization class
"""
    def __init__(self, edge_df, node_df=None):
        """
        Parameters
        -------------
        edge_df: pandas dataframe
            The network edge data stored in format of pandas dataframe
        node_df: pandas dataframe (optional)
            The network node data stored in format of pandas dataframe
        """
        print("Parsing the data...", end="")
        # Parsed visdcc graph data and min/max info for sizeable attributes.
        self.data, self.scaling_vars = parse_dataframe(edge_df, node_df)
        # Working copy updated by the filter callbacks; the original stays intact.
        self.filtered_data = self.data.copy()
        # Attribute-value -> colour maps, filled in by the colour callbacks.
        self.node_value_color_mapping = {}
        self.edge_value_color_mapping = {}
        print("Done")
def _callback_search_graph(self, graph_data, search_text):
"""Only show the nodes which match the search text
"""
nodes = graph_data['nodes']
for node in nodes:
if search_text not in node['label'].lower():
node['hidden'] = True
else:
node['hidden'] = False
graph_data['nodes'] = nodes
return graph_data
def _callback_filter_nodes(self, graph_data, filter_nodes_text):
"""Filter the nodes based on the Python query syntax
"""
self.filtered_data = self.data.copy()
node_df = pd.DataFrame(self.filtered_data['nodes'])
try:
node_list = node_df.query(filter_nodes_text)['id'].tolist()
nodes = []
for node in self.filtered_data['nodes']:
if node['id'] in node_list:
nodes.append(node)
self.filtered_data['nodes'] = nodes
graph_data = self.filtered_data
except:
graph_data = self.data
print("wrong node filter query!!")
return graph_data
def _callback_filter_edges(self, graph_data, filter_edges_text):
"""Filter the edges based on the Python query syntax
"""
self.filtered_data = self.data.copy()
edges_df = pd.DataFrame(self.filtered_data['edges'])
try:
edges_list = edges_df.query(filter_edges_text)['id'].tolist()
edges = []
for edge in self.filtered_data['edges']:
if edge['id'] in edges_list:
edges.append(edge)
self.filtered_data['edges'] = edges
graph_data = self.filtered_data
except:
graph_data = self.data
print("wrong edge filter query!!")
return graph_data
def _callback_color_nodes(self, graph_data, color_nodes_value):
value_color_mapping = {}
# color option is None, revert back all changes
if color_nodes_value == 'None':
# revert to default color
for node in self.data['nodes']:
node['color'] = DEFAULT_COLOR
else:
print("inside color node", color_nodes_value)
unique_values = | pd.DataFrame(self.data['nodes']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME> <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python modules
import glob
import os
# Third party modules
import gpxpy
from gpxpy.gpx import GPXBounds
import pandas as pd
from pandas import DataFrame
from tqdm import tqdm
import geopy
# Own modules
from trackanimation import utils as trk_utils
from trackanimation.utils import TrackException
class DFTrack:
def __init__(self, df_points=None, columns=None):
if df_points is None:
self.df = DataFrame()
if isinstance(df_points, pd.DataFrame):
self.df = df_points
else:
if columns is None:
columns = ['CodeRoute', 'Latitude', 'Longitude', 'Altitude', 'Date',
'Speed', 'TimeDifference', 'Distance', 'FileName']
self.df = DataFrame(df_points, columns=columns)
def export(self, filename='exported_file', export_format='csv'):
"""
Export a data frame of DFTrack to JSON or CSV.
Parameters
----------
export_format: string
Format to export: JSON or CSV
filename: string
Name of the exported file
"""
if export_format.lower() == 'json':
self.df.reset_index().to_json(orient='records', path_or_buf=filename+'.json')
elif export_format.lower() == 'csv':
self.df.to_csv(path_or_buf=filename+'.csv')
else:
raise TrackException('Must specify a valid format to export', "'%s'" % export_format)
def getTracks(self):
"""
Makes a copy of the DFTrack.
Explanation:
http://stackoverflow.com/questions/27673231/why-should-i-make-a-copy-of-a-data-frame-in-pandas
Returns
-------
copy: DFTrack
The copy of DFTrack.
"""
return self.__class__(self.df.copy(), list(self.df))
def sort(self, column_name):
"""
Sorts the data frame by the specified column.
Parameters
----------
column_name: string
Column name to sort
Returns
-------
sort: DFTrack
DFTrack sorted
"""
if isinstance(column_name, list):
for column in column_name:
if column not in self.df:
raise TrackException('Column name not found', "'%s'" % column)
else:
if column_name not in self.df:
raise TrackException('Column name not found', "'%s'" % column_name)
return self.__class__(self.df.sort_values(column_name), list(self.df))
def getTracksByPlace(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in Google's API
and, if it does not get anything, it tries with OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
retrive all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
track_place = self.getTracksByPlaceGoogle(place, timeout=timeout, only_points=only_points)
if track_place is not None:
return track_place
track_place = self.getTracksByPlaceOSM(place, timeout=timeout, only_points=only_points)
if track_place is not None:
return track_place
return None
    def getTracksByPlaceGoogle(self, place, timeout=10, only_points=True):
        """
        Gets the points of the specified place searching in Google's API.

        Parameters
        ----------
        place: string
            Place to get the points
        timeout: int
            Time, in seconds, to wait for the geocoding service to respond
            before returning a None value.
        only_points: boolean
            True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.

        Returns
        -------
        place: DFTrack
            A DFTrack with the points of the specified place or
            None if anything is found.
        """
        try:
            geolocator = geopy.GoogleV3()
            location = geolocator.geocode(place, timeout=timeout)
        except geopy.exc.GeopyError as geo_error:
            # Geocoding failed or timed out: signal "not found" to the caller.
            return None

        # Bounding-box corners of the geocoded place, from the raw response.
        southwest_lat = float(location.raw['geometry']['bounds']['southwest']['lat'])
        northeast_lat = float(location.raw['geometry']['bounds']['northeast']['lat'])
        southwest_lng = float(location.raw['geometry']['bounds']['southwest']['lng'])
        northeast_lng = float(location.raw['geometry']['bounds']['northeast']['lng'])

        # Keep only the points that fall inside the bounding box.
        df_place = self.df[(self.df['Latitude'] < northeast_lat) & (self.df['Longitude'] < northeast_lng) &
                           (self.df['Latitude'] > southwest_lat) & (self.df['Longitude'] > southwest_lng)]

        if only_points:
            return self.__class__(df_place)

        # Otherwise expand to the full tracks that cross the place.
        track_list = df_place['CodeRoute'].unique().tolist()
        return self.__class__(self.df[self.df['CodeRoute'].isin(track_list)])
    def getTracksByPlaceOSM(self, place, timeout=10, only_points=True):
        """
        Gets the points of the specified place searching in OpenStreetMap's API.

        Parameters
        ----------
        place: string
            Place to get the points
        timeout: int
            Time, in seconds, to wait for the geocoding service to respond
            before returning a None value.
        only_points: boolean
            True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.

        Returns
        -------
        place: DFTrack
            A DFTrack with the points of the specified place or
            None if anything is found.
        """
        try:
            geolocator = geopy.Nominatim()
            location = geolocator.geocode(place, timeout=timeout)
        except geopy.exc.GeopyError as geo_error:
            # Geocoding failed or timed out: signal "not found" to the caller.
            return None

        # Nominatim's boundingbox: indices 0/1 are latitudes, 2/3 longitudes
        # (presumably [south, north, west, east] -- verify against the API).
        southwest_lat = float(location.raw['boundingbox'][0])
        northeast_lat = float(location.raw['boundingbox'][1])
        southwest_lng = float(location.raw['boundingbox'][2])
        northeast_lng = float(location.raw['boundingbox'][3])

        # Keep only the points that fall inside the bounding box.
        df_place = self.df[(self.df['Latitude'] < northeast_lat) & (self.df['Longitude'] < northeast_lng) &
                           (self.df['Latitude'] > southwest_lat) & (self.df['Longitude'] > southwest_lng)]

        if only_points:
            return self.__class__(df_place)

        # Otherwise expand to the full tracks that cross the place.
        track_list = df_place['CodeRoute'].unique().tolist()
        return self.__class__(self.df[self.df['CodeRoute'].isin(track_list)])
def getTracksByDate(self, start=None, end=None, periods=None, freq='D'):
"""
Gets the points of the specified date range
using various combinations of parameters.
2 of 'start', 'end', or 'periods' must be specified.
Date format recommended: 'yyyy-mm-dd'
Parameters
----------
start: date
Date start period
end: date
Date end period
periods: int
Number of periods. If None, must specify 'start' and 'end'
freq: string
Frequency of the date range
Returns
-------
df_date: DFTrack
A DFTrack with the points of the specified date range.
"""
if trk_utils.isTimeFormat(start) or trk_utils.isTimeFormat(end):
raise TrackException('Must specify an appropiate date format', 'Time format found')
rng = pd.date_range(start=start, end=end, periods=periods, freq=freq)
df_date = self.df.copy()
df_date['Date'] = pd.to_datetime(df_date['Date'])
df_date['ShortDate'] = df_date['Date'].apply(lambda date: date.date().strftime('%Y-%m-%d'))
df_date = df_date[df_date['ShortDate'].apply(lambda date: date in rng)]
del df_date['ShortDate']
df_date = df_date.reset_index(drop=True)
return self.__class__(df_date, list(df_date))
def getTracksByTime(self, start, end, include_start=True, include_end=True):
"""
Gets the points between the specified time range.
Parameters
----------
start: datetime.time
Time start period
end: datetime.time
Time end period
include_start: boolean
include_end: boolean
Returns
-------
df_time: DFTrack
A DFTrack with the points of the specified date and time periods.
"""
if not trk_utils.isTimeFormat(start) or not trk_utils.isTimeFormat(end):
raise TrackException('Must specify an appropiate time format', trk_utils.TIME_FORMATS)
df_time = self.df.copy()
index = pd.DatetimeIndex(df_time['Date'])
df_time = df_time.iloc[index.indexer_between_time(start_time=start, end_time=end, include_start=include_start, include_end=include_end)]
df_time = df_time.reset_index(drop=True)
return self.__class__(df_time, list(df_time))
    def pointVideoNormalize(self):
        """Assign a 'VideoFrame' number to every point so all tracks span the
        animation of the longest track.

        The longest track advances one frame per point (1..n); every other
        track's points are spread by an integer stride so they finish at
        roughly the same frame.
        """
        df = self.df.copy()
        df_norm = pd.DataFrame()
        # Size of each track; the largest one defines the animation length.
        group_size = df.groupby('CodeRoute').size()
        max_value = group_size.max()
        name_max_value = group_size.idxmax()
        grouped = df['CodeRoute'].unique()
        for name in tqdm(grouped, desc='Groups'):
            df_slice = df[df['CodeRoute'] == name]
            df_slice = df_slice.reset_index(drop=True)
            # Frame stride for this track relative to the longest one.
            div = int(max_value / len(df_slice)) + 1
            # NOTE(review): applying over a one-column DataFrame of the index
            # yields a one-column frame assigned as the 'VideoFrame' column --
            # longest track counts 1..n, others 0, div, 2*div, ...; confirm
            # this DataFrame-into-column assignment is intended.
            df_index = DataFrame(df_slice.index)
            df_slice['VideoFrame'] = df_index.apply(lambda x: x + 1 if name_max_value == name else x * div)
            df_norm = pd.concat([df_norm, df_slice])
        df_norm = df_norm.reset_index(drop=True)
        return self.__class__(df_norm, list(df_norm))
def timeVideoNormalize(self, time, framerate=5):
df = self.df.copy()
if time == 0:
df['VideoFrame'] = 0
df = df.reset_index(drop=True)
return self.__class__(df, list(df))
n_fps = time*framerate
df = df.sort_values('Date')
df_cum = trk_utils.calculateCumTimeDiff(df)
grouped = df_cum['CodeRoute'].unique()
df_norm = pd.DataFrame()
point_idx = 1
for name in tqdm(grouped, desc='Groups'):
df_slice = df_cum[df_cum['CodeRoute'] == name]
time_diff = float((df_slice[['TimeDifference']].sum() / time) / framerate) # Track duration divided by time and framerate
df_range = df_slice[df_slice['CumTimeDiff'] == 0]
df_range = df_range.reset_index(drop=True)
df_range['VideoFrame'] = 0
df_norm = | pd.concat([df_norm, df_range]) | pandas.concat |
import datetime
import json
import pathlib
import numpy as np
import pandas as pd
def downsample(df, offset):
"""Reduce dataframe by resampling according to frequency offset/rule
Parameters
----------
df : pandas.core.frame.DataFrame
A pandas dataframe where the index is the date.
offset : str
offset rule to apply for downsampling
see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
Returns
-------
pandas.core.frame.DataFrame
A pandas dataframe that is downsampled
"""
# convert index to DateIndex
df.index = pd.to_datetime(df.index)
# downsample based on offset
resampled = df.resample(offset).asfreq()
# remove dates for which no data is available
resampled.dropna(how='all', inplace=True)
# add last date if it is not present
if df.iloc[-1].name not in resampled.index:
resampled = | pd.concat([resampled, df.iloc[[-1]]]) | pandas.concat |
'''
Collect computational performance from a collection of GNU time reports.
Usage:
```
python collect_perf.py -a bt2_all.time_log -l lift.time_log -l collate.time_log \
-l to_fastq.time_log -l aln_paired.time_log -l aln_unpaired.time_log \
-l merge.time_log -l sort_all.time_log
```
<NAME>
Johns Hopkins University
2021-2022
'''
import argparse
import os
import pandas as pd
import sys
def parse_args():
    """Define and parse the command-line interface.

    Returns:
        argparse.Namespace with fields aln_log, aln_name, leviosam_logs,
        leviosam_name, labels and output.
    """
    # (flags, keyword-options) pairs; registered in order below.
    option_table = [
        (('-a', '--aln-log'),
         dict(help='Paths to the GNU time log for full alignment.')),
        (('-an', '--aln-name'),
         dict(default='aln',
              help='Label of the full alignment experiment [aln].')),
        (('-l', '--leviosam-logs'),
         dict(action='append', required=True,
              help='Paths to GNU time logs for the levioSAM pipeline.')),
        (('-ln', '--leviosam-name'),
         dict(default='leviosam',
              help='Label of the levioSAM experiment [leviosam].')),
        (('-ll', '--labels'),
         dict(default='',
              help=('Customized labels for each `-l` items, separated by commas.'
                    'Length should be equal to number of `-l`'))),
        (('-o', '--output'),
         dict(help='Path to the output TSV file.')),
    ]
    cli = argparse.ArgumentParser()
    for flags, options in option_table:
        cli.add_argument(*flags, **options)
    return cli.parse_args()
def collect_perf_core(f, ls_perf) -> None:
    """Scan one GNU `time -v` report and append a record to `ls_perf`.

    The appended record is
        [task, usr_time, sys_time, cpu_time, wall_time, max_rss]
    where `task` is '<program>_<first-arg>', times are in seconds and
    `max_rss` is in kbytes.  If the stream holds several reports, only the
    values of the last one survive (single append after the scan).
    """
    for raw in f:
        line = raw.rstrip()
        if 'Command being timed:' in line:
            # e.g. Command being timed: "/usr/bin/bwa mem ref.fa"
            tokens = line.split('"')[1].split()
            task = tokens[0].split('/')[-1] + '_' + tokens[1]
        elif 'User time (seconds):' in line:
            usr_time = float(line.split('):')[1])
        elif 'System time (seconds):' in line:
            sys_time = float(line.split('):')[1])
        elif 'Elapsed (wall clock) time (h:mm:ss or m:ss):' in line:
            fields = line.split('):')[1].split(':')
            if len(fields) == 3:
                wall_time = 3600 * float(fields[0]) + 60 * float(fields[1]) + float(fields[2])
            elif len(fields) == 2:
                wall_time = 60 * float(fields[0]) + float(fields[1])
            else:
                print('error - invalid wall time format', file=sys.stderr)
                exit(1)
        elif 'Maximum resident set size (kbytes):' in line:
            max_rss = int(line.split('):')[1])
    ls_perf.append(
        [task, usr_time, sys_time, usr_time + sys_time, wall_time, max_rss])
    return
def collect_perf_list(logs_list, cols):
    """Parse every GNU time log in `logs_list` into one DataFrame.

    Args:
        logs_list: paths of GNU time reports; one record is extracted per file.
        cols: column labels for the resulting DataFrame.

    Returns:
        pd.DataFrame with one row per log file.
    """
    print(logs_list, file=sys.stderr)
    records = []
    for log_path in logs_list:
        # Context manager replaces the manual open()/close() pair.
        with open(log_path, 'r') as fh:
            collect_perf_core(fh, records)
    return pd.DataFrame(records, columns=cols)
def summarize_df(df, cols):
l_sum = ['summary']
for i in range(1, 5):
l_sum.append(sum(df.iloc[:, i]))
l_sum.append(max(df.iloc[:, 5]))
df_sum = | pd.DataFrame([l_sum], columns=cols) | pandas.DataFrame |
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve
from sklearn.utils import resample
from keras.utils import to_categorical
from sklearn.cross_validation import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Functions
def find_optimal_cutoff( target, predicted ):
    """Find the probability cutoff that maximizes Youden's J (tpr - (1 - fpr)).

    Args:
        target: true binary labels.
        predicted: predicted scores/probabilities for the positive class.

    Returns:
        Single-element list containing the optimal threshold.
    """
    fpr, tpr, threshold = roc_curve(target, predicted)
    i = np.arange(len(tpr))
    roc = pd.DataFrame({'tf': pd.Series(tpr - (1 - fpr), index=i),
                        'threshold': pd.Series(threshold, index=i)})
    # `.ix` was removed from pandas; `.iloc` is the positional equivalent
    # (the index here is 0..n-1, so label and position coincide).
    roc_t = roc.iloc[(roc.tf - 0).abs().argsort()[:1]]
    return list(roc_t['threshold'])
# Load dataset
print( '===> loading dataset' )
data = pd.read_csv( '~/repos/dataset/HR.csv' )
dataset = data.rename( columns = {'left':'class'} )
# Unbalanced dataset
# Upsampling
minority = dataset[ dataset['class'] == 1 ]
minority_upsampled = resample( minority, replace = True, n_samples = 11428, random_state = 123 )
# Downsampling
majority = dataset[ dataset['class'] == 0 ]
majority_downsampled = resample( majority, replace = False, n_samples = 3571, random_state = 123 )
dataset = pd.concat( [minority, majority_downsampled] )
# Transform features
dataset = pd.get_dummies( dataset, columns = ['sales', 'salary'] )
# Selection features
features = dataset.drop( 'class', axis = 1 )
labels = dataset[['class']]
rf = RandomForestClassifier( n_estimators = 100, criterion = 'entropy', max_depth = 15,
min_samples_leaf = 50, min_samples_split = 100, random_state = 10 )
# Train the selector
rf.fit( features, labels.values.ravel() )
features_imp = | pd.Series(rf.feature_importances_,index=features.columns) | pandas.Series |
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
# Plot styling and warning suppression for notebook-style output.
sns.set_style('white')
warnings.filterwarnings('ignore')
# MovieLens 100k ratings file: tab-separated, no header row, so the
# column names are supplied explicitly.
columns_name=["user_id","item_id","rating","timestamp"]
df= pd.read_csv("ml-100k/u.data",sep='\t',names=columns_name)
from binance.client import Client
import keys
from pandas import DataFrame as df
from datetime import datetime
import trading_key
# Authenticated Binance REST client; credentials come from the local
# `keys` module (NOTE(review): keep Pkeys/Skeys out of version control).
client=Client(api_key=keys.Pkeys, api_secret=keys.Skeys)
#get candle data
def candle_data(symbols, intervals):
candles=client.get_klines(symbol=symbols, interval=intervals)
#create (date) dataframe
candles_data_frame= | df(candles) | pandas.DataFrame |
import pandas as pd
import numpy as np
import click
import h5py
import os
import logging
from array import array
from copy import deepcopy
from tqdm import tqdm
from astropy.io import fits
from fact.credentials import create_factdb_engine
from zfits import FactFits
from scipy.optimize import curve_fit
from joblib import Parallel, delayed
import drs4Calibration.config as config
from drs4Calibration.constants import NRCHID, NRCELL, NRTEMPSENSOR, ROI, ADCCOUNTSTOMILIVOLT
from drs4Calibration.tools import safety_stuff
import matplotlib.pyplot as plt
from time import time
def print_delta_time(time, string=""):
hours = int(time / 3600)
rest = time % 3600
minutes = int(rest / 60)
seconds = round(rest % 60, 2)
print(string+" deltaTime: ", hours, minutes, seconds)
@click.command()
@click.argument('drs_file_list_doc_path',
                default="/net/big-tank/POOL/" +
                        "projects/fact/drs4_calibration_data/" +
                        "calibration/calculation/drsFitsFiles.txt",
                type=click.Path(exists=False))
def search_drs_fits_files(drs_file_list_doc_path: str):
    '''
    Search through the fact-database and store the path of all drsFiles
    under the given storePath.

    Runs are selected with fRunTypeKey == 2 and fDrsStep == 2
    (NOTE(review): presumably the DRS calibration runs -- confirm against
    the FACT run-type table) and must have exactly 1000 events.

    Args:
        drs_file_list_doc_path (str):
            Full path to the storeFile
            with the extension '.txt'
    '''
    # TODO check safety stuff. maybe remove
    #safety_stuff(drs_file_list_doc_path)

    def filename(row):
        # Build '<YYYY>/<MM>/<DD>/<fNight>_<fRunID>.fits.fz' relative paths.
        return os.path.join(
            str(row.date.year),
            "{:02d}".format(row.date.month),
            "{:02d}".format(row.date.day),
            "{}_{:03d}.fits.fz".format(row.fNight, row.fRunID),
        )
    # 40drs4320Bias
    # Pull only the columns needed to filter and name the runs.
    drs_infos = pd.read_sql(
        "RunInfo",
        create_factdb_engine(),
        columns=[
            "fNight", "fRunID",
            "fRunTypeKey", "fDrsStep",
            "fNumEvents"])
    drs_file_infos = drs_infos.query("fRunTypeKey == 2 &" +
                                     "fDrsStep == 2 &" +
                                     "fNumEvents == 1000").copy()
    # fNumEvents == 1000 prevent for unfinished/broken files
    # fNight is an integer like 20160101; parse it into a datetime.
    drs_file_infos["date"] = pd.to_datetime(drs_file_infos.fNight.astype(str),
                                            format="%Y%m%d")

    drs_files = drs_file_infos.apply(filename, axis=1).tolist()
    pd.DataFrame(drs_files).to_csv(drs_file_list_doc_path, index=False,
                                   header=False)
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/selectedDrsFitsFiles.txt",
type=click.Path(exists=True))
@click.argument('store_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/newBaseline_timeTest.h5",
type=click.Path(exists=False))
@click.argument('source_folder_path',
default="/net/big-tank/POOL/projects/fact/drs4_calibration_data/",
type=click.Path(exists=False))
def store_drs_values(drs_file_list_doc_path, store_file_path, source_folder_path):
with h5py.File(store_file_path, 'w') as hf:
hf.create_dataset(
name="Time", dtype="float32",
shape=(0, 1), maxshape=(None, 1),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="Temperature", dtype="float32",
shape=(0, NRTEMPSENSOR), maxshape=(None, NRTEMPSENSOR),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="NewBaseline", dtype="float32",
shape=(0, NRCHID*NRCELL*ROI), maxshape=(None, NRCHID*NRCELL*ROI),
compression="gzip", compression_opts=9,
fletcher32=True)
class SourceDataSet:
# @resettable
run_begin = | pd.to_datetime("") | pandas.to_datetime |
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
    # Fixtures shared by the tests below: a plain two-interval index,
    # one containing a missing interval, and the `indices` mapping that
    # the shared `Base` test machinery iterates over.
    self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
    self.index_with_nan = IntervalIndex.from_tuples(
        [(0, 1), np.nan, (1, 2)])
    self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
    # Hook used by the shared `Base` tests to build a fresh 9-interval index.
    return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
    # The same two-interval index should be reachable through every
    # public constructor.
    expected = self.index
    actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
    assert expected.equals(actual)
    # Same breaks but a different `closed` side must NOT compare equal.
    alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
    assert not expected.equals(alternate)
    actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
    assert expected.equals(actual)
    actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
    assert expected.equals(actual)
    actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
                                       closed='right')
    assert expected.equals(actual)
    # Plain Index() should infer IntervalIndex from Interval objects ...
    actual = Index([Interval(0, 1), Interval(1, 2)])
    assert isinstance(actual, IntervalIndex)
    assert expected.equals(actual)
    # ... and from an existing IntervalIndex.
    actual = Index(expected)
    assert isinstance(actual, IntervalIndex)
    assert expected.equals(actual)
def test_constructors_other(self):
    # all-nan input still constructs, with object-dtype values
    result = IntervalIndex.from_intervals([np.nan])
    expected = np.array([np.nan], dtype=object)
    tm.assert_numpy_array_equal(result.values, expected)
    # empty input yields an empty object-dtype values array
    result = IntervalIndex.from_intervals([])
    expected = np.array([], dtype=object)
    tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
    # scalar input is rejected
    with pytest.raises(TypeError):
        IntervalIndex(5)
    # elements that are not Interval objects are rejected
    with pytest.raises(TypeError):
        IntervalIndex([0, 1])
    with pytest.raises(TypeError):
        IntervalIndex.from_intervals([0, 1])
    # invalid value for the `closed` keyword
    with pytest.raises(ValueError):
        IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
    # mixed `closed` sides within one index are rejected
    with pytest.raises(ValueError):
        IntervalIndex.from_intervals([Interval(0, 1),
                                      Interval(1, 2, closed='left')])
    # second pair has right (5) < left (10) -> invalid interval
    with pytest.raises(ValueError):
        IntervalIndex.from_arrays([0, 10], [3, 5])
    with pytest.raises(ValueError):
        Index([Interval(0, 1), Interval(2, 3, closed='left')])
    # no point in nesting periods in an IntervalIndex
    with pytest.raises(ValueError):
        IntervalIndex.from_breaks(
            pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = | IntervalIndex.from_breaks(idx) | pandas.IntervalIndex.from_breaks |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Import OptionMetrics data.
"""
from __future__ import print_function, division
import os
import zipfile
import numpy as np
import pandas as pd
import datetime as dt
from scipy.interpolate import interp1d
from impvol import lfmoneyness, delta, vega
from datastorage.quandlweb import load_spx
path = os.getenv("HOME") + '/Dropbox/Research/data/OptionMetrics/data/'
# __location__ = os.path.realpath(os.path.join(os.getcwd(),
# os.path.dirname(__file__)))
# path = os.path.join(__location__, path + 'OptionMetrics/data/')
def convert_dates(string):
    """Parse an OptionMetrics 'DD-MM-YYYY' date string into a datetime."""
    parsed = dt.datetime.strptime(string, '%d-%m-%Y')
    return parsed
def import_dividends():
    """Read SPX dividends from the zipped CSV and store them as HDF5.

    The table is indexed and sorted by date before being written to
    'dividends.h5' under the key 'dividends'.
    """
    archive = zipfile.ZipFile(path + 'SPX_dividend.zip', 'r')
    member = archive.namelist()[0]
    dividends = pd.read_csv(archive.open(member),
                            converters={'date': convert_dates})
    # Chained non-inplace calls give the same indexed, sorted frame.
    dividends = dividends.set_index('date').sort_index()
    print(dividends.head())
    dividends.to_hdf(path + 'dividends.h5', 'dividends')
def import_yield_curve():
    """Import zero yield curve from the zipped CSV and store it as HDF5.

    The result is indexed by (date, days) and saved to 'yields.h5'.
    """
    zf = zipfile.ZipFile(path + 'yield_curve.zip', 'r')
    name = zf.namelist()[0]
    yields = pd.read_csv(zf.open(name), converters={'date': convert_dates})
    # Remove weird observations
    # (rates of 10%+ are treated as bad data -- TODO confirm cutoff)
    yields = yields[yields['rate'] < 10]
    # Fill in the blanks in the yield curve
    # yields = interpolate_curve(yields)
    yields.rename(columns={'rate': 'riskfree'}, inplace=True)
    yields.set_index(['date', 'days'], inplace=True)
    yields.sort_index(inplace=True)
    print(yields.head())
    yields.to_hdf(path + 'yields.h5', 'yields')
def interpolate_curve_group(group):
    """Interpolate the yield curve for one day onto a daily maturity grid.

    Parameters
    ----------
    group : DataFrame
        One day's observations with 'days' (maturity) and 'riskfree' columns.

    Returns
    -------
    DataFrame
        Interpolated yields on an integer grid covering
        [days.min(), days.max()], indexed by 'days'.
    """
    y = np.array(group.riskfree)
    x = np.array(group.days)
    a = group.days.min()
    b = group.days.max()
    new_x = np.linspace(a, b, b - a + 1).astype(int)
    try:
        new_y = interp1d(x, y, kind='cubic')(new_x)
    except ValueError:
        # Cubic interpolation needs at least 4 points; fall back to linear.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        new_y = interp1d(x, y, kind='linear')(new_x)
    group = pd.DataFrame(new_y, index=pd.Index(new_x, name='days'))
    return group
def interpolate_curve(yields):
    """Fill in the blanks in the yield curve.

    Applies `interpolate_curve_group` per date to obtain a daily maturity
    grid, then forward-/back-fills the remaining gaps across maturities.
    """
    yields.reset_index(inplace=True)
    yields = yields.groupby('date').apply(interpolate_curve_group)
    # Pivot maturities into columns so gaps can be filled along each row.
    yields = yields.unstack('days')
    # NOTE(review): fillna(method=...) is deprecated in recent pandas
    # (.ffill/.bfill are the modern spelling) -- confirm target version.
    yields.fillna(method='ffill', axis=1, inplace=True)
    yields.fillna(method='bfill', axis=1, inplace=True)
    yields = yields.stack('days')
    # The interpolated column comes back unnamed (0); restore 'riskfree'.
    yields.rename(columns={0: 'riskfree'}, inplace=True)
    return yields
def import_riskfree():
    """Take the last value of the yield curve as a risk-free rate.

    Saves annualized rate in percentage points to 'riskfree.h5'.
    """
    yields = load_yields()
    # Within each date the curve is sorted by days-to-maturity, so `.last()`
    # picks the longest maturity available that day.
    riskfree = yields.groupby(level='date').last()
    print(riskfree.head())
    riskfree.to_hdf(path + 'riskfree.h5', 'riskfree')
def import_standard_options():
    """Import standardized options from the zipped CSV and store as HDF5.

    The table is indexed by (cp_flag, date, days) and saved to
    'std_options.h5'.
    """
    zf = zipfile.ZipFile(path + 'SPX_standard_options.zip', 'r')
    name = zf.namelist()[0]
    data = pd.read_csv(zf.open(name), converters={'date': convert_dates})
    # Shorten the vendor's column names to the names used downstream.
    cols = {'forward_price': 'forward', 'impl_volatility': 'imp_vol'}
    data.rename(columns=cols, inplace=True)
    data = data.set_index(['cp_flag', 'date', 'days']).sort_index()
    print(data.head())
    data.to_hdf(path + 'std_options.h5', 'std_options')
def import_vol_surface():
    """Import volatility surface.

    Infer risk-free rate directly from data.  Keeps Wednesday quotes with
    maturities up to one year, merges in the risk-free curve, dividends and
    the SPX level, computes moneyness/delta/vega, and stores the result
    to 'surface.h5'.
    """
    zf = zipfile.ZipFile(path + 'SPX_surface.zip', 'r')
    name = zf.namelist()[0]
    df = pd.read_csv(zf.open(name), converters={'date': convert_dates})
    df.loc[:, 'weekday'] = df['date'].apply(lambda x: x.weekday())
    # Apply some filters: Wednesdays only (weekday == 2), maturity <= 1 year.
    df = df[df['weekday'] == 2]
    df = df[df['days'] <= 365]
    df = df.drop('weekday', axis=1)
    surface = df
    cols = {'impl_volatility': 'imp_vol', 'impl_strike': 'strike',
            'impl_premium': 'premium'}
    surface.rename(columns=cols, inplace=True)
    # TODO : who term structure should be imported and merged!
    riskfree = load_riskfree().reset_index()
    dividends = load_dividends().reset_index()
    spx = load_spx().reset_index()
    surface = pd.merge(surface, riskfree)
    surface = pd.merge(surface, spx)
    surface = pd.merge(surface, dividends)
    # Adjust riskfree by dividend yield
    surface['riskfree'] -= surface['rate']
    # Remove percentage point
    surface['riskfree'] /= 100
    # Replace 'cp_flag' with True/False 'call' variable
    surface.loc[:, 'call'] = True
    surface.loc[surface['cp_flag'] == 'P', 'call'] = False
    # Normalize maturity to being a share of the year
    surface['maturity'] = surface['days'] / 365
    # Rename columns
    surface.rename(columns={'spx': 'price'}, inplace=True)
    # Compute lf-moneyness
    surface['moneyness'] = lfmoneyness(surface['price'], surface['strike'],
                                       surface['riskfree'],
                                       surface['maturity'])
    # Compute option Delta normalized by current price
    surface['delta'] = delta(surface['moneyness'], surface['maturity'],
                             surface['imp_vol'], surface['call'])
    # Compute option Vega normalized by current price
    surface['vega'] = vega(surface['moneyness'], surface['maturity'],
                           surface['imp_vol'])
    # `sort_index(by=...)` was removed from pandas; `sort_values` is the
    # long-supported equivalent.
    surface.sort_values(by=['date', 'maturity', 'moneyness'], inplace=True)
    print(surface.head())
    surface.to_hdf(path + 'surface.h5', 'surface')
def import_vol_surface_simple():
    """Import volatility surface. Simple version.

    Infers the risk-free rate from the forward/spot relation, keeps only
    out-of-the-money options, and stores the enriched surface to
    'surface.h5'.
    """
    zf = zipfile.ZipFile(path + 'SPX_surface.zip', 'r')
    name = zf.namelist()[0]
    df = pd.read_csv(zf.open(name), converters={'date': convert_dates})
    df.loc[:, 'weekday'] = df['date'].apply(lambda x: x.weekday())
    # Apply some filters: Wednesdays only (weekday == 2), maturity <= 1 year.
    df = df[df['weekday'] == 2]
    df = df[df['days'] <= 365]
    surface = df.drop('weekday', axis=1)
    cols = {'impl_volatility': 'imp_vol', 'impl_strike': 'strike',
            'impl_premium': 'premium'}
    surface.rename(columns=cols, inplace=True)
    spx = load_spx().reset_index()
    standard_options = load_standard_options()[['forward']].reset_index()
    surface = pd.merge(surface, standard_options)
    surface = pd.merge(surface, spx)
    # Normalize maturity to being a share of the year
    surface['maturity'] = surface['days'] / 365
    # Implied risk-free rate from the forward: r = log(F/S) / tau.
    surface['riskfree'] = np.log(surface['forward'] / surface['spx'])
    surface['riskfree'] /= surface['maturity']
    # Replace 'cp_flag' with True/False 'call' variable
    surface.loc[:, 'call'] = True
    surface.loc[surface['cp_flag'] == 'P', 'call'] = False
    # Rename columns
    surface.rename(columns={'spx': 'price'}, inplace=True)
    # Compute lf-moneyness
    surface['moneyness'] = lfmoneyness(surface['price'], surface['strike'],
                                       surface['riskfree'],
                                       surface['maturity'])
    # Compute option Delta normalized by current price
    surface['delta'] = delta(surface['moneyness'], surface['maturity'],
                             surface['imp_vol'], surface['call'])
    # Compute option Vega normalized by current price
    surface['vega'] = vega(surface['moneyness'], surface['maturity'],
                           surface['imp_vol'])
    # Take out-of-the-money options
    calls = surface['call'] & (surface['moneyness'] >= 0)
    puts = np.logical_not(surface['call']) & (surface['moneyness'] < 0)
    surface = pd.concat([surface[calls], surface[puts]])
    # `sort_index(by=...)` was removed from pandas; `sort_values` is the
    # long-supported equivalent.
    surface.sort_values(by=['date', 'maturity', 'moneyness'], inplace=True)
    print(surface.head())
    surface.to_hdf(path + 'surface.h5', 'surface')
def load_dividends():
    """Load dividends from the disk (annualized, percentage points).

    Typical output:

                 rate
    date
    1996-01-04  2.460
    1996-01-05  2.492
    1996-01-08  2.612
    1996-01-09  2.455
    1996-01-10  2.511
    """
    store = path + 'dividends.h5'
    return pd.read_hdf(store, 'dividends')
def load_yields():
    """Load zero yield curve from the disk (annualized, percentage points).

    Typical output:

                     riskfree
    date       days
    1996-01-02 9        5.763
               15       5.746
               50       5.673
               78       5.609
               169      5.474
    """
    store = path + 'yields.h5'
    return pd.read_hdf(store, 'yields')
def load_riskfree():
    """Load risk-free rate (annualized, percentage points).

    Returns
    -------
    DataFrame
        Annualized rate in percentage points, indexed by date.

    Typical output:

                riskfree
    date
    1996-01-02     6.138
    1996-01-03     6.125
    1996-01-04     6.142
    1996-01-05     6.219
    1996-01-08     6.220
    """
    store = path + 'riskfree.h5'
    return pd.read_hdf(store, 'riskfree')
import os
import sys
import requests
import numpy as np
import pandas as pd
import kauffman.constants as c
from kauffman.tools._etl import county_msa_cross_walk as cw
# https://www.census.gov/programs-surveys/popest.html
# Use the fully-qualified option name: the short alias 'max_columns'
# became ambiguous (and raises OptionError) once pandas introduced
# 'styler.render.max_columns'.
pd.set_option('display.max_columns', 1000)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.