repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort._load_single_patient_kallisto
|
def _load_single_patient_kallisto(self, patient):
"""
Load Kallisto gene quantification given a patient
Parameters
----------
patient : Patient
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Kallisto data
columns include patient_id, target_id, length, eff_length, est_counts, tpm
"""
data = pd.read_csv(patient.tumor_sample.kallisto_path, sep="\t")
data["patient_id"] = patient.id
return data
|
python
|
def _load_single_patient_kallisto(self, patient):
"""
Load Kallisto gene quantification given a patient
Parameters
----------
patient : Patient
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Kallisto data
columns include patient_id, target_id, length, eff_length, est_counts, tpm
"""
data = pd.read_csv(patient.tumor_sample.kallisto_path, sep="\t")
data["patient_id"] = patient.id
return data
|
[
"def",
"_load_single_patient_kallisto",
"(",
"self",
",",
"patient",
")",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"patient",
".",
"tumor_sample",
".",
"kallisto_path",
",",
"sep",
"=",
"\"\\t\"",
")",
"data",
"[",
"\"patient_id\"",
"]",
"=",
"patient",
".",
"id",
"return",
"data"
] |
Load Kallisto gene quantification given a patient
Parameters
----------
patient : Patient
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Kallisto data
columns include patient_id, target_id, length, eff_length, est_counts, tpm
|
[
"Load",
"Kallisto",
"gene",
"quantification",
"given",
"a",
"patient"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L897-L913
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.load_cufflinks
|
def load_cufflinks(self, filter_ok=True):
    """
    Load Cufflinks gene expression data for the whole cohort.

    Parameters
    ----------
    filter_ok : bool, optional
        If true, filter Cufflinks data to rows with FPKM_status == "OK"

    Returns
    -------
    cufflinks_data : Pandas dataframe
        Pandas dataframe with Cufflinks data for all patients; columns include
        patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
    """
    per_patient_frames = [
        self._load_single_patient_cufflinks(patient, filter_ok)
        for patient in self
    ]
    # copy=False avoids duplicating the per-patient frames during concat.
    return pd.concat(per_patient_frames, copy=False)
|
python
|
def load_cufflinks(self, filter_ok=True):
"""
Load a Cufflinks gene expression data for a cohort
Parameters
----------
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
cufflinks_data : Pandas dataframe
Pandas dataframe with Cufflinks data for all patients
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
"""
return \
pd.concat(
[self._load_single_patient_cufflinks(patient, filter_ok) for patient in self],
copy=False
)
|
[
"def",
"load_cufflinks",
"(",
"self",
",",
"filter_ok",
"=",
"True",
")",
":",
"return",
"pd",
".",
"concat",
"(",
"[",
"self",
".",
"_load_single_patient_cufflinks",
"(",
"patient",
",",
"filter_ok",
")",
"for",
"patient",
"in",
"self",
"]",
",",
"copy",
"=",
"False",
")"
] |
Load a Cufflinks gene expression data for a cohort
Parameters
----------
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
cufflinks_data : Pandas dataframe
Pandas dataframe with Cufflinks data for all patients
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
|
[
"Load",
"a",
"Cufflinks",
"gene",
"expression",
"data",
"for",
"a",
"cohort"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L915-L934
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort._load_single_patient_cufflinks
|
def _load_single_patient_cufflinks(self, patient, filter_ok):
"""
Load Cufflinks gene quantification given a patient
Parameters
----------
patient : Patient
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Cufflinks data
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
"""
data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t")
data["patient_id"] = patient.id
if filter_ok:
# Filter to OK FPKM counts
data = data[data["FPKM_status"] == "OK"]
return data
|
python
|
def _load_single_patient_cufflinks(self, patient, filter_ok):
"""
Load Cufflinks gene quantification given a patient
Parameters
----------
patient : Patient
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Cufflinks data
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
"""
data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t")
data["patient_id"] = patient.id
if filter_ok:
# Filter to OK FPKM counts
data = data[data["FPKM_status"] == "OK"]
return data
|
[
"def",
"_load_single_patient_cufflinks",
"(",
"self",
",",
"patient",
",",
"filter_ok",
")",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"patient",
".",
"tumor_sample",
".",
"cufflinks_path",
",",
"sep",
"=",
"\"\\t\"",
")",
"data",
"[",
"\"patient_id\"",
"]",
"=",
"patient",
".",
"id",
"if",
"filter_ok",
":",
"# Filter to OK FPKM counts",
"data",
"=",
"data",
"[",
"data",
"[",
"\"FPKM_status\"",
"]",
"==",
"\"OK\"",
"]",
"return",
"data"
] |
Load Cufflinks gene quantification given a patient
Parameters
----------
patient : Patient
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Cufflinks data
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
|
[
"Load",
"Cufflinks",
"gene",
"quantification",
"given",
"a",
"patient"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L936-L958
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.get_filtered_isovar_epitopes
|
def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff):
    """
    Mostly replicates topiary.build_epitope_collection_from_binding_predictions

    Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset
    in order to figure out whether a variant is in the peptide because it only has the variant's
    offset into the full protein; but isovar gives us the variant's offset into the protein subsequence
    (dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer
    peptides generated by mhctools.
    """
    kept_predictions = []
    for prediction in epitopes:
        isovar_row = dict(prediction.source_sequence_key)
        # The epitope must overlap the mutated residues of the protein
        # subsequence reported by isovar...
        overlaps_mutation = contains_mutant_residues(
            peptide_start_in_protein=prediction.offset,
            peptide_length=len(prediction.peptide),
            mutation_start_in_protein=isovar_row["variant_aa_interval_start"],
            mutation_end_in_protein=isovar_row["variant_aa_interval_end"])
        # ...and bind at least as strongly as the IC50 cutoff.
        if overlaps_mutation and prediction.value <= ic50_cutoff:
            kept_predictions.append(prediction)
    return EpitopeCollection(kept_predictions)
|
python
|
def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff):
"""
Mostly replicates topiary.build_epitope_collection_from_binding_predictions
Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset
in order to figure out whether a variant is in the peptide because it only has the variant's
offset into the full protein; but isovar gives us the variant's offset into the protein subsequence
(dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer
peptides generated by mhctools.
"""
mutant_binding_predictions = []
for binding_prediction in epitopes:
peptide = binding_prediction.peptide
peptide_offset = binding_prediction.offset
isovar_row = dict(binding_prediction.source_sequence_key)
is_mutant = contains_mutant_residues(
peptide_start_in_protein=peptide_offset,
peptide_length=len(peptide),
mutation_start_in_protein=isovar_row["variant_aa_interval_start"],
mutation_end_in_protein=isovar_row["variant_aa_interval_end"])
if is_mutant and binding_prediction.value <= ic50_cutoff:
mutant_binding_predictions.append(binding_prediction)
return EpitopeCollection(mutant_binding_predictions)
|
[
"def",
"get_filtered_isovar_epitopes",
"(",
"self",
",",
"epitopes",
",",
"ic50_cutoff",
")",
":",
"mutant_binding_predictions",
"=",
"[",
"]",
"for",
"binding_prediction",
"in",
"epitopes",
":",
"peptide",
"=",
"binding_prediction",
".",
"peptide",
"peptide_offset",
"=",
"binding_prediction",
".",
"offset",
"isovar_row",
"=",
"dict",
"(",
"binding_prediction",
".",
"source_sequence_key",
")",
"is_mutant",
"=",
"contains_mutant_residues",
"(",
"peptide_start_in_protein",
"=",
"peptide_offset",
",",
"peptide_length",
"=",
"len",
"(",
"peptide",
")",
",",
"mutation_start_in_protein",
"=",
"isovar_row",
"[",
"\"variant_aa_interval_start\"",
"]",
",",
"mutation_end_in_protein",
"=",
"isovar_row",
"[",
"\"variant_aa_interval_end\"",
"]",
")",
"if",
"is_mutant",
"and",
"binding_prediction",
".",
"value",
"<=",
"ic50_cutoff",
":",
"mutant_binding_predictions",
".",
"append",
"(",
"binding_prediction",
")",
"return",
"EpitopeCollection",
"(",
"mutant_binding_predictions",
")"
] |
Mostly replicates topiary.build_epitope_collection_from_binding_predictions
Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset
in order to figure out whether a variant is in the peptide because it only has the variant's
offset into the full protein; but isovar gives us the variant's offset into the protein subsequence
(dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer
peptides generated by mhctools.
|
[
"Mostly",
"replicates",
"topiary",
".",
"build_epitope_collection_from_binding_predictions"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1064-L1086
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.plot_roc_curve
|
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
    """Plot an ROC curve of benefit against a given variable.

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    bootstrap_samples : int, optional
        Number of bootstrap samples used to compute the AUC
    ax : Axes, default None
        Axes to plot on

    Returns
    -------
    (mean_auc_score, plot): (float, matplotlib plot)
        Returns the average AUC for the given predictor over `bootstrap_samples`
        and the associated ROC curve
    """
    plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs)
    # Drop patients missing either the outcome or the predictor.
    for required_col in ("benefit", plot_col):
        df = filter_not_null(df, required_col)
    df.benefit = df.benefit.astype(bool)
    return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
|
python
|
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
"""Plot an ROC curve for benefit and a given variable
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
bootstrap_samples : int, optional
Number of boostrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_auc_score, plot): (float, matplotlib plot)
Returns the average AUC for the given predictor over `bootstrap_samples`
and the associated ROC curve
"""
plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs)
df = filter_not_null(df, "benefit")
df = filter_not_null(df, plot_col)
df.benefit = df.benefit.astype(bool)
return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
|
[
"def",
"plot_roc_curve",
"(",
"self",
",",
"on",
",",
"bootstrap_samples",
"=",
"100",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"plot_col",
",",
"df",
"=",
"self",
".",
"as_dataframe",
"(",
"on",
",",
"return_cols",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"df",
"=",
"filter_not_null",
"(",
"df",
",",
"\"benefit\"",
")",
"df",
"=",
"filter_not_null",
"(",
"df",
",",
"plot_col",
")",
"df",
".",
"benefit",
"=",
"df",
".",
"benefit",
".",
"astype",
"(",
"bool",
")",
"return",
"roc_curve_plot",
"(",
"df",
",",
"plot_col",
",",
"\"benefit\"",
",",
"bootstrap_samples",
",",
"ax",
"=",
"ax",
")"
] |
Plot an ROC curve for benefit and a given variable
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
bootstrap_samples : int, optional
Number of boostrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_auc_score, plot): (float, matplotlib plot)
Returns the average AUC for the given predictor over `bootstrap_samples`
and the associated ROC curve
|
[
"Plot",
"an",
"ROC",
"curve",
"for",
"benefit",
"and",
"a",
"given",
"variable"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1171-L1193
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.plot_benefit
|
def plot_benefit(self, on, benefit_col="benefit", label="Response", ax=None,
                 alternative="two-sided", boolean_value_map=None,
                 order=None, **kwargs):
    """Plot a comparison of benefit/response in the cohort on a given variable.

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    benefit_col : str, optional
        Name of the boolean benefit column to compare against.
    label : str, optional
        Label to give the benefit column in the plot.
    ax : Axes, default None
        Axes to plot on.
    alternative : str, optional
        Sidedness of the statistical test run by `plot_boolean`.
    boolean_value_map : dict, optional
        Map of conversions for values in the benefit column; defaults to
        labels derived from `self.benefit_plot_name`.
    order : list, optional
        Order of the labels on the x-axis; defaults to
        [no-benefit label, benefit label].

    Returns
    -------
    The (test statistic, p-value) result of `plot_boolean`.
    """
    # FIX: the default for boolean_value_map was a mutable `{}`, a shared
    # mutable default argument. None is behaviorally equivalent here because
    # both None and {} are falsy in the `or` fallback below.
    no_benefit_plot_name = "No %s" % self.benefit_plot_name
    boolean_value_map = boolean_value_map or {True: self.benefit_plot_name, False: no_benefit_plot_name}
    order = order or [no_benefit_plot_name, self.benefit_plot_name]
    return self.plot_boolean(on=on,
                             boolean_col=benefit_col,
                             alternative=alternative,
                             boolean_label=label,
                             boolean_value_map=boolean_value_map,
                             order=order,
                             ax=ax,
                             **kwargs)
|
python
|
def plot_benefit(self, on, benefit_col="benefit", label="Response", ax=None,
alternative="two-sided", boolean_value_map={},
order=None, **kwargs):
"""Plot a comparison of benefit/response in the cohort on a given variable
"""
no_benefit_plot_name = "No %s" % self.benefit_plot_name
boolean_value_map = boolean_value_map or {True: self.benefit_plot_name, False: no_benefit_plot_name}
order = order or [no_benefit_plot_name, self.benefit_plot_name]
return self.plot_boolean(on=on,
boolean_col=benefit_col,
alternative=alternative,
boolean_label=label,
boolean_value_map=boolean_value_map,
order=order,
ax=ax,
**kwargs)
|
[
"def",
"plot_benefit",
"(",
"self",
",",
"on",
",",
"benefit_col",
"=",
"\"benefit\"",
",",
"label",
"=",
"\"Response\"",
",",
"ax",
"=",
"None",
",",
"alternative",
"=",
"\"two-sided\"",
",",
"boolean_value_map",
"=",
"{",
"}",
",",
"order",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"no_benefit_plot_name",
"=",
"\"No %s\"",
"%",
"self",
".",
"benefit_plot_name",
"boolean_value_map",
"=",
"boolean_value_map",
"or",
"{",
"True",
":",
"self",
".",
"benefit_plot_name",
",",
"False",
":",
"no_benefit_plot_name",
"}",
"order",
"=",
"order",
"or",
"[",
"no_benefit_plot_name",
",",
"self",
".",
"benefit_plot_name",
"]",
"return",
"self",
".",
"plot_boolean",
"(",
"on",
"=",
"on",
",",
"boolean_col",
"=",
"benefit_col",
",",
"alternative",
"=",
"alternative",
",",
"boolean_label",
"=",
"label",
",",
"boolean_value_map",
"=",
"boolean_value_map",
",",
"order",
"=",
"order",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")"
] |
Plot a comparison of benefit/response in the cohort on a given variable
|
[
"Plot",
"a",
"comparison",
"of",
"benefit",
"/",
"response",
"in",
"the",
"cohort",
"on",
"a",
"given",
"variable"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1195-L1211
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.plot_boolean
|
def plot_boolean(self,
                 on,
                 boolean_col,
                 plot_col=None,
                 boolean_label=None,
                 boolean_value_map=None,
                 order=None,
                 ax=None,
                 alternative="two-sided",
                 **kwargs):
    """Plot a comparison of `boolean_col` in the cohort on a given variable via
    `on` or `col`.

    If the variable (through `on` or `col`) is binary this will compare
    odds-ratios and perform a Fisher's exact test.

    If the variable is numeric, this will compare the distributions through
    a Mann-Whitney test and plot the distributions with box-strip plot.

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    plot_col : str, optional
        If on has many columns, this is the one whose values we are plotting.
        If on has a single column, this is unnecessary.
        We might want many columns if, e.g. we're generating boolean_col from a
        function as well.
    boolean_col : str
        Column name of boolean column to plot or compare against.
    boolean_label : None, optional
        Label to give boolean column in the plot
    boolean_value_map : dict, optional
        Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'}
    order : None, optional
        Order of the labels on the x-axis
    ax : None, optional
        Axes to plot on
    alternative : str, optional
        Choose the sidedness of the mannwhitneyu or Fisher's Exact test.

    Returns
    -------
    (Test statistic, p-value): (float, float)
    """
    # FIX: boolean_value_map previously defaulted to a mutable `{}` (shared
    # mutable default argument). None is equivalent: both are falsy, and the
    # mapping is only applied inside the truthiness guard below.
    cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
    plot_col = self.plot_col_from_cols(cols=cols, plot_col=plot_col)
    # Drop rows missing either the boolean condition or the plotted value.
    df = filter_not_null(df, boolean_col)
    df = filter_not_null(df, plot_col)
    if boolean_label:
        # Copy the boolean column under its display label so the plot
        # axis shows the friendly name.
        df[boolean_label] = df[boolean_col]
        boolean_col = boolean_label
    condition_value = None
    if boolean_value_map:
        assert set(boolean_value_map.keys()) == set([True, False]), \
            "Improper mapping of boolean column provided"
        df[boolean_col] = df[boolean_col].map(lambda v: boolean_value_map[v])
        condition_value = boolean_value_map[True]
    if df[plot_col].dtype == "bool":
        # Boolean vs boolean: odds ratio + Fisher's exact test.
        results = fishers_exact_plot(
            data=df,
            condition1=boolean_col,
            condition2=plot_col,
            condition1_value=condition_value,
            alternative=alternative,
            order=order,
            ax=ax)
    else:
        # Boolean vs numeric: Mann-Whitney test with box-strip plot.
        results = mann_whitney_plot(
            data=df,
            condition=boolean_col,
            distribution=plot_col,
            condition_value=condition_value,
            alternative=alternative,
            order=order,
            ax=ax)
    return results
|
python
|
def plot_boolean(self,
on,
boolean_col,
plot_col=None,
boolean_label=None,
boolean_value_map={},
order=None,
ax=None,
alternative="two-sided",
**kwargs):
"""Plot a comparison of `boolean_col` in the cohort on a given variable via
`on` or `col`.
If the variable (through `on` or `col`) is binary this will compare
odds-ratios and perform a Fisher's exact test.
If the variable is numeric, this will compare the distributions through
a Mann-Whitney test and plot the distributions with box-strip plot
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
plot_col : str, optional
If on has many columns, this is the one whose values we are plotting.
If on has a single column, this is unnecessary.
We might want many columns if, e.g. we're generating boolean_col from a
function as well.
boolean_col : str
Column name of boolean column to plot or compare against.
boolean_label : None, optional
Label to give boolean column in the plot
boolean_value_map : dict, optional
Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'}
order : None, optional
Order of the labels on the x-axis
ax : None, optional
Axes to plot on
alternative : str, optional
Choose the sidedness of the mannwhitneyu or Fisher's Exact test.
Returns
-------
(Test statistic, p-value): (float, float)
"""
cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
plot_col = self.plot_col_from_cols(cols=cols, plot_col=plot_col)
df = filter_not_null(df, boolean_col)
df = filter_not_null(df, plot_col)
if boolean_label:
df[boolean_label] = df[boolean_col]
boolean_col = boolean_label
condition_value = None
if boolean_value_map:
assert set(boolean_value_map.keys()) == set([True, False]), \
"Improper mapping of boolean column provided"
df[boolean_col] = df[boolean_col].map(lambda v: boolean_value_map[v])
condition_value = boolean_value_map[True]
if df[plot_col].dtype == "bool":
results = fishers_exact_plot(
data=df,
condition1=boolean_col,
condition2=plot_col,
condition1_value=condition_value,
alternative=alternative,
order=order,
ax=ax)
else:
results = mann_whitney_plot(
data=df,
condition=boolean_col,
distribution=plot_col,
condition_value=condition_value,
alternative=alternative,
order=order,
ax=ax)
return results
|
[
"def",
"plot_boolean",
"(",
"self",
",",
"on",
",",
"boolean_col",
",",
"plot_col",
"=",
"None",
",",
"boolean_label",
"=",
"None",
",",
"boolean_value_map",
"=",
"{",
"}",
",",
"order",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"alternative",
"=",
"\"two-sided\"",
",",
"*",
"*",
"kwargs",
")",
":",
"cols",
",",
"df",
"=",
"self",
".",
"as_dataframe",
"(",
"on",
",",
"return_cols",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"plot_col",
"=",
"self",
".",
"plot_col_from_cols",
"(",
"cols",
"=",
"cols",
",",
"plot_col",
"=",
"plot_col",
")",
"df",
"=",
"filter_not_null",
"(",
"df",
",",
"boolean_col",
")",
"df",
"=",
"filter_not_null",
"(",
"df",
",",
"plot_col",
")",
"if",
"boolean_label",
":",
"df",
"[",
"boolean_label",
"]",
"=",
"df",
"[",
"boolean_col",
"]",
"boolean_col",
"=",
"boolean_label",
"condition_value",
"=",
"None",
"if",
"boolean_value_map",
":",
"assert",
"set",
"(",
"boolean_value_map",
".",
"keys",
"(",
")",
")",
"==",
"set",
"(",
"[",
"True",
",",
"False",
"]",
")",
",",
"\"Improper mapping of boolean column provided\"",
"df",
"[",
"boolean_col",
"]",
"=",
"df",
"[",
"boolean_col",
"]",
".",
"map",
"(",
"lambda",
"v",
":",
"boolean_value_map",
"[",
"v",
"]",
")",
"condition_value",
"=",
"boolean_value_map",
"[",
"True",
"]",
"if",
"df",
"[",
"plot_col",
"]",
".",
"dtype",
"==",
"\"bool\"",
":",
"results",
"=",
"fishers_exact_plot",
"(",
"data",
"=",
"df",
",",
"condition1",
"=",
"boolean_col",
",",
"condition2",
"=",
"plot_col",
",",
"condition1_value",
"=",
"condition_value",
",",
"alternative",
"=",
"alternative",
",",
"order",
"=",
"order",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"results",
"=",
"mann_whitney_plot",
"(",
"data",
"=",
"df",
",",
"condition",
"=",
"boolean_col",
",",
"distribution",
"=",
"plot_col",
",",
"condition_value",
"=",
"condition_value",
",",
"alternative",
"=",
"alternative",
",",
"order",
"=",
"order",
",",
"ax",
"=",
"ax",
")",
"return",
"results"
] |
Plot a comparison of `boolean_col` in the cohort on a given variable via
`on` or `col`.
If the variable (through `on` or `col`) is binary this will compare
odds-ratios and perform a Fisher's exact test.
If the variable is numeric, this will compare the distributions through
a Mann-Whitney test and plot the distributions with box-strip plot
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
plot_col : str, optional
If on has many columns, this is the one whose values we are plotting.
If on has a single column, this is unnecessary.
We might want many columns if, e.g. we're generating boolean_col from a
function as well.
boolean_col : str
Column name of boolean column to plot or compare against.
boolean_label : None, optional
Label to give boolean column in the plot
boolean_value_map : dict, optional
Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'}
order : None, optional
Order of the labels on the x-axis
ax : None, optional
Axes to plot on
alternative : str, optional
Choose the sidedness of the mannwhitneyu or Fisher's Exact test.
Returns
-------
(Test statistic, p-value): (float, float)
|
[
"Plot",
"a",
"comparison",
"of",
"boolean_col",
"in",
"the",
"cohort",
"on",
"a",
"given",
"variable",
"via",
"on",
"or",
"col",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1213-L1293
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.plot_survival
|
def plot_survival(self,
                  on,
                  how="os",
                  survival_units="Days",
                  strata=None,
                  ax=None,
                  ci_show=False,
                  with_condition_color="#B38600",
                  no_condition_color="#A941AC",
                  with_condition_label=None,
                  no_condition_label=None,
                  color_map=None,
                  label_map=None,
                  color_palette="Set2",
                  threshold=None, **kwargs):
    """Plot a Kaplan Meier survival curve by splitting the cohort into two groups.

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    how : {"os", "pfs"}, optional
        Whether to plot OS (overall survival) or PFS (progression free survival)
    survival_units : str
        Unit of time for the survival measure, i.e. Days or Months
    strata : str
        (optional) column name of stratifying variable
    ci_show : bool
        Display the confidence interval around the survival curve
    threshold : int, "median", "median-per-strata" or None (optional)
        Threshold of `col` on which to split the cohort
    """
    assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how
    cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
    # Survival plots split on exactly one column.
    plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True)
    df = filter_not_null(df, plot_col)
    # Pick the axis label and censoring column matching the survival type.
    if how == "os":
        ylabel = "Overall Survival (%)"
        censor_col = "deceased"
    else:
        ylabel = "Progression-Free Survival (%)"
        censor_col = "progressed_or_deceased"
    return plot_kmf(
        df=df,
        condition_col=plot_col,
        xlabel=survival_units,
        ylabel=ylabel,
        censor_col=censor_col,
        survival_col=how,
        strata_col=strata,
        threshold=threshold,
        ax=ax,
        ci_show=ci_show,
        with_condition_color=with_condition_color,
        no_condition_color=no_condition_color,
        with_condition_label=with_condition_label,
        no_condition_label=no_condition_label,
        color_palette=color_palette,
        label_map=label_map,
        color_map=color_map,
    )
|
python
|
def plot_survival(self,
on,
how="os",
survival_units="Days",
strata=None,
ax=None,
ci_show=False,
with_condition_color="#B38600",
no_condition_color="#A941AC",
with_condition_label=None,
no_condition_label=None,
color_map=None,
label_map=None,
color_palette="Set2",
threshold=None, **kwargs):
"""Plot a Kaplan Meier survival curve by splitting the cohort into two groups
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
how : {"os", "pfs"}, optional
Whether to plot OS (overall survival) or PFS (progression free survival)
survival_units : str
Unit of time for the survival measure, i.e. Days or Months
strata : str
(optional) column name of stratifying variable
ci_show : bool
Display the confidence interval around the survival curve
threshold : int, "median", "median-per-strata" or None (optional)
Threshold of `col` on which to split the cohort
"""
assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how
cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True)
df = filter_not_null(df, plot_col)
results = plot_kmf(
df=df,
condition_col=plot_col,
xlabel=survival_units,
ylabel="Overall Survival (%)" if how == "os" else "Progression-Free Survival (%)",
censor_col="deceased" if how == "os" else "progressed_or_deceased",
survival_col=how,
strata_col=strata,
threshold=threshold,
ax=ax,
ci_show=ci_show,
with_condition_color=with_condition_color,
no_condition_color=no_condition_color,
with_condition_label=with_condition_label,
no_condition_label=no_condition_label,
color_palette=color_palette,
label_map=label_map,
color_map=color_map,
)
return results
|
[
"def",
"plot_survival",
"(",
"self",
",",
"on",
",",
"how",
"=",
"\"os\"",
",",
"survival_units",
"=",
"\"Days\"",
",",
"strata",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"ci_show",
"=",
"False",
",",
"with_condition_color",
"=",
"\"#B38600\"",
",",
"no_condition_color",
"=",
"\"#A941AC\"",
",",
"with_condition_label",
"=",
"None",
",",
"no_condition_label",
"=",
"None",
",",
"color_map",
"=",
"None",
",",
"label_map",
"=",
"None",
",",
"color_palette",
"=",
"\"Set2\"",
",",
"threshold",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"how",
"in",
"[",
"\"os\"",
",",
"\"pfs\"",
"]",
",",
"\"Invalid choice of survival plot type %s\"",
"%",
"how",
"cols",
",",
"df",
"=",
"self",
".",
"as_dataframe",
"(",
"on",
",",
"return_cols",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"plot_col",
"=",
"self",
".",
"plot_col_from_cols",
"(",
"cols",
"=",
"cols",
",",
"only_allow_one",
"=",
"True",
")",
"df",
"=",
"filter_not_null",
"(",
"df",
",",
"plot_col",
")",
"results",
"=",
"plot_kmf",
"(",
"df",
"=",
"df",
",",
"condition_col",
"=",
"plot_col",
",",
"xlabel",
"=",
"survival_units",
",",
"ylabel",
"=",
"\"Overall Survival (%)\"",
"if",
"how",
"==",
"\"os\"",
"else",
"\"Progression-Free Survival (%)\"",
",",
"censor_col",
"=",
"\"deceased\"",
"if",
"how",
"==",
"\"os\"",
"else",
"\"progressed_or_deceased\"",
",",
"survival_col",
"=",
"how",
",",
"strata_col",
"=",
"strata",
",",
"threshold",
"=",
"threshold",
",",
"ax",
"=",
"ax",
",",
"ci_show",
"=",
"ci_show",
",",
"with_condition_color",
"=",
"with_condition_color",
",",
"no_condition_color",
"=",
"no_condition_color",
",",
"with_condition_label",
"=",
"with_condition_label",
",",
"no_condition_label",
"=",
"no_condition_label",
",",
"color_palette",
"=",
"color_palette",
",",
"label_map",
"=",
"label_map",
",",
"color_map",
"=",
"color_map",
",",
")",
"return",
"results"
] |
Plot a Kaplan Meier survival curve by splitting the cohort into two groups
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
how : {"os", "pfs"}, optional
Whether to plot OS (overall survival) or PFS (progression free survival)
survival_units : str
Unit of time for the survival measure, i.e. Days or Months
strata : str
(optional) column name of stratifying variable
ci_show : bool
Display the confidence interval around the survival curve
threshold : int, "median", "median-per-strata" or None (optional)
Threshold of `col` on which to split the cohort
|
[
"Plot",
"a",
"Kaplan",
"Meier",
"survival",
"curve",
"by",
"splitting",
"the",
"cohort",
"into",
"two",
"groups",
"Parameters",
"----------",
"on",
":",
"str",
"or",
"function",
"or",
"list",
"or",
"dict",
"See",
"cohort",
".",
"load",
".",
"as_dataframe",
"how",
":",
"{",
"os",
"pfs",
"}",
"optional",
"Whether",
"to",
"plot",
"OS",
"(",
"overall",
"survival",
")",
"or",
"PFS",
"(",
"progression",
"free",
"survival",
")",
"survival_units",
":",
"str",
"Unit",
"of",
"time",
"for",
"the",
"survival",
"measure",
"i",
".",
"e",
".",
"Days",
"or",
"Months",
"strata",
":",
"str",
"(",
"optional",
")",
"column",
"name",
"of",
"stratifying",
"variable",
"ci_show",
":",
"bool",
"Display",
"the",
"confidence",
"interval",
"around",
"the",
"survival",
"curve",
"threshold",
":",
"int",
"median",
"median",
"-",
"per",
"-",
"strata",
"or",
"None",
"(",
"optional",
")",
"Threshold",
"of",
"col",
"on",
"which",
"to",
"split",
"the",
"cohort"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1295-L1349
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.plot_correlation
|
def plot_correlation(self, on, x_col=None, plot_type="jointplot", stat_func=pearsonr, show_stat_func=True, plot_kwargs={}, **kwargs):
"""Plot the correlation between two variables.
Parameters
----------
on : list or dict of functions or strings
See `cohort.load.as_dataframe`
x_col : str, optional
If `on` is a dict, this guarantees we have the expected ordering.
plot_type : str, optional
Specify "jointplot", "regplot", "boxplot", or "barplot".
stat_func : function, optional.
Specify which function to use for the statistical test.
show_stat_func : bool, optional
Whether or not to show the stat_func result in the plot itself.
plot_kwargs : dict, optional
kwargs to pass through to plotting functions.
"""
if plot_type not in ["boxplot", "barplot", "jointplot", "regplot"]:
raise ValueError("Invalid plot_type %s" % plot_type)
plot_cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
if len(plot_cols) != 2:
raise ValueError("Must be comparing two columns, but there are %d columns" % len(plot_cols))
for plot_col in plot_cols:
df = filter_not_null(df, plot_col)
if x_col is None:
x_col = plot_cols[0]
y_col = plot_cols[1]
else:
if x_col == plot_cols[0]:
y_col = plot_cols[1]
else:
y_col = plot_cols[0]
series_x = df[x_col]
series_y = df[y_col]
coeff, p_value = stat_func(series_x, series_y)
if plot_type == "jointplot":
plot = sb.jointplot(data=df, x=x_col, y=y_col,
stat_func=stat_func if show_stat_func else None,
**plot_kwargs)
elif plot_type == "regplot":
plot = sb.regplot(data=df, x=x_col, y=y_col,
**plot_kwargs)
elif plot_type == "boxplot":
plot = stripboxplot(data=df, x=x_col, y=y_col, **plot_kwargs)
else:
plot = sb.barplot(data=df, x=x_col, y=y_col, **plot_kwargs)
return CorrelationResults(coeff=coeff, p_value=p_value, stat_func=stat_func,
series_x=series_x, series_y=series_y, plot=plot)
|
python
|
def plot_correlation(self, on, x_col=None, plot_type="jointplot", stat_func=pearsonr, show_stat_func=True, plot_kwargs={}, **kwargs):
"""Plot the correlation between two variables.
Parameters
----------
on : list or dict of functions or strings
See `cohort.load.as_dataframe`
x_col : str, optional
If `on` is a dict, this guarantees we have the expected ordering.
plot_type : str, optional
Specify "jointplot", "regplot", "boxplot", or "barplot".
stat_func : function, optional.
Specify which function to use for the statistical test.
show_stat_func : bool, optional
Whether or not to show the stat_func result in the plot itself.
plot_kwargs : dict, optional
kwargs to pass through to plotting functions.
"""
if plot_type not in ["boxplot", "barplot", "jointplot", "regplot"]:
raise ValueError("Invalid plot_type %s" % plot_type)
plot_cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
if len(plot_cols) != 2:
raise ValueError("Must be comparing two columns, but there are %d columns" % len(plot_cols))
for plot_col in plot_cols:
df = filter_not_null(df, plot_col)
if x_col is None:
x_col = plot_cols[0]
y_col = plot_cols[1]
else:
if x_col == plot_cols[0]:
y_col = plot_cols[1]
else:
y_col = plot_cols[0]
series_x = df[x_col]
series_y = df[y_col]
coeff, p_value = stat_func(series_x, series_y)
if plot_type == "jointplot":
plot = sb.jointplot(data=df, x=x_col, y=y_col,
stat_func=stat_func if show_stat_func else None,
**plot_kwargs)
elif plot_type == "regplot":
plot = sb.regplot(data=df, x=x_col, y=y_col,
**plot_kwargs)
elif plot_type == "boxplot":
plot = stripboxplot(data=df, x=x_col, y=y_col, **plot_kwargs)
else:
plot = sb.barplot(data=df, x=x_col, y=y_col, **plot_kwargs)
return CorrelationResults(coeff=coeff, p_value=p_value, stat_func=stat_func,
series_x=series_x, series_y=series_y, plot=plot)
|
[
"def",
"plot_correlation",
"(",
"self",
",",
"on",
",",
"x_col",
"=",
"None",
",",
"plot_type",
"=",
"\"jointplot\"",
",",
"stat_func",
"=",
"pearsonr",
",",
"show_stat_func",
"=",
"True",
",",
"plot_kwargs",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"plot_type",
"not",
"in",
"[",
"\"boxplot\"",
",",
"\"barplot\"",
",",
"\"jointplot\"",
",",
"\"regplot\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"Invalid plot_type %s\"",
"%",
"plot_type",
")",
"plot_cols",
",",
"df",
"=",
"self",
".",
"as_dataframe",
"(",
"on",
",",
"return_cols",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"plot_cols",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"Must be comparing two columns, but there are %d columns\"",
"%",
"len",
"(",
"plot_cols",
")",
")",
"for",
"plot_col",
"in",
"plot_cols",
":",
"df",
"=",
"filter_not_null",
"(",
"df",
",",
"plot_col",
")",
"if",
"x_col",
"is",
"None",
":",
"x_col",
"=",
"plot_cols",
"[",
"0",
"]",
"y_col",
"=",
"plot_cols",
"[",
"1",
"]",
"else",
":",
"if",
"x_col",
"==",
"plot_cols",
"[",
"0",
"]",
":",
"y_col",
"=",
"plot_cols",
"[",
"1",
"]",
"else",
":",
"y_col",
"=",
"plot_cols",
"[",
"0",
"]",
"series_x",
"=",
"df",
"[",
"x_col",
"]",
"series_y",
"=",
"df",
"[",
"y_col",
"]",
"coeff",
",",
"p_value",
"=",
"stat_func",
"(",
"series_x",
",",
"series_y",
")",
"if",
"plot_type",
"==",
"\"jointplot\"",
":",
"plot",
"=",
"sb",
".",
"jointplot",
"(",
"data",
"=",
"df",
",",
"x",
"=",
"x_col",
",",
"y",
"=",
"y_col",
",",
"stat_func",
"=",
"stat_func",
"if",
"show_stat_func",
"else",
"None",
",",
"*",
"*",
"plot_kwargs",
")",
"elif",
"plot_type",
"==",
"\"regplot\"",
":",
"plot",
"=",
"sb",
".",
"regplot",
"(",
"data",
"=",
"df",
",",
"x",
"=",
"x_col",
",",
"y",
"=",
"y_col",
",",
"*",
"*",
"plot_kwargs",
")",
"elif",
"plot_type",
"==",
"\"boxplot\"",
":",
"plot",
"=",
"stripboxplot",
"(",
"data",
"=",
"df",
",",
"x",
"=",
"x_col",
",",
"y",
"=",
"y_col",
",",
"*",
"*",
"plot_kwargs",
")",
"else",
":",
"plot",
"=",
"sb",
".",
"barplot",
"(",
"data",
"=",
"df",
",",
"x",
"=",
"x_col",
",",
"y",
"=",
"y_col",
",",
"*",
"*",
"plot_kwargs",
")",
"return",
"CorrelationResults",
"(",
"coeff",
"=",
"coeff",
",",
"p_value",
"=",
"p_value",
",",
"stat_func",
"=",
"stat_func",
",",
"series_x",
"=",
"series_x",
",",
"series_y",
"=",
"series_y",
",",
"plot",
"=",
"plot",
")"
] |
Plot the correlation between two variables.
Parameters
----------
on : list or dict of functions or strings
See `cohort.load.as_dataframe`
x_col : str, optional
If `on` is a dict, this guarantees we have the expected ordering.
plot_type : str, optional
Specify "jointplot", "regplot", "boxplot", or "barplot".
stat_func : function, optional.
Specify which function to use for the statistical test.
show_stat_func : bool, optional
Whether or not to show the stat_func result in the plot itself.
plot_kwargs : dict, optional
kwargs to pass through to plotting functions.
|
[
"Plot",
"the",
"correlation",
"between",
"two",
"variables",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1351-L1399
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort._list_patient_ids
|
def _list_patient_ids(self):
""" Utility function to return a list of patient ids in the Cohort
"""
results = []
for patient in self:
results.append(patient.id)
return(results)
|
python
|
def _list_patient_ids(self):
""" Utility function to return a list of patient ids in the Cohort
"""
results = []
for patient in self:
results.append(patient.id)
return(results)
|
[
"def",
"_list_patient_ids",
"(",
"self",
")",
":",
"results",
"=",
"[",
"]",
"for",
"patient",
"in",
"self",
":",
"results",
".",
"append",
"(",
"patient",
".",
"id",
")",
"return",
"(",
"results",
")"
] |
Utility function to return a list of patient ids in the Cohort
|
[
"Utility",
"function",
"to",
"return",
"a",
"list",
"of",
"patient",
"ids",
"in",
"the",
"Cohort"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1410-L1416
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.summarize_provenance_per_cache
|
def summarize_provenance_per_cache(self):
"""Utility function to summarize provenance files for cached items used by a Cohort,
for each cache_dir that exists. Only existing cache_dirs are summarized.
This is a summary of provenance files because the function checks to see whether all
patients data have the same provenance within the cache dir. The function assumes
that it will be desireable to have all patients data generated using the same
environment, for each cache type.
At the moment, most PROVENANCE files contain details about packages used to generat
e the cached data file. However, this function is generic & so it summarizes the
contents of those files irrespective of their contents.
Returns
----------
Dict containing summarized provenance for each existing cache_dir, after checking
to see that provenance files are identical among all patients in the data frame for
that cache_dir.
If conflicting PROVENANCE files are discovered within a cache-dir:
- a warning is generated, describing the conflict
- and, a value of `None` is returned in the dictionary for that cache-dir
See also
-----------
* `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among
cache_dirs.
* `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data
frame for this cohort.
"""
provenance_summary = {}
df = self.as_dataframe()
for cache in self.cache_names:
cache_name = self.cache_names[cache]
cache_provenance = None
num_discrepant = 0
this_cache_dir = path.join(self.cache_dir, cache_name)
if path.exists(this_cache_dir):
for patient_id in self._list_patient_ids():
patient_cache_dir = path.join(this_cache_dir, patient_id)
try:
this_provenance = self.load_provenance(patient_cache_dir = patient_cache_dir)
except:
this_provenance = None
if this_provenance:
if not(cache_provenance):
cache_provenance = this_provenance
else:
num_discrepant += compare_provenance(this_provenance, cache_provenance)
if num_discrepant == 0:
provenance_summary[cache_name] = cache_provenance
else:
provenance_summary[cache_name] = None
return(provenance_summary)
|
python
|
def summarize_provenance_per_cache(self):
"""Utility function to summarize provenance files for cached items used by a Cohort,
for each cache_dir that exists. Only existing cache_dirs are summarized.
This is a summary of provenance files because the function checks to see whether all
patients data have the same provenance within the cache dir. The function assumes
that it will be desireable to have all patients data generated using the same
environment, for each cache type.
At the moment, most PROVENANCE files contain details about packages used to generat
e the cached data file. However, this function is generic & so it summarizes the
contents of those files irrespective of their contents.
Returns
----------
Dict containing summarized provenance for each existing cache_dir, after checking
to see that provenance files are identical among all patients in the data frame for
that cache_dir.
If conflicting PROVENANCE files are discovered within a cache-dir:
- a warning is generated, describing the conflict
- and, a value of `None` is returned in the dictionary for that cache-dir
See also
-----------
* `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among
cache_dirs.
* `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data
frame for this cohort.
"""
provenance_summary = {}
df = self.as_dataframe()
for cache in self.cache_names:
cache_name = self.cache_names[cache]
cache_provenance = None
num_discrepant = 0
this_cache_dir = path.join(self.cache_dir, cache_name)
if path.exists(this_cache_dir):
for patient_id in self._list_patient_ids():
patient_cache_dir = path.join(this_cache_dir, patient_id)
try:
this_provenance = self.load_provenance(patient_cache_dir = patient_cache_dir)
except:
this_provenance = None
if this_provenance:
if not(cache_provenance):
cache_provenance = this_provenance
else:
num_discrepant += compare_provenance(this_provenance, cache_provenance)
if num_discrepant == 0:
provenance_summary[cache_name] = cache_provenance
else:
provenance_summary[cache_name] = None
return(provenance_summary)
|
[
"def",
"summarize_provenance_per_cache",
"(",
"self",
")",
":",
"provenance_summary",
"=",
"{",
"}",
"df",
"=",
"self",
".",
"as_dataframe",
"(",
")",
"for",
"cache",
"in",
"self",
".",
"cache_names",
":",
"cache_name",
"=",
"self",
".",
"cache_names",
"[",
"cache",
"]",
"cache_provenance",
"=",
"None",
"num_discrepant",
"=",
"0",
"this_cache_dir",
"=",
"path",
".",
"join",
"(",
"self",
".",
"cache_dir",
",",
"cache_name",
")",
"if",
"path",
".",
"exists",
"(",
"this_cache_dir",
")",
":",
"for",
"patient_id",
"in",
"self",
".",
"_list_patient_ids",
"(",
")",
":",
"patient_cache_dir",
"=",
"path",
".",
"join",
"(",
"this_cache_dir",
",",
"patient_id",
")",
"try",
":",
"this_provenance",
"=",
"self",
".",
"load_provenance",
"(",
"patient_cache_dir",
"=",
"patient_cache_dir",
")",
"except",
":",
"this_provenance",
"=",
"None",
"if",
"this_provenance",
":",
"if",
"not",
"(",
"cache_provenance",
")",
":",
"cache_provenance",
"=",
"this_provenance",
"else",
":",
"num_discrepant",
"+=",
"compare_provenance",
"(",
"this_provenance",
",",
"cache_provenance",
")",
"if",
"num_discrepant",
"==",
"0",
":",
"provenance_summary",
"[",
"cache_name",
"]",
"=",
"cache_provenance",
"else",
":",
"provenance_summary",
"[",
"cache_name",
"]",
"=",
"None",
"return",
"(",
"provenance_summary",
")"
] |
Utility function to summarize provenance files for cached items used by a Cohort,
for each cache_dir that exists. Only existing cache_dirs are summarized.
This is a summary of provenance files because the function checks to see whether all
patients data have the same provenance within the cache dir. The function assumes
that it will be desireable to have all patients data generated using the same
environment, for each cache type.
At the moment, most PROVENANCE files contain details about packages used to generat
e the cached data file. However, this function is generic & so it summarizes the
contents of those files irrespective of their contents.
Returns
----------
Dict containing summarized provenance for each existing cache_dir, after checking
to see that provenance files are identical among all patients in the data frame for
that cache_dir.
If conflicting PROVENANCE files are discovered within a cache-dir:
- a warning is generated, describing the conflict
- and, a value of `None` is returned in the dictionary for that cache-dir
See also
-----------
* `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among
cache_dirs.
* `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data
frame for this cohort.
|
[
"Utility",
"function",
"to",
"summarize",
"provenance",
"files",
"for",
"cached",
"items",
"used",
"by",
"a",
"Cohort",
"for",
"each",
"cache_dir",
"that",
"exists",
".",
"Only",
"existing",
"cache_dirs",
"are",
"summarized",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1418-L1471
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.summarize_dataframe
|
def summarize_dataframe(self):
"""Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs
"""
if self.dataframe_hash:
return(self.dataframe_hash)
else:
df = self._as_dataframe_unmodified()
return(self.dataframe_hash)
|
python
|
def summarize_dataframe(self):
"""Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs
"""
if self.dataframe_hash:
return(self.dataframe_hash)
else:
df = self._as_dataframe_unmodified()
return(self.dataframe_hash)
|
[
"def",
"summarize_dataframe",
"(",
"self",
")",
":",
"if",
"self",
".",
"dataframe_hash",
":",
"return",
"(",
"self",
".",
"dataframe_hash",
")",
"else",
":",
"df",
"=",
"self",
".",
"_as_dataframe_unmodified",
"(",
")",
"return",
"(",
"self",
".",
"dataframe_hash",
")"
] |
Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs
|
[
"Summarize",
"default",
"dataframe",
"for",
"this",
"cohort",
"using",
"a",
"hash",
"function",
".",
"Useful",
"for",
"confirming",
"the",
"version",
"of",
"data",
"used",
"in",
"various",
"reports",
"e",
".",
"g",
".",
"ipynbs"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1473-L1481
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.summarize_provenance
|
def summarize_provenance(self):
"""Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
IE if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
"""
provenance_per_cache = self.summarize_provenance_per_cache()
summary_provenance = None
num_discrepant = 0
for cache in provenance_per_cache:
if not(summary_provenance):
## pick arbitrary provenance & call this the "summary" (for now)
summary_provenance = provenance_per_cache[cache]
summary_provenance_name = cache
## for each cache, check equivalence with summary_provenance
num_discrepant += compare_provenance(
provenance_per_cache[cache],
summary_provenance,
left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name),
right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache)
)
## compare provenance across cached items
if num_discrepant == 0:
prov = summary_provenance ## report summary provenance if exists
else:
prov = provenance_per_cache ## otherwise, return provenance per cache
return(prov)
|
python
|
def summarize_provenance(self):
"""Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
IE if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
"""
provenance_per_cache = self.summarize_provenance_per_cache()
summary_provenance = None
num_discrepant = 0
for cache in provenance_per_cache:
if not(summary_provenance):
## pick arbitrary provenance & call this the "summary" (for now)
summary_provenance = provenance_per_cache[cache]
summary_provenance_name = cache
## for each cache, check equivalence with summary_provenance
num_discrepant += compare_provenance(
provenance_per_cache[cache],
summary_provenance,
left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name),
right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache)
)
## compare provenance across cached items
if num_discrepant == 0:
prov = summary_provenance ## report summary provenance if exists
else:
prov = provenance_per_cache ## otherwise, return provenance per cache
return(prov)
|
[
"def",
"summarize_provenance",
"(",
"self",
")",
":",
"provenance_per_cache",
"=",
"self",
".",
"summarize_provenance_per_cache",
"(",
")",
"summary_provenance",
"=",
"None",
"num_discrepant",
"=",
"0",
"for",
"cache",
"in",
"provenance_per_cache",
":",
"if",
"not",
"(",
"summary_provenance",
")",
":",
"## pick arbitrary provenance & call this the \"summary\" (for now)",
"summary_provenance",
"=",
"provenance_per_cache",
"[",
"cache",
"]",
"summary_provenance_name",
"=",
"cache",
"## for each cache, check equivalence with summary_provenance",
"num_discrepant",
"+=",
"compare_provenance",
"(",
"provenance_per_cache",
"[",
"cache",
"]",
",",
"summary_provenance",
",",
"left_outer_diff",
"=",
"\"In %s but not in %s\"",
"%",
"(",
"cache",
",",
"summary_provenance_name",
")",
",",
"right_outer_diff",
"=",
"\"In %s but not in %s\"",
"%",
"(",
"summary_provenance_name",
",",
"cache",
")",
")",
"## compare provenance across cached items",
"if",
"num_discrepant",
"==",
"0",
":",
"prov",
"=",
"summary_provenance",
"## report summary provenance if exists",
"else",
":",
"prov",
"=",
"provenance_per_cache",
"## otherwise, return provenance per cache",
"return",
"(",
"prov",
")"
] |
Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
IE if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
|
[
"Utility",
"function",
"to",
"summarize",
"provenance",
"files",
"for",
"cached",
"items",
"used",
"by",
"a",
"Cohort",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1483-L1523
|
train
|
hammerlab/cohorts
|
cohorts/cohort.py
|
Cohort.summarize_data_sources
|
def summarize_data_sources(self):
"""Utility function to summarize data source status for this Cohort, useful for confirming
the state of data used for an analysis
Returns
----------
Dictionary with summary of data sources
Currently contains
- dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`)
- provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`)
"""
provenance_file_summary = self.summarize_provenance()
dataframe_hash = self.summarize_dataframe()
results = {
"provenance_file_summary": provenance_file_summary,
"dataframe_hash": dataframe_hash
}
return(results)
|
python
|
def summarize_data_sources(self):
"""Utility function to summarize data source status for this Cohort, useful for confirming
the state of data used for an analysis
Returns
----------
Dictionary with summary of data sources
Currently contains
- dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`)
- provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`)
"""
provenance_file_summary = self.summarize_provenance()
dataframe_hash = self.summarize_dataframe()
results = {
"provenance_file_summary": provenance_file_summary,
"dataframe_hash": dataframe_hash
}
return(results)
|
[
"def",
"summarize_data_sources",
"(",
"self",
")",
":",
"provenance_file_summary",
"=",
"self",
".",
"summarize_provenance",
"(",
")",
"dataframe_hash",
"=",
"self",
".",
"summarize_dataframe",
"(",
")",
"results",
"=",
"{",
"\"provenance_file_summary\"",
":",
"provenance_file_summary",
",",
"\"dataframe_hash\"",
":",
"dataframe_hash",
"}",
"return",
"(",
"results",
")"
] |
Utility function to summarize data source status for this Cohort, useful for confirming
the state of data used for an analysis
Returns
----------
Dictionary with summary of data sources
Currently contains
- dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`)
- provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`)
|
[
"Utility",
"function",
"to",
"summarize",
"data",
"source",
"status",
"for",
"this",
"Cohort",
"useful",
"for",
"confirming",
"the",
"state",
"of",
"data",
"used",
"for",
"an",
"analysis"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1525-L1543
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
strelka_somatic_variant_stats
|
def strelka_somatic_variant_stats(variant, variant_metadata):
"""Parse out the variant calling statistics for a given variant from a Strelka VCF
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of sample to variant calling statistics, corresponds to the sample columns
in a Strelka VCF
Returns
-------
SomaticVariantStats
"""
sample_info = variant_metadata["sample_info"]
# Ensure there are exactly two samples in the VCF, a tumor and normal
assert len(sample_info) == 2, "More than two samples found in the somatic VCF"
tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"])
normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"])
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
|
python
|
def strelka_somatic_variant_stats(variant, variant_metadata):
"""Parse out the variant calling statistics for a given variant from a Strelka VCF
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of sample to variant calling statistics, corresponds to the sample columns
in a Strelka VCF
Returns
-------
SomaticVariantStats
"""
sample_info = variant_metadata["sample_info"]
# Ensure there are exactly two samples in the VCF, a tumor and normal
assert len(sample_info) == 2, "More than two samples found in the somatic VCF"
tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"])
normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"])
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
|
[
"def",
"strelka_somatic_variant_stats",
"(",
"variant",
",",
"variant_metadata",
")",
":",
"sample_info",
"=",
"variant_metadata",
"[",
"\"sample_info\"",
"]",
"# Ensure there are exactly two samples in the VCF, a tumor and normal",
"assert",
"len",
"(",
"sample_info",
")",
"==",
"2",
",",
"\"More than two samples found in the somatic VCF\"",
"tumor_stats",
"=",
"_strelka_variant_stats",
"(",
"variant",
",",
"sample_info",
"[",
"\"TUMOR\"",
"]",
")",
"normal_stats",
"=",
"_strelka_variant_stats",
"(",
"variant",
",",
"sample_info",
"[",
"\"NORMAL\"",
"]",
")",
"return",
"SomaticVariantStats",
"(",
"tumor_stats",
"=",
"tumor_stats",
",",
"normal_stats",
"=",
"normal_stats",
")"
] |
Parse out the variant calling statistics for a given variant from a Strelka VCF
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of sample to variant calling statistics, corresponds to the sample columns
in a Strelka VCF
Returns
-------
SomaticVariantStats
|
[
"Parse",
"out",
"the",
"variant",
"calling",
"statistics",
"for",
"a",
"given",
"variant",
"from",
"a",
"Strelka",
"VCF"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L24-L44
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
_strelka_variant_stats
|
def _strelka_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
"""
if variant.is_deletion or variant.is_insertion:
# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output
ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion)
alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion)
depth = ref_depth + alt_depth
else:
# Retrieve the Tier 1 counts from Strelka
ref_depth = int(sample_info[variant.ref+"U"][0])
alt_depth = int(sample_info[variant.alt+"U"][0])
depth = alt_depth + ref_depth
if depth > 0:
vaf = float(alt_depth) / depth
else:
# unclear how to define vaf if no reads support variant
# up to user to interpret this (hopefully filtered out in QC settings)
vaf = None
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
|
python
|
def _strelka_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
"""
if variant.is_deletion or variant.is_insertion:
# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output
ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion)
alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion)
depth = ref_depth + alt_depth
else:
# Retrieve the Tier 1 counts from Strelka
ref_depth = int(sample_info[variant.ref+"U"][0])
alt_depth = int(sample_info[variant.alt+"U"][0])
depth = alt_depth + ref_depth
if depth > 0:
vaf = float(alt_depth) / depth
else:
# unclear how to define vaf if no reads support variant
# up to user to interpret this (hopefully filtered out in QC settings)
vaf = None
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
|
[
"def",
"_strelka_variant_stats",
"(",
"variant",
",",
"sample_info",
")",
":",
"if",
"variant",
".",
"is_deletion",
"or",
"variant",
".",
"is_insertion",
":",
"# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output",
"ref_depth",
"=",
"int",
"(",
"sample_info",
"[",
"'TAR'",
"]",
"[",
"0",
"]",
")",
"# number of reads supporting ref allele (non-deletion)",
"alt_depth",
"=",
"int",
"(",
"sample_info",
"[",
"'TIR'",
"]",
"[",
"0",
"]",
")",
"# number of reads supporting alt allele (deletion)",
"depth",
"=",
"ref_depth",
"+",
"alt_depth",
"else",
":",
"# Retrieve the Tier 1 counts from Strelka",
"ref_depth",
"=",
"int",
"(",
"sample_info",
"[",
"variant",
".",
"ref",
"+",
"\"U\"",
"]",
"[",
"0",
"]",
")",
"alt_depth",
"=",
"int",
"(",
"sample_info",
"[",
"variant",
".",
"alt",
"+",
"\"U\"",
"]",
"[",
"0",
"]",
")",
"depth",
"=",
"alt_depth",
"+",
"ref_depth",
"if",
"depth",
">",
"0",
":",
"vaf",
"=",
"float",
"(",
"alt_depth",
")",
"/",
"depth",
"else",
":",
"# unclear how to define vaf if no reads support variant",
"# up to user to interpret this (hopefully filtered out in QC settings)",
"vaf",
"=",
"None",
"return",
"VariantStats",
"(",
"depth",
"=",
"depth",
",",
"alt_depth",
"=",
"alt_depth",
",",
"variant_allele_frequency",
"=",
"vaf",
")"
] |
Parse a single sample"s variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
|
[
"Parse",
"a",
"single",
"sample",
"s",
"variant",
"calling",
"statistics",
"based",
"on",
"Strelka",
"VCF",
"output"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L46-L77
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
mutect_somatic_variant_stats
|
def mutect_somatic_variant_stats(variant, variant_metadata):
"""Parse out the variant calling statistics for a given variant from a Mutect VCF
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of sample to variant calling statistics, corresponds to the sample columns
in a Mutect VCF
Returns
-------
SomaticVariantStats
"""
sample_info = variant_metadata["sample_info"]
# Ensure there are exactly two samples in the VCF, a tumor and normal
assert len(sample_info) == 2, "More than two samples found in the somatic VCF"
# Find the sample with the genotype field set to variant in the VCF
tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"]
# Ensure there is only one such sample
assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file"
tumor_sample_info = tumor_sample_infos[0]
normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0]
tumor_stats = _mutect_variant_stats(variant, tumor_sample_info)
normal_stats = _mutect_variant_stats(variant, normal_sample_info)
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
|
python
|
def mutect_somatic_variant_stats(variant, variant_metadata):
"""Parse out the variant calling statistics for a given variant from a Mutect VCF
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of sample to variant calling statistics, corresponds to the sample columns
in a Mutect VCF
Returns
-------
SomaticVariantStats
"""
sample_info = variant_metadata["sample_info"]
# Ensure there are exactly two samples in the VCF, a tumor and normal
assert len(sample_info) == 2, "More than two samples found in the somatic VCF"
# Find the sample with the genotype field set to variant in the VCF
tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"]
# Ensure there is only one such sample
assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file"
tumor_sample_info = tumor_sample_infos[0]
normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0]
tumor_stats = _mutect_variant_stats(variant, tumor_sample_info)
normal_stats = _mutect_variant_stats(variant, normal_sample_info)
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
|
[
"def",
"mutect_somatic_variant_stats",
"(",
"variant",
",",
"variant_metadata",
")",
":",
"sample_info",
"=",
"variant_metadata",
"[",
"\"sample_info\"",
"]",
"# Ensure there are exactly two samples in the VCF, a tumor and normal",
"assert",
"len",
"(",
"sample_info",
")",
"==",
"2",
",",
"\"More than two samples found in the somatic VCF\"",
"# Find the sample with the genotype field set to variant in the VCF",
"tumor_sample_infos",
"=",
"[",
"info",
"for",
"info",
"in",
"sample_info",
".",
"values",
"(",
")",
"if",
"info",
"[",
"\"GT\"",
"]",
"==",
"\"0/1\"",
"]",
"# Ensure there is only one such sample",
"assert",
"len",
"(",
"tumor_sample_infos",
")",
"==",
"1",
",",
"\"More than one tumor sample found in the VCF file\"",
"tumor_sample_info",
"=",
"tumor_sample_infos",
"[",
"0",
"]",
"normal_sample_info",
"=",
"[",
"info",
"for",
"info",
"in",
"sample_info",
".",
"values",
"(",
")",
"if",
"info",
"[",
"\"GT\"",
"]",
"!=",
"\"0/1\"",
"]",
"[",
"0",
"]",
"tumor_stats",
"=",
"_mutect_variant_stats",
"(",
"variant",
",",
"tumor_sample_info",
")",
"normal_stats",
"=",
"_mutect_variant_stats",
"(",
"variant",
",",
"normal_sample_info",
")",
"return",
"SomaticVariantStats",
"(",
"tumor_stats",
"=",
"tumor_stats",
",",
"normal_stats",
"=",
"normal_stats",
")"
] |
Parse out the variant calling statistics for a given variant from a Mutect VCF
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of sample to variant calling statistics, corresponds to the sample columns
in a Mutect VCF
Returns
-------
SomaticVariantStats
|
[
"Parse",
"out",
"the",
"variant",
"calling",
"statistics",
"for",
"a",
"given",
"variant",
"from",
"a",
"Mutect",
"VCF"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L79-L109
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
_mutect_variant_stats
|
def _mutect_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Mutect-specific variant calling fields
Returns
-------
VariantStats
"""
# Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH]
ref_depth, alt_depth = sample_info["AD"]
depth = int(ref_depth) + int(alt_depth)
vaf = float(alt_depth) / depth
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
|
python
|
def _mutect_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Mutect-specific variant calling fields
Returns
-------
VariantStats
"""
# Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH]
ref_depth, alt_depth = sample_info["AD"]
depth = int(ref_depth) + int(alt_depth)
vaf = float(alt_depth) / depth
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
|
[
"def",
"_mutect_variant_stats",
"(",
"variant",
",",
"sample_info",
")",
":",
"# Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH]",
"ref_depth",
",",
"alt_depth",
"=",
"sample_info",
"[",
"\"AD\"",
"]",
"depth",
"=",
"int",
"(",
"ref_depth",
")",
"+",
"int",
"(",
"alt_depth",
")",
"vaf",
"=",
"float",
"(",
"alt_depth",
")",
"/",
"depth",
"return",
"VariantStats",
"(",
"depth",
"=",
"depth",
",",
"alt_depth",
"=",
"alt_depth",
",",
"variant_allele_frequency",
"=",
"vaf",
")"
] |
Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Mutect-specific variant calling fields
Returns
-------
VariantStats
|
[
"Parse",
"a",
"single",
"sample",
"s",
"variant",
"calling",
"statistics",
"based",
"on",
"Mutect",
"s",
"(",
"v1",
")",
"VCF",
"output"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L111-L130
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
maf_somatic_variant_stats
|
def maf_somatic_variant_stats(variant, variant_metadata):
"""
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
"""
tumor_stats = None
normal_stats = None
if "t_ref_count" in variant_metadata:
tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t")
if "n_ref_count" in variant_metadata:
normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n")
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
|
python
|
def maf_somatic_variant_stats(variant, variant_metadata):
"""
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
"""
tumor_stats = None
normal_stats = None
if "t_ref_count" in variant_metadata:
tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t")
if "n_ref_count" in variant_metadata:
normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n")
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
|
[
"def",
"maf_somatic_variant_stats",
"(",
"variant",
",",
"variant_metadata",
")",
":",
"tumor_stats",
"=",
"None",
"normal_stats",
"=",
"None",
"if",
"\"t_ref_count\"",
"in",
"variant_metadata",
":",
"tumor_stats",
"=",
"_maf_variant_stats",
"(",
"variant",
",",
"variant_metadata",
",",
"prefix",
"=",
"\"t\"",
")",
"if",
"\"n_ref_count\"",
"in",
"variant_metadata",
":",
"normal_stats",
"=",
"_maf_variant_stats",
"(",
"variant",
",",
"variant_metadata",
",",
"prefix",
"=",
"\"n\"",
")",
"return",
"SomaticVariantStats",
"(",
"tumor_stats",
"=",
"tumor_stats",
",",
"normal_stats",
"=",
"normal_stats",
")"
] |
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
|
[
"Parse",
"out",
"the",
"variant",
"calling",
"statistics",
"for",
"a",
"given",
"variant",
"from",
"a",
"MAF",
"file"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L139-L161
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
_vcf_is_strelka
|
def _vcf_is_strelka(variant_file, variant_metadata):
"""Return True if variant_file given is in strelka format
"""
if "strelka" in variant_file.lower():
return True
elif "NORMAL" in variant_metadata["sample_info"].keys():
return True
else:
vcf_reader = vcf.Reader(open(variant_file, "r"))
try:
vcf_type = vcf_reader.metadata["content"]
except KeyError:
vcf_type = ""
if "strelka" in vcf_type.lower():
return True
return False
|
python
|
def _vcf_is_strelka(variant_file, variant_metadata):
"""Return True if variant_file given is in strelka format
"""
if "strelka" in variant_file.lower():
return True
elif "NORMAL" in variant_metadata["sample_info"].keys():
return True
else:
vcf_reader = vcf.Reader(open(variant_file, "r"))
try:
vcf_type = vcf_reader.metadata["content"]
except KeyError:
vcf_type = ""
if "strelka" in vcf_type.lower():
return True
return False
|
[
"def",
"_vcf_is_strelka",
"(",
"variant_file",
",",
"variant_metadata",
")",
":",
"if",
"\"strelka\"",
"in",
"variant_file",
".",
"lower",
"(",
")",
":",
"return",
"True",
"elif",
"\"NORMAL\"",
"in",
"variant_metadata",
"[",
"\"sample_info\"",
"]",
".",
"keys",
"(",
")",
":",
"return",
"True",
"else",
":",
"vcf_reader",
"=",
"vcf",
".",
"Reader",
"(",
"open",
"(",
"variant_file",
",",
"\"r\"",
")",
")",
"try",
":",
"vcf_type",
"=",
"vcf_reader",
".",
"metadata",
"[",
"\"content\"",
"]",
"except",
"KeyError",
":",
"vcf_type",
"=",
"\"\"",
"if",
"\"strelka\"",
"in",
"vcf_type",
".",
"lower",
"(",
")",
":",
"return",
"True",
"return",
"False"
] |
Return True if variant_file given is in strelka format
|
[
"Return",
"True",
"if",
"variant_file",
"given",
"is",
"in",
"strelka",
"format"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L163-L178
|
train
|
hammerlab/cohorts
|
cohorts/variant_stats.py
|
variant_stats_from_variant
|
def variant_stats_from_variant(variant,
metadata,
merge_fn=(lambda all_stats: \
max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))):
"""Parse the variant calling stats from a variant called from multiple variant files. The stats are merged
based on `merge_fn`
Parameters
----------
variant : varcode.Variant
metadata : dict
Dictionary of variant file to variant calling metadata from that file
merge_fn : function
Function from list of SomaticVariantStats to single SomaticVariantStats.
This is used if a variant is called by multiple callers or appears in multiple VCFs.
By default, this uses the data from the caller that had a higher tumor depth.
Returns
-------
SomaticVariantStats
"""
all_stats = []
for (variant_file, variant_metadata) in metadata.items():
if _vcf_is_maf(variant_file=variant_file):
stats = maf_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_strelka(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = strelka_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_mutect(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = mutect_somatic_variant_stats(variant, variant_metadata)
else:
raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file))
all_stats.append(stats)
return merge_fn(all_stats)
|
python
|
def variant_stats_from_variant(variant,
metadata,
merge_fn=(lambda all_stats: \
max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))):
"""Parse the variant calling stats from a variant called from multiple variant files. The stats are merged
based on `merge_fn`
Parameters
----------
variant : varcode.Variant
metadata : dict
Dictionary of variant file to variant calling metadata from that file
merge_fn : function
Function from list of SomaticVariantStats to single SomaticVariantStats.
This is used if a variant is called by multiple callers or appears in multiple VCFs.
By default, this uses the data from the caller that had a higher tumor depth.
Returns
-------
SomaticVariantStats
"""
all_stats = []
for (variant_file, variant_metadata) in metadata.items():
if _vcf_is_maf(variant_file=variant_file):
stats = maf_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_strelka(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = strelka_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_mutect(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = mutect_somatic_variant_stats(variant, variant_metadata)
else:
raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file))
all_stats.append(stats)
return merge_fn(all_stats)
|
[
"def",
"variant_stats_from_variant",
"(",
"variant",
",",
"metadata",
",",
"merge_fn",
"=",
"(",
"lambda",
"all_stats",
":",
"max",
"(",
"all_stats",
",",
"key",
"=",
"(",
"lambda",
"stats",
":",
"stats",
".",
"tumor_stats",
".",
"depth",
")",
")",
")",
")",
":",
"all_stats",
"=",
"[",
"]",
"for",
"(",
"variant_file",
",",
"variant_metadata",
")",
"in",
"metadata",
".",
"items",
"(",
")",
":",
"if",
"_vcf_is_maf",
"(",
"variant_file",
"=",
"variant_file",
")",
":",
"stats",
"=",
"maf_somatic_variant_stats",
"(",
"variant",
",",
"variant_metadata",
")",
"elif",
"_vcf_is_strelka",
"(",
"variant_file",
"=",
"variant_file",
",",
"variant_metadata",
"=",
"variant_metadata",
")",
":",
"stats",
"=",
"strelka_somatic_variant_stats",
"(",
"variant",
",",
"variant_metadata",
")",
"elif",
"_vcf_is_mutect",
"(",
"variant_file",
"=",
"variant_file",
",",
"variant_metadata",
"=",
"variant_metadata",
")",
":",
"stats",
"=",
"mutect_somatic_variant_stats",
"(",
"variant",
",",
"variant_metadata",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot parse sample fields, variant file {} is from an unsupported caller.\"",
".",
"format",
"(",
"variant_file",
")",
")",
"all_stats",
".",
"append",
"(",
"stats",
")",
"return",
"merge_fn",
"(",
"all_stats",
")"
] |
Parse the variant calling stats from a variant called from multiple variant files. The stats are merged
based on `merge_fn`
Parameters
----------
variant : varcode.Variant
metadata : dict
Dictionary of variant file to variant calling metadata from that file
merge_fn : function
Function from list of SomaticVariantStats to single SomaticVariantStats.
This is used if a variant is called by multiple callers or appears in multiple VCFs.
By default, this uses the data from the caller that had a higher tumor depth.
Returns
-------
SomaticVariantStats
|
[
"Parse",
"the",
"variant",
"calling",
"stats",
"from",
"a",
"variant",
"called",
"from",
"multiple",
"variant",
"files",
".",
"The",
"stats",
"are",
"merged",
"based",
"on",
"merge_fn"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L202-L236
|
train
|
alvarogzp/telegram-bot-framework
|
bot/multithreading/worker/pool/workers/limited_lifespan.py
|
LimitedLifespanQueueWorker._get_and_execute
|
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
python
|
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
[
"def",
"_get_and_execute",
"(",
"self",
")",
":",
"try",
":",
"work",
"=",
"self",
".",
"queue",
".",
"get",
"(",
"timeout",
"=",
"self",
".",
"max_seconds_idle",
")",
"except",
"queue",
".",
"Empty",
":",
"# max_seconds_idle has been exhausted, exiting",
"self",
".",
"end_notify",
"(",
")",
"return",
"False",
"else",
":",
"self",
".",
"_work",
"(",
"work",
")",
"self",
".",
"queue",
".",
"task_done",
"(",
")",
"return",
"True"
] |
:return: True if it should continue running, False if it should end its execution.
|
[
":",
"return",
":",
"True",
"if",
"it",
"should",
"continue",
"running",
"False",
"if",
"it",
"should",
"end",
"its",
"execution",
"."
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/worker/pool/workers/limited_lifespan.py#L21-L34
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/info/formatter/chat.py
|
ChatInfoFormatter.format
|
def format(self, full_info: bool = False):
"""
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls.
"""
chat = self.api_object
if full_info:
self.__format_full(chat)
else:
self.__format_simple(chat)
|
python
|
def format(self, full_info: bool = False):
"""
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls.
"""
chat = self.api_object
if full_info:
self.__format_full(chat)
else:
self.__format_simple(chat)
|
[
"def",
"format",
"(",
"self",
",",
"full_info",
":",
"bool",
"=",
"False",
")",
":",
"chat",
"=",
"self",
".",
"api_object",
"if",
"full_info",
":",
"self",
".",
"__format_full",
"(",
"chat",
")",
"else",
":",
"self",
".",
"__format_simple",
"(",
"chat",
")"
] |
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make up to THREE synchronous api calls.
|
[
":",
"param",
"full_info",
":",
"If",
"True",
"adds",
"more",
"info",
"about",
"the",
"chat",
".",
"Please",
"note",
"that",
"this",
"additional",
"info",
"requires",
"to",
"make",
"up",
"to",
"THREE",
"synchronous",
"api",
"calls",
"."
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/chat.py#L20-L29
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/chatsettings/__init__.py
|
ChatSettings.list
|
def list(self):
"""
:rtype: list(setting_name, value, default_value, is_set, is_supported)
"""
settings = []
for setting in _SETTINGS:
value = self.get(setting)
is_set = self.is_set(setting)
default_value = self.get_default_value(setting)
is_supported = True
settings.append((setting, value, default_value, is_set, is_supported))
for setting in sorted(self.settings_state.list_keys()):
if not self.is_supported(setting):
value = self.get(setting)
default_value = None
is_set = True
is_supported = False
settings.append((setting, value, default_value, is_set, is_supported))
return settings
|
python
|
def list(self):
"""
:rtype: list(setting_name, value, default_value, is_set, is_supported)
"""
settings = []
for setting in _SETTINGS:
value = self.get(setting)
is_set = self.is_set(setting)
default_value = self.get_default_value(setting)
is_supported = True
settings.append((setting, value, default_value, is_set, is_supported))
for setting in sorted(self.settings_state.list_keys()):
if not self.is_supported(setting):
value = self.get(setting)
default_value = None
is_set = True
is_supported = False
settings.append((setting, value, default_value, is_set, is_supported))
return settings
|
[
"def",
"list",
"(",
"self",
")",
":",
"settings",
"=",
"[",
"]",
"for",
"setting",
"in",
"_SETTINGS",
":",
"value",
"=",
"self",
".",
"get",
"(",
"setting",
")",
"is_set",
"=",
"self",
".",
"is_set",
"(",
"setting",
")",
"default_value",
"=",
"self",
".",
"get_default_value",
"(",
"setting",
")",
"is_supported",
"=",
"True",
"settings",
".",
"append",
"(",
"(",
"setting",
",",
"value",
",",
"default_value",
",",
"is_set",
",",
"is_supported",
")",
")",
"for",
"setting",
"in",
"sorted",
"(",
"self",
".",
"settings_state",
".",
"list_keys",
"(",
")",
")",
":",
"if",
"not",
"self",
".",
"is_supported",
"(",
"setting",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"setting",
")",
"default_value",
"=",
"None",
"is_set",
"=",
"True",
"is_supported",
"=",
"False",
"settings",
".",
"append",
"(",
"(",
"setting",
",",
"value",
",",
"default_value",
",",
"is_set",
",",
"is_supported",
")",
")",
"return",
"settings"
] |
:rtype: list(setting_name, value, default_value, is_set, is_supported)
|
[
":",
"rtype",
":",
"list",
"(",
"setting_name",
"value",
"default_value",
"is_set",
"is_supported",
")"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/chatsettings/__init__.py#L42-L60
|
train
|
hammerlab/cohorts
|
cohorts/variant_filters.py
|
load_ensembl_coverage
|
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0,
pageant_dir_fn=None):
"""
Load in Pageant CoverageDepth results with Ensembl loci.
coverage_path is a path to Pageant CoverageDepth output directory, with
one subdirectory per patient and a `cdf.csv` file inside each patient subdir.
If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate
join tumor/normal coverage.
pageant_dir_fn is a function that takes in a Patient and produces a Pageant
dir name.
Last tested with Pageant CoverageDepth version 1ca9ed2.
"""
# Function to grab the pageant file name using the Patient
if pageant_dir_fn is None:
pageant_dir_fn = lambda patient: patient.id
columns_both = [
"depth1", # Normal
"depth2", # Tumor
"onBP1",
"onBP2",
"numOnLoci",
"fracBPOn1",
"fracBPOn2",
"fracLociOn",
"offBP1",
"offBP2",
"numOffLoci",
"fracBPOff1",
"fracBPOff2",
"fracLociOff",
]
columns_single = [
"depth",
"onBP",
"numOnLoci",
"fracBPOn",
"fracLociOn",
"offBP",
"numOffLoci",
"fracBPOff",
"fracLociOff"
]
if min_normal_depth < 0:
raise ValueError("min_normal_depth must be >= 0")
use_tumor_only = (min_normal_depth == 0)
columns = columns_single if use_tumor_only else columns_both
ensembl_loci_dfs = []
for patient in cohort:
patient_ensembl_loci_df = pd.read_csv(
path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"),
names=columns,
header=1)
# pylint: disable=no-member
# pylint gets confused by read_csv
if use_tumor_only:
depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth)
else:
depth_mask = (
(patient_ensembl_loci_df.depth1 == min_normal_depth) &
(patient_ensembl_loci_df.depth2 == min_tumor_depth))
patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask]
assert len(patient_ensembl_loci_df) == 1, (
"Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format(
min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient))
patient_ensembl_loci_df["patient_id"] = patient.id
ensembl_loci_dfs.append(patient_ensembl_loci_df)
ensembl_loci_df = pd.concat(ensembl_loci_dfs)
ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0
return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]]
|
python
|
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0,
pageant_dir_fn=None):
"""
Load in Pageant CoverageDepth results with Ensembl loci.
coverage_path is a path to Pageant CoverageDepth output directory, with
one subdirectory per patient and a `cdf.csv` file inside each patient subdir.
If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate
join tumor/normal coverage.
pageant_dir_fn is a function that takes in a Patient and produces a Pageant
dir name.
Last tested with Pageant CoverageDepth version 1ca9ed2.
"""
# Function to grab the pageant file name using the Patient
if pageant_dir_fn is None:
pageant_dir_fn = lambda patient: patient.id
columns_both = [
"depth1", # Normal
"depth2", # Tumor
"onBP1",
"onBP2",
"numOnLoci",
"fracBPOn1",
"fracBPOn2",
"fracLociOn",
"offBP1",
"offBP2",
"numOffLoci",
"fracBPOff1",
"fracBPOff2",
"fracLociOff",
]
columns_single = [
"depth",
"onBP",
"numOnLoci",
"fracBPOn",
"fracLociOn",
"offBP",
"numOffLoci",
"fracBPOff",
"fracLociOff"
]
if min_normal_depth < 0:
raise ValueError("min_normal_depth must be >= 0")
use_tumor_only = (min_normal_depth == 0)
columns = columns_single if use_tumor_only else columns_both
ensembl_loci_dfs = []
for patient in cohort:
patient_ensembl_loci_df = pd.read_csv(
path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"),
names=columns,
header=1)
# pylint: disable=no-member
# pylint gets confused by read_csv
if use_tumor_only:
depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth)
else:
depth_mask = (
(patient_ensembl_loci_df.depth1 == min_normal_depth) &
(patient_ensembl_loci_df.depth2 == min_tumor_depth))
patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask]
assert len(patient_ensembl_loci_df) == 1, (
"Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format(
min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient))
patient_ensembl_loci_df["patient_id"] = patient.id
ensembl_loci_dfs.append(patient_ensembl_loci_df)
ensembl_loci_df = pd.concat(ensembl_loci_dfs)
ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0
return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]]
|
[
"def",
"load_ensembl_coverage",
"(",
"cohort",
",",
"coverage_path",
",",
"min_tumor_depth",
",",
"min_normal_depth",
"=",
"0",
",",
"pageant_dir_fn",
"=",
"None",
")",
":",
"# Function to grab the pageant file name using the Patient",
"if",
"pageant_dir_fn",
"is",
"None",
":",
"pageant_dir_fn",
"=",
"lambda",
"patient",
":",
"patient",
".",
"id",
"columns_both",
"=",
"[",
"\"depth1\"",
",",
"# Normal",
"\"depth2\"",
",",
"# Tumor",
"\"onBP1\"",
",",
"\"onBP2\"",
",",
"\"numOnLoci\"",
",",
"\"fracBPOn1\"",
",",
"\"fracBPOn2\"",
",",
"\"fracLociOn\"",
",",
"\"offBP1\"",
",",
"\"offBP2\"",
",",
"\"numOffLoci\"",
",",
"\"fracBPOff1\"",
",",
"\"fracBPOff2\"",
",",
"\"fracLociOff\"",
",",
"]",
"columns_single",
"=",
"[",
"\"depth\"",
",",
"\"onBP\"",
",",
"\"numOnLoci\"",
",",
"\"fracBPOn\"",
",",
"\"fracLociOn\"",
",",
"\"offBP\"",
",",
"\"numOffLoci\"",
",",
"\"fracBPOff\"",
",",
"\"fracLociOff\"",
"]",
"if",
"min_normal_depth",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"min_normal_depth must be >= 0\"",
")",
"use_tumor_only",
"=",
"(",
"min_normal_depth",
"==",
"0",
")",
"columns",
"=",
"columns_single",
"if",
"use_tumor_only",
"else",
"columns_both",
"ensembl_loci_dfs",
"=",
"[",
"]",
"for",
"patient",
"in",
"cohort",
":",
"patient_ensembl_loci_df",
"=",
"pd",
".",
"read_csv",
"(",
"path",
".",
"join",
"(",
"coverage_path",
",",
"pageant_dir_fn",
"(",
"patient",
")",
",",
"\"cdf.csv\"",
")",
",",
"names",
"=",
"columns",
",",
"header",
"=",
"1",
")",
"# pylint: disable=no-member",
"# pylint gets confused by read_csv",
"if",
"use_tumor_only",
":",
"depth_mask",
"=",
"(",
"patient_ensembl_loci_df",
".",
"depth",
"==",
"min_tumor_depth",
")",
"else",
":",
"depth_mask",
"=",
"(",
"(",
"patient_ensembl_loci_df",
".",
"depth1",
"==",
"min_normal_depth",
")",
"&",
"(",
"patient_ensembl_loci_df",
".",
"depth2",
"==",
"min_tumor_depth",
")",
")",
"patient_ensembl_loci_df",
"=",
"patient_ensembl_loci_df",
"[",
"depth_mask",
"]",
"assert",
"len",
"(",
"patient_ensembl_loci_df",
")",
"==",
"1",
",",
"(",
"\"Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}\"",
".",
"format",
"(",
"min_tumor_depth",
",",
"min_normal_depth",
",",
"len",
"(",
"patient_ensembl_loci_df",
")",
",",
"patient",
")",
")",
"patient_ensembl_loci_df",
"[",
"\"patient_id\"",
"]",
"=",
"patient",
".",
"id",
"ensembl_loci_dfs",
".",
"append",
"(",
"patient_ensembl_loci_df",
")",
"ensembl_loci_df",
"=",
"pd",
".",
"concat",
"(",
"ensembl_loci_dfs",
")",
"ensembl_loci_df",
"[",
"\"MB\"",
"]",
"=",
"ensembl_loci_df",
".",
"numOnLoci",
"/",
"1000000.0",
"return",
"ensembl_loci_df",
"[",
"[",
"\"patient_id\"",
",",
"\"numOnLoci\"",
",",
"\"MB\"",
"]",
"]"
] |
Load in Pageant CoverageDepth results with Ensembl loci.
coverage_path is a path to Pageant CoverageDepth output directory, with
one subdirectory per patient and a `cdf.csv` file inside each patient subdir.
If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate
join tumor/normal coverage.
pageant_dir_fn is a function that takes in a Patient and produces a Pageant
dir name.
Last tested with Pageant CoverageDepth version 1ca9ed2.
|
[
"Load",
"in",
"Pageant",
"CoverageDepth",
"results",
"with",
"Ensembl",
"loci",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_filters.py#L87-L160
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
vertical_percent
|
def vertical_percent(plot, percent=0.1):
"""
Using the size of the y axis, return a fraction of that size.
"""
plot_bottom, plot_top = plot.get_ylim()
return percent * (plot_top - plot_bottom)
|
python
|
def vertical_percent(plot, percent=0.1):
"""
Using the size of the y axis, return a fraction of that size.
"""
plot_bottom, plot_top = plot.get_ylim()
return percent * (plot_top - plot_bottom)
|
[
"def",
"vertical_percent",
"(",
"plot",
",",
"percent",
"=",
"0.1",
")",
":",
"plot_bottom",
",",
"plot_top",
"=",
"plot",
".",
"get_ylim",
"(",
")",
"return",
"percent",
"*",
"(",
"plot_top",
"-",
"plot_bottom",
")"
] |
Using the size of the y axis, return a fraction of that size.
|
[
"Using",
"the",
"size",
"of",
"the",
"y",
"axis",
"return",
"a",
"fraction",
"of",
"that",
"size",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L24-L29
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
hide_ticks
|
def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
"""Hide tick values that are outside of [min_tick_value, max_tick_value]"""
for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
tick_label = as_numeric(tick_value)
if tick_label:
if (min_tick_value is not None and tick_label < min_tick_value or
max_tick_value is not None and tick_label > max_tick_value):
tick.set_visible(False)
|
python
|
def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
"""Hide tick values that are outside of [min_tick_value, max_tick_value]"""
for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
tick_label = as_numeric(tick_value)
if tick_label:
if (min_tick_value is not None and tick_label < min_tick_value or
max_tick_value is not None and tick_label > max_tick_value):
tick.set_visible(False)
|
[
"def",
"hide_ticks",
"(",
"plot",
",",
"min_tick_value",
"=",
"None",
",",
"max_tick_value",
"=",
"None",
")",
":",
"for",
"tick",
",",
"tick_value",
"in",
"zip",
"(",
"plot",
".",
"get_yticklabels",
"(",
")",
",",
"plot",
".",
"get_yticks",
"(",
")",
")",
":",
"tick_label",
"=",
"as_numeric",
"(",
"tick_value",
")",
"if",
"tick_label",
":",
"if",
"(",
"min_tick_value",
"is",
"not",
"None",
"and",
"tick_label",
"<",
"min_tick_value",
"or",
"max_tick_value",
"is",
"not",
"None",
"and",
"tick_label",
">",
"max_tick_value",
")",
":",
"tick",
".",
"set_visible",
"(",
"False",
")"
] |
Hide tick values that are outside of [min_tick_value, max_tick_value]
|
[
"Hide",
"tick",
"values",
"that",
"are",
"outside",
"of",
"[",
"min_tick_value",
"max_tick_value",
"]"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L37-L44
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
add_significance_indicator
|
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False):
"""
Add a p-value significance indicator.
"""
plot_bottom, plot_top = plot.get_ylim()
# Give the plot a little room for the significance indicator
line_height = vertical_percent(plot, 0.1)
# Add some extra spacing below the indicator
plot_top = plot_top + line_height
# Add some extra spacing above the indicator
plot.set_ylim(top=plot_top + line_height * 2)
color = "black"
line_top = plot_top + line_height
plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color)
indicator = "*" if significant else "ns"
plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
|
python
|
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False):
"""
Add a p-value significance indicator.
"""
plot_bottom, plot_top = plot.get_ylim()
# Give the plot a little room for the significance indicator
line_height = vertical_percent(plot, 0.1)
# Add some extra spacing below the indicator
plot_top = plot_top + line_height
# Add some extra spacing above the indicator
plot.set_ylim(top=plot_top + line_height * 2)
color = "black"
line_top = plot_top + line_height
plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color)
indicator = "*" if significant else "ns"
plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
|
[
"def",
"add_significance_indicator",
"(",
"plot",
",",
"col_a",
"=",
"0",
",",
"col_b",
"=",
"1",
",",
"significant",
"=",
"False",
")",
":",
"plot_bottom",
",",
"plot_top",
"=",
"plot",
".",
"get_ylim",
"(",
")",
"# Give the plot a little room for the significance indicator",
"line_height",
"=",
"vertical_percent",
"(",
"plot",
",",
"0.1",
")",
"# Add some extra spacing below the indicator",
"plot_top",
"=",
"plot_top",
"+",
"line_height",
"# Add some extra spacing above the indicator",
"plot",
".",
"set_ylim",
"(",
"top",
"=",
"plot_top",
"+",
"line_height",
"*",
"2",
")",
"color",
"=",
"\"black\"",
"line_top",
"=",
"plot_top",
"+",
"line_height",
"plot",
".",
"plot",
"(",
"[",
"col_a",
",",
"col_a",
",",
"col_b",
",",
"col_b",
"]",
",",
"[",
"plot_top",
",",
"line_top",
",",
"line_top",
",",
"plot_top",
"]",
",",
"lw",
"=",
"1.5",
",",
"color",
"=",
"color",
")",
"indicator",
"=",
"\"*\"",
"if",
"significant",
"else",
"\"ns\"",
"plot",
".",
"text",
"(",
"(",
"col_a",
"+",
"col_b",
")",
"*",
"0.5",
",",
"line_top",
",",
"indicator",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"bottom\"",
",",
"color",
"=",
"color",
")"
] |
Add a p-value significance indicator.
|
[
"Add",
"a",
"p",
"-",
"value",
"significance",
"indicator",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L55-L70
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
stripboxplot
|
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs):
"""
Overlay a stripplot on top of a boxplot.
"""
ax = sb.boxplot(
x=x,
y=y,
data=data,
ax=ax,
fliersize=0,
**kwargs
)
plot = sb.stripplot(
x=x,
y=y,
data=data,
ax=ax,
jitter=kwargs.pop("jitter", 0.05),
color=kwargs.pop("color", "0.3"),
**kwargs
)
if data[y].min() >= 0:
hide_negative_y_ticks(plot)
if significant is not None:
add_significance_indicator(plot=plot, significant=significant)
return plot
|
python
|
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs):
"""
Overlay a stripplot on top of a boxplot.
"""
ax = sb.boxplot(
x=x,
y=y,
data=data,
ax=ax,
fliersize=0,
**kwargs
)
plot = sb.stripplot(
x=x,
y=y,
data=data,
ax=ax,
jitter=kwargs.pop("jitter", 0.05),
color=kwargs.pop("color", "0.3"),
**kwargs
)
if data[y].min() >= 0:
hide_negative_y_ticks(plot)
if significant is not None:
add_significance_indicator(plot=plot, significant=significant)
return plot
|
[
"def",
"stripboxplot",
"(",
"x",
",",
"y",
",",
"data",
",",
"ax",
"=",
"None",
",",
"significant",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"sb",
".",
"boxplot",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"data",
"=",
"data",
",",
"ax",
"=",
"ax",
",",
"fliersize",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
"plot",
"=",
"sb",
".",
"stripplot",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"data",
"=",
"data",
",",
"ax",
"=",
"ax",
",",
"jitter",
"=",
"kwargs",
".",
"pop",
"(",
"\"jitter\"",
",",
"0.05",
")",
",",
"color",
"=",
"kwargs",
".",
"pop",
"(",
"\"color\"",
",",
"\"0.3\"",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"data",
"[",
"y",
"]",
".",
"min",
"(",
")",
">=",
"0",
":",
"hide_negative_y_ticks",
"(",
"plot",
")",
"if",
"significant",
"is",
"not",
"None",
":",
"add_significance_indicator",
"(",
"plot",
"=",
"plot",
",",
"significant",
"=",
"significant",
")",
"return",
"plot"
] |
Overlay a stripplot on top of a boxplot.
|
[
"Overlay",
"a",
"stripplot",
"on",
"top",
"of",
"a",
"boxplot",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L72-L100
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
fishers_exact_plot
|
def fishers_exact_plot(data, condition1, condition2, ax=None,
condition1_value=None,
alternative="two-sided", **kwargs):
"""
Perform a Fisher's exact test to compare to binary columns
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition1: str
First binary column to compare (and used for test sidedness)
condition2: str
Second binary column to compare
ax : Axes, default None
Axes to plot on
condition1_value:
If `condition1` is not a binary column, split on =/!= to condition1_value
alternative:
Specify the sidedness of the test: "two-sided", "less"
or "greater"
"""
plot = sb.barplot(
x=condition1,
y=condition2,
ax=ax,
data=data,
**kwargs
)
plot.set_ylabel("Percent %s" % condition2)
condition1_mask = get_condition_mask(data, condition1, condition1_value)
count_table = pd.crosstab(data[condition1], data[condition2])
print(count_table)
oddsratio, p_value = fisher_exact(count_table, alternative=alternative)
add_significance_indicator(plot=plot, significant=p_value <= 0.05)
only_percentage_ticks(plot)
if alternative != "two-sided":
raise ValueError("We need to better understand the one-sided Fisher's Exact test")
sided_str = "two-sided"
print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str))
return FishersExactResults(oddsratio=oddsratio,
p_value=p_value,
sided_str=sided_str,
with_condition1_series=data[condition1_mask][condition2],
without_condition1_series=data[~condition1_mask][condition2],
plot=plot)
|
python
|
def fishers_exact_plot(data, condition1, condition2, ax=None,
condition1_value=None,
alternative="two-sided", **kwargs):
"""
Perform a Fisher's exact test to compare to binary columns
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition1: str
First binary column to compare (and used for test sidedness)
condition2: str
Second binary column to compare
ax : Axes, default None
Axes to plot on
condition1_value:
If `condition1` is not a binary column, split on =/!= to condition1_value
alternative:
Specify the sidedness of the test: "two-sided", "less"
or "greater"
"""
plot = sb.barplot(
x=condition1,
y=condition2,
ax=ax,
data=data,
**kwargs
)
plot.set_ylabel("Percent %s" % condition2)
condition1_mask = get_condition_mask(data, condition1, condition1_value)
count_table = pd.crosstab(data[condition1], data[condition2])
print(count_table)
oddsratio, p_value = fisher_exact(count_table, alternative=alternative)
add_significance_indicator(plot=plot, significant=p_value <= 0.05)
only_percentage_ticks(plot)
if alternative != "two-sided":
raise ValueError("We need to better understand the one-sided Fisher's Exact test")
sided_str = "two-sided"
print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str))
return FishersExactResults(oddsratio=oddsratio,
p_value=p_value,
sided_str=sided_str,
with_condition1_series=data[condition1_mask][condition2],
without_condition1_series=data[~condition1_mask][condition2],
plot=plot)
|
[
"def",
"fishers_exact_plot",
"(",
"data",
",",
"condition1",
",",
"condition2",
",",
"ax",
"=",
"None",
",",
"condition1_value",
"=",
"None",
",",
"alternative",
"=",
"\"two-sided\"",
",",
"*",
"*",
"kwargs",
")",
":",
"plot",
"=",
"sb",
".",
"barplot",
"(",
"x",
"=",
"condition1",
",",
"y",
"=",
"condition2",
",",
"ax",
"=",
"ax",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
"plot",
".",
"set_ylabel",
"(",
"\"Percent %s\"",
"%",
"condition2",
")",
"condition1_mask",
"=",
"get_condition_mask",
"(",
"data",
",",
"condition1",
",",
"condition1_value",
")",
"count_table",
"=",
"pd",
".",
"crosstab",
"(",
"data",
"[",
"condition1",
"]",
",",
"data",
"[",
"condition2",
"]",
")",
"print",
"(",
"count_table",
")",
"oddsratio",
",",
"p_value",
"=",
"fisher_exact",
"(",
"count_table",
",",
"alternative",
"=",
"alternative",
")",
"add_significance_indicator",
"(",
"plot",
"=",
"plot",
",",
"significant",
"=",
"p_value",
"<=",
"0.05",
")",
"only_percentage_ticks",
"(",
"plot",
")",
"if",
"alternative",
"!=",
"\"two-sided\"",
":",
"raise",
"ValueError",
"(",
"\"We need to better understand the one-sided Fisher's Exact test\"",
")",
"sided_str",
"=",
"\"two-sided\"",
"print",
"(",
"\"Fisher's Exact Test: OR: {}, p-value={} ({})\"",
".",
"format",
"(",
"oddsratio",
",",
"p_value",
",",
"sided_str",
")",
")",
"return",
"FishersExactResults",
"(",
"oddsratio",
"=",
"oddsratio",
",",
"p_value",
"=",
"p_value",
",",
"sided_str",
"=",
"sided_str",
",",
"with_condition1_series",
"=",
"data",
"[",
"condition1_mask",
"]",
"[",
"condition2",
"]",
",",
"without_condition1_series",
"=",
"data",
"[",
"~",
"condition1_mask",
"]",
"[",
"condition2",
"]",
",",
"plot",
"=",
"plot",
")"
] |
Perform a Fisher's exact test to compare to binary columns
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition1: str
First binary column to compare (and used for test sidedness)
condition2: str
Second binary column to compare
ax : Axes, default None
Axes to plot on
condition1_value:
If `condition1` is not a binary column, split on =/!= to condition1_value
alternative:
Specify the sidedness of the test: "two-sided", "less"
or "greater"
|
[
"Perform",
"a",
"Fisher",
"s",
"exact",
"test",
"to",
"compare",
"to",
"binary",
"columns"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L130-L182
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
mann_whitney_plot
|
def mann_whitney_plot(data,
condition,
distribution,
ax=None,
condition_value=None,
alternative="two-sided",
skip_plot=False,
**kwargs):
"""
Create a box plot comparing a condition and perform a
Mann Whitney test to compare the distribution in condition A v B
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition: str
Column to use as the splitting criteria
distribution: str
Column to use as the Y-axis or distribution in the test
ax : Axes, default None
Axes to plot on
condition_value:
If `condition` is not a binary column, split on =/!= to condition_value
alternative:
Specify the sidedness of the Mann-Whitney test: "two-sided", "less"
or "greater"
skip_plot:
Calculate the test statistic and p-value, but don't plot.
"""
condition_mask = get_condition_mask(data, condition, condition_value)
U, p_value = mannwhitneyu(
data[condition_mask][distribution],
data[~condition_mask][distribution],
alternative=alternative
)
plot = None
if not skip_plot:
plot = stripboxplot(
x=condition,
y=distribution,
data=data,
ax=ax,
significant=p_value <= 0.05,
**kwargs
)
sided_str = sided_str_from_alternative(alternative, condition)
print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str))
return MannWhitneyResults(U=U,
p_value=p_value,
sided_str=sided_str,
with_condition_series=data[condition_mask][distribution],
without_condition_series=data[~condition_mask][distribution],
plot=plot)
|
python
|
def mann_whitney_plot(data,
condition,
distribution,
ax=None,
condition_value=None,
alternative="two-sided",
skip_plot=False,
**kwargs):
"""
Create a box plot comparing a condition and perform a
Mann Whitney test to compare the distribution in condition A v B
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition: str
Column to use as the splitting criteria
distribution: str
Column to use as the Y-axis or distribution in the test
ax : Axes, default None
Axes to plot on
condition_value:
If `condition` is not a binary column, split on =/!= to condition_value
alternative:
Specify the sidedness of the Mann-Whitney test: "two-sided", "less"
or "greater"
skip_plot:
Calculate the test statistic and p-value, but don't plot.
"""
condition_mask = get_condition_mask(data, condition, condition_value)
U, p_value = mannwhitneyu(
data[condition_mask][distribution],
data[~condition_mask][distribution],
alternative=alternative
)
plot = None
if not skip_plot:
plot = stripboxplot(
x=condition,
y=distribution,
data=data,
ax=ax,
significant=p_value <= 0.05,
**kwargs
)
sided_str = sided_str_from_alternative(alternative, condition)
print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str))
return MannWhitneyResults(U=U,
p_value=p_value,
sided_str=sided_str,
with_condition_series=data[condition_mask][distribution],
without_condition_series=data[~condition_mask][distribution],
plot=plot)
|
[
"def",
"mann_whitney_plot",
"(",
"data",
",",
"condition",
",",
"distribution",
",",
"ax",
"=",
"None",
",",
"condition_value",
"=",
"None",
",",
"alternative",
"=",
"\"two-sided\"",
",",
"skip_plot",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"condition_mask",
"=",
"get_condition_mask",
"(",
"data",
",",
"condition",
",",
"condition_value",
")",
"U",
",",
"p_value",
"=",
"mannwhitneyu",
"(",
"data",
"[",
"condition_mask",
"]",
"[",
"distribution",
"]",
",",
"data",
"[",
"~",
"condition_mask",
"]",
"[",
"distribution",
"]",
",",
"alternative",
"=",
"alternative",
")",
"plot",
"=",
"None",
"if",
"not",
"skip_plot",
":",
"plot",
"=",
"stripboxplot",
"(",
"x",
"=",
"condition",
",",
"y",
"=",
"distribution",
",",
"data",
"=",
"data",
",",
"ax",
"=",
"ax",
",",
"significant",
"=",
"p_value",
"<=",
"0.05",
",",
"*",
"*",
"kwargs",
")",
"sided_str",
"=",
"sided_str_from_alternative",
"(",
"alternative",
",",
"condition",
")",
"print",
"(",
"\"Mann-Whitney test: U={}, p-value={} ({})\"",
".",
"format",
"(",
"U",
",",
"p_value",
",",
"sided_str",
")",
")",
"return",
"MannWhitneyResults",
"(",
"U",
"=",
"U",
",",
"p_value",
"=",
"p_value",
",",
"sided_str",
"=",
"sided_str",
",",
"with_condition_series",
"=",
"data",
"[",
"condition_mask",
"]",
"[",
"distribution",
"]",
",",
"without_condition_series",
"=",
"data",
"[",
"~",
"condition_mask",
"]",
"[",
"distribution",
"]",
",",
"plot",
"=",
"plot",
")"
] |
Create a box plot comparing a condition and perform a
Mann Whitney test to compare the distribution in condition A v B
Parameters
----------
data: Pandas dataframe
Dataframe to retrieve information from
condition: str
Column to use as the splitting criteria
distribution: str
Column to use as the Y-axis or distribution in the test
ax : Axes, default None
Axes to plot on
condition_value:
If `condition` is not a binary column, split on =/!= to condition_value
alternative:
Specify the sidedness of the Mann-Whitney test: "two-sided", "less"
or "greater"
skip_plot:
Calculate the test statistic and p-value, but don't plot.
|
[
"Create",
"a",
"box",
"plot",
"comparing",
"a",
"condition",
"and",
"perform",
"a",
"Mann",
"Whitney",
"test",
"to",
"compare",
"the",
"distribution",
"in",
"condition",
"A",
"v",
"B"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L192-L253
|
train
|
hammerlab/cohorts
|
cohorts/plot.py
|
roc_curve_plot
|
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None):
"""Create a ROC curve and compute the bootstrap AUC for the given variable and outcome
Parameters
----------
data : Pandas dataframe
Dataframe to retrieve information from
value_column : str
Column to retrieve the values from
outcome_column : str
Column to use as the outcome variable
bootstrap_samples : int, optional
Number of bootstrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_bootstrap_auc, roc_plot) : (float, matplotlib plot)
Mean AUC for the given number of bootstrap samples and the plot
"""
scores = bootstrap_auc(df=data,
col=value_column,
pred_col=outcome_column,
n_bootstrap=bootstrap_samples)
mean_bootstrap_auc = scores.mean()
print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format(
value_column, bootstrap_samples, mean_bootstrap_auc, scores.std()))
outcome = data[outcome_column].astype(int)
values = data[value_column]
fpr, tpr, thresholds = roc_curve(outcome, values)
if ax is None:
ax = plt.gca()
roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column)
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.legend(loc=2, borderaxespad=0.)
ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values)))
return (mean_bootstrap_auc, roc_plot)
|
python
|
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None):
"""Create a ROC curve and compute the bootstrap AUC for the given variable and outcome
Parameters
----------
data : Pandas dataframe
Dataframe to retrieve information from
value_column : str
Column to retrieve the values from
outcome_column : str
Column to use as the outcome variable
bootstrap_samples : int, optional
Number of bootstrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_bootstrap_auc, roc_plot) : (float, matplotlib plot)
Mean AUC for the given number of bootstrap samples and the plot
"""
scores = bootstrap_auc(df=data,
col=value_column,
pred_col=outcome_column,
n_bootstrap=bootstrap_samples)
mean_bootstrap_auc = scores.mean()
print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format(
value_column, bootstrap_samples, mean_bootstrap_auc, scores.std()))
outcome = data[outcome_column].astype(int)
values = data[value_column]
fpr, tpr, thresholds = roc_curve(outcome, values)
if ax is None:
ax = plt.gca()
roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column)
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.legend(loc=2, borderaxespad=0.)
ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values)))
return (mean_bootstrap_auc, roc_plot)
|
[
"def",
"roc_curve_plot",
"(",
"data",
",",
"value_column",
",",
"outcome_column",
",",
"bootstrap_samples",
"=",
"100",
",",
"ax",
"=",
"None",
")",
":",
"scores",
"=",
"bootstrap_auc",
"(",
"df",
"=",
"data",
",",
"col",
"=",
"value_column",
",",
"pred_col",
"=",
"outcome_column",
",",
"n_bootstrap",
"=",
"bootstrap_samples",
")",
"mean_bootstrap_auc",
"=",
"scores",
".",
"mean",
"(",
")",
"print",
"(",
"\"{}, Bootstrap (samples = {}) AUC:{}, std={}\"",
".",
"format",
"(",
"value_column",
",",
"bootstrap_samples",
",",
"mean_bootstrap_auc",
",",
"scores",
".",
"std",
"(",
")",
")",
")",
"outcome",
"=",
"data",
"[",
"outcome_column",
"]",
".",
"astype",
"(",
"int",
")",
"values",
"=",
"data",
"[",
"value_column",
"]",
"fpr",
",",
"tpr",
",",
"thresholds",
"=",
"roc_curve",
"(",
"outcome",
",",
"values",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"roc_plot",
"=",
"ax",
".",
"plot",
"(",
"fpr",
",",
"tpr",
",",
"lw",
"=",
"1",
",",
"label",
"=",
"value_column",
")",
"ax",
".",
"set_xlim",
"(",
"[",
"-",
"0.05",
",",
"1.05",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"[",
"-",
"0.05",
",",
"1.05",
"]",
")",
"ax",
".",
"set_xlabel",
"(",
"'False Positive Rate'",
")",
"ax",
".",
"set_ylabel",
"(",
"'True Positive Rate'",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"2",
",",
"borderaxespad",
"=",
"0.",
")",
"ax",
".",
"set_title",
"(",
"'{} ROC Curve (n={})'",
".",
"format",
"(",
"value_column",
",",
"len",
"(",
"values",
")",
")",
")",
"return",
"(",
"mean_bootstrap_auc",
",",
"roc_plot",
")"
] |
Create a ROC curve and compute the bootstrap AUC for the given variable and outcome
Parameters
----------
data : Pandas dataframe
Dataframe to retrieve information from
value_column : str
Column to retrieve the values from
outcome_column : str
Column to use as the outcome variable
bootstrap_samples : int, optional
Number of bootstrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_bootstrap_auc, roc_plot) : (float, matplotlib plot)
Mean AUC for the given number of bootstrap samples and the plot
|
[
"Create",
"a",
"ROC",
"curve",
"and",
"compute",
"the",
"bootstrap",
"AUC",
"for",
"the",
"given",
"variable",
"and",
"outcome"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L263-L308
|
train
|
hammerlab/cohorts
|
cohorts/utils.py
|
get_cache_dir
|
def get_cache_dir(cache_dir, cache_root_dir=None, *args, **kwargs):
"""
Return full cache_dir, according to following logic:
- if cache_dir is a full path (per path.isabs), return that value
- if not and if cache_root_dir is not None, join two paths
- otherwise, log warnings and return None
Separately, if args or kwargs are given, format cache_dir using kwargs
"""
cache_dir = cache_dir.format(*args, **kwargs)
if path.isabs(cache_dir):
if cache_root_dir is not None:
logger.warning('cache_dir ({}) is a full path; ignoring cache_root_dir'.format(cache_dir))
return cache_dir
if cache_root_dir is not None:
return path.join(cache_root_dir, cache_dir)
else:
logger.warning("cache dir is not full path & cache_root_dir not given. Caching may not work as expected!")
return None
|
python
|
def get_cache_dir(cache_dir, cache_root_dir=None, *args, **kwargs):
"""
Return full cache_dir, according to following logic:
- if cache_dir is a full path (per path.isabs), return that value
- if not and if cache_root_dir is not None, join two paths
- otherwise, log warnings and return None
Separately, if args or kwargs are given, format cache_dir using kwargs
"""
cache_dir = cache_dir.format(*args, **kwargs)
if path.isabs(cache_dir):
if cache_root_dir is not None:
logger.warning('cache_dir ({}) is a full path; ignoring cache_root_dir'.format(cache_dir))
return cache_dir
if cache_root_dir is not None:
return path.join(cache_root_dir, cache_dir)
else:
logger.warning("cache dir is not full path & cache_root_dir not given. Caching may not work as expected!")
return None
|
[
"def",
"get_cache_dir",
"(",
"cache_dir",
",",
"cache_root_dir",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cache_dir",
"=",
"cache_dir",
".",
"format",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"path",
".",
"isabs",
"(",
"cache_dir",
")",
":",
"if",
"cache_root_dir",
"is",
"not",
"None",
":",
"logger",
".",
"warning",
"(",
"'cache_dir ({}) is a full path; ignoring cache_root_dir'",
".",
"format",
"(",
"cache_dir",
")",
")",
"return",
"cache_dir",
"if",
"cache_root_dir",
"is",
"not",
"None",
":",
"return",
"path",
".",
"join",
"(",
"cache_root_dir",
",",
"cache_dir",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"cache dir is not full path & cache_root_dir not given. Caching may not work as expected!\"",
")",
"return",
"None"
] |
Return full cache_dir, according to following logic:
- if cache_dir is a full path (per path.isabs), return that value
- if not and if cache_root_dir is not None, join two paths
- otherwise, log warnings and return None
Separately, if args or kwargs are given, format cache_dir using kwargs
|
[
"Return",
"full",
"cache_dir",
"according",
"to",
"following",
"logic",
":",
"-",
"if",
"cache_dir",
"is",
"a",
"full",
"path",
"(",
"per",
"path",
".",
"isabs",
")",
"return",
"that",
"value",
"-",
"if",
"not",
"and",
"if",
"cache_root_dir",
"is",
"not",
"None",
"join",
"two",
"paths",
"-",
"otherwise",
"log",
"warnings",
"and",
"return",
"None",
"Separately",
"if",
"args",
"or",
"kwargs",
"are",
"given",
"format",
"cache_dir",
"using",
"kwargs"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L24-L41
|
train
|
hammerlab/cohorts
|
cohorts/utils.py
|
_strip_column_name
|
def _strip_column_name(col_name, keep_paren_contents=True):
"""
Utility script applying several regexs to a string.
Intended to be used by `strip_column_names`.
This function will:
1. replace informative punctuation components with text
2. (optionally) remove text within parentheses
3. replace remaining punctuation/whitespace with _
4. strip leading/trailing punctuation/whitespace
Parameters
----------
col_name (str): input character string
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
--------
modified string for new field name
Examples
--------
> print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
"""
# start with input
new_col_name = col_name
# replace meaningful punctuation with text equivalents
# surround each with whitespace to enforce consistent use of _
punctuation_to_text = {
'<=': 'le',
'>=': 'ge',
'=<': 'le',
'=>': 'ge',
'<': 'lt',
'>': 'gt',
'#': 'num'
}
for punctuation, punctuation_text in punctuation_to_text.items():
new_col_name = new_col_name.replace(punctuation, punctuation_text)
# remove contents within ()
if not(keep_paren_contents):
new_col_name = re.sub('\([^)]*\)', '', new_col_name)
# replace remaining punctuation/whitespace with _
punct_pattern = '[\W_]+'
punct_replacement = '_'
new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name)
# remove leading/trailing _ if it exists (if last char was punctuation)
new_col_name = new_col_name.strip("_")
# TODO: check for empty string
# return lower-case version of column name
return new_col_name.lower()
|
python
|
def _strip_column_name(col_name, keep_paren_contents=True):
"""
Utility script applying several regexs to a string.
Intended to be used by `strip_column_names`.
This function will:
1. replace informative punctuation components with text
2. (optionally) remove text within parentheses
3. replace remaining punctuation/whitespace with _
4. strip leading/trailing punctuation/whitespace
Parameters
----------
col_name (str): input character string
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
--------
modified string for new field name
Examples
--------
> print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
"""
# start with input
new_col_name = col_name
# replace meaningful punctuation with text equivalents
# surround each with whitespace to enforce consistent use of _
punctuation_to_text = {
'<=': 'le',
'>=': 'ge',
'=<': 'le',
'=>': 'ge',
'<': 'lt',
'>': 'gt',
'#': 'num'
}
for punctuation, punctuation_text in punctuation_to_text.items():
new_col_name = new_col_name.replace(punctuation, punctuation_text)
# remove contents within ()
if not(keep_paren_contents):
new_col_name = re.sub('\([^)]*\)', '', new_col_name)
# replace remaining punctuation/whitespace with _
punct_pattern = '[\W_]+'
punct_replacement = '_'
new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name)
# remove leading/trailing _ if it exists (if last char was punctuation)
new_col_name = new_col_name.strip("_")
# TODO: check for empty string
# return lower-case version of column name
return new_col_name.lower()
|
[
"def",
"_strip_column_name",
"(",
"col_name",
",",
"keep_paren_contents",
"=",
"True",
")",
":",
"# start with input",
"new_col_name",
"=",
"col_name",
"# replace meaningful punctuation with text equivalents",
"# surround each with whitespace to enforce consistent use of _",
"punctuation_to_text",
"=",
"{",
"'<='",
":",
"'le'",
",",
"'>='",
":",
"'ge'",
",",
"'=<'",
":",
"'le'",
",",
"'=>'",
":",
"'ge'",
",",
"'<'",
":",
"'lt'",
",",
"'>'",
":",
"'gt'",
",",
"'#'",
":",
"'num'",
"}",
"for",
"punctuation",
",",
"punctuation_text",
"in",
"punctuation_to_text",
".",
"items",
"(",
")",
":",
"new_col_name",
"=",
"new_col_name",
".",
"replace",
"(",
"punctuation",
",",
"punctuation_text",
")",
"# remove contents within ()",
"if",
"not",
"(",
"keep_paren_contents",
")",
":",
"new_col_name",
"=",
"re",
".",
"sub",
"(",
"'\\([^)]*\\)'",
",",
"''",
",",
"new_col_name",
")",
"# replace remaining punctuation/whitespace with _",
"punct_pattern",
"=",
"'[\\W_]+'",
"punct_replacement",
"=",
"'_'",
"new_col_name",
"=",
"re",
".",
"sub",
"(",
"punct_pattern",
",",
"punct_replacement",
",",
"new_col_name",
")",
"# remove leading/trailing _ if it exists (if last char was punctuation)",
"new_col_name",
"=",
"new_col_name",
".",
"strip",
"(",
"\"_\"",
")",
"# TODO: check for empty string",
"# return lower-case version of column name",
"return",
"new_col_name",
".",
"lower",
"(",
")"
] |
Utility script applying several regexs to a string.
Intended to be used by `strip_column_names`.
This function will:
1. replace informative punctuation components with text
2. (optionally) remove text within parentheses
3. replace remaining punctuation/whitespace with _
4. strip leading/trailing punctuation/whitespace
Parameters
----------
col_name (str): input character string
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
--------
modified string for new field name
Examples
--------
> print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
|
[
"Utility",
"script",
"applying",
"several",
"regexs",
"to",
"a",
"string",
".",
"Intended",
"to",
"be",
"used",
"by",
"strip_column_names",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L84-L142
|
train
|
hammerlab/cohorts
|
cohorts/utils.py
|
strip_column_names
|
def strip_column_names(cols, keep_paren_contents=True):
"""
Utility script for renaming pandas columns to patsy-friendly names.
Revised names have been:
- stripped of all punctuation and whitespace (converted to text or `_`)
- converted to lower case
Takes a list of column names, returns a dict mapping
names to revised names.
If there are any concerns with the conversion, this will
print a warning & return original column names.
Parameters
----------
cols (list): list of strings containing column names
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
-------
dict mapping col_names -> new_col_names
Example
-------
> df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']),
}
> df = pd.DataFrame(df)
> df = df.rename(columns = strip_column_names(df.columns))
## observe, by comparison
> df2 = df.rename(columns = strip_column_names(df.columns,
keep_paren_contents=False))
"""
# strip/replace punctuation
new_cols = [
_strip_column_name(col, keep_paren_contents=keep_paren_contents)
for col in cols]
if len(new_cols) != len(set(new_cols)):
warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.'
warn_str += ' Reverting column names to the original.'
warnings.warn(warn_str, Warning)
print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.')
return dict(zip(cols, cols))
return dict(zip(cols, new_cols))
|
python
|
def strip_column_names(cols, keep_paren_contents=True):
"""
Utility script for renaming pandas columns to patsy-friendly names.
Revised names have been:
- stripped of all punctuation and whitespace (converted to text or `_`)
- converted to lower case
Takes a list of column names, returns a dict mapping
names to revised names.
If there are any concerns with the conversion, this will
print a warning & return original column names.
Parameters
----------
cols (list): list of strings containing column names
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
-------
dict mapping col_names -> new_col_names
Example
-------
> df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']),
}
> df = pd.DataFrame(df)
> df = df.rename(columns = strip_column_names(df.columns))
## observe, by comparison
> df2 = df.rename(columns = strip_column_names(df.columns,
keep_paren_contents=False))
"""
# strip/replace punctuation
new_cols = [
_strip_column_name(col, keep_paren_contents=keep_paren_contents)
for col in cols]
if len(new_cols) != len(set(new_cols)):
warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.'
warn_str += ' Reverting column names to the original.'
warnings.warn(warn_str, Warning)
print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.')
return dict(zip(cols, cols))
return dict(zip(cols, new_cols))
|
[
"def",
"strip_column_names",
"(",
"cols",
",",
"keep_paren_contents",
"=",
"True",
")",
":",
"# strip/replace punctuation",
"new_cols",
"=",
"[",
"_strip_column_name",
"(",
"col",
",",
"keep_paren_contents",
"=",
"keep_paren_contents",
")",
"for",
"col",
"in",
"cols",
"]",
"if",
"len",
"(",
"new_cols",
")",
"!=",
"len",
"(",
"set",
"(",
"new_cols",
")",
")",
":",
"warn_str",
"=",
"'Warning: strip_column_names (if run) would introduce duplicate names.'",
"warn_str",
"+=",
"' Reverting column names to the original.'",
"warnings",
".",
"warn",
"(",
"warn_str",
",",
"Warning",
")",
"print",
"(",
"'Warning: strip_column_names would introduce duplicate names. Please fix & try again.'",
")",
"return",
"dict",
"(",
"zip",
"(",
"cols",
",",
"cols",
")",
")",
"return",
"dict",
"(",
"zip",
"(",
"cols",
",",
"new_cols",
")",
")"
] |
Utility script for renaming pandas columns to patsy-friendly names.
Revised names have been:
- stripped of all punctuation and whitespace (converted to text or `_`)
- converted to lower case
Takes a list of column names, returns a dict mapping
names to revised names.
If there are any concerns with the conversion, this will
print a warning & return original column names.
Parameters
----------
cols (list): list of strings containing column names
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
-------
dict mapping col_names -> new_col_names
Example
-------
> df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']),
}
> df = pd.DataFrame(df)
> df = df.rename(columns = strip_column_names(df.columns))
## observe, by comparison
> df2 = df.rename(columns = strip_column_names(df.columns,
keep_paren_contents=False))
|
[
"Utility",
"script",
"for",
"renaming",
"pandas",
"columns",
"to",
"patsy",
"-",
"friendly",
"names",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L144-L201
|
train
|
hammerlab/cohorts
|
cohorts/utils.py
|
set_attributes
|
def set_attributes(obj, additional_data):
"""
Given an object and a dictionary, give the object new attributes from that dictionary.
Uses _strip_column_name to git rid of whitespace/uppercase/special characters.
"""
for key, value in additional_data.items():
if hasattr(obj, key):
raise ValueError("Key %s in additional_data already exists in this object" % key)
setattr(obj, _strip_column_name(key), value)
|
python
|
def set_attributes(obj, additional_data):
"""
Given an object and a dictionary, give the object new attributes from that dictionary.
Uses _strip_column_name to git rid of whitespace/uppercase/special characters.
"""
for key, value in additional_data.items():
if hasattr(obj, key):
raise ValueError("Key %s in additional_data already exists in this object" % key)
setattr(obj, _strip_column_name(key), value)
|
[
"def",
"set_attributes",
"(",
"obj",
",",
"additional_data",
")",
":",
"for",
"key",
",",
"value",
"in",
"additional_data",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"key",
")",
":",
"raise",
"ValueError",
"(",
"\"Key %s in additional_data already exists in this object\"",
"%",
"key",
")",
"setattr",
"(",
"obj",
",",
"_strip_column_name",
"(",
"key",
")",
",",
"value",
")"
] |
Given an object and a dictionary, give the object new attributes from that dictionary.
Uses _strip_column_name to git rid of whitespace/uppercase/special characters.
|
[
"Given",
"an",
"object",
"and",
"a",
"dictionary",
"give",
"the",
"object",
"new",
"attributes",
"from",
"that",
"dictionary",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L210-L219
|
train
|
hammerlab/cohorts
|
cohorts/utils.py
|
DataFrameHolder.return_obj
|
def return_obj(cols, df, return_cols=False):
"""Construct a DataFrameHolder and then return either that or the DataFrame."""
df_holder = DataFrameHolder(cols=cols, df=df)
return df_holder.return_self(return_cols=return_cols)
|
python
|
def return_obj(cols, df, return_cols=False):
"""Construct a DataFrameHolder and then return either that or the DataFrame."""
df_holder = DataFrameHolder(cols=cols, df=df)
return df_holder.return_self(return_cols=return_cols)
|
[
"def",
"return_obj",
"(",
"cols",
",",
"df",
",",
"return_cols",
"=",
"False",
")",
":",
"df_holder",
"=",
"DataFrameHolder",
"(",
"cols",
"=",
"cols",
",",
"df",
"=",
"df",
")",
"return",
"df_holder",
".",
"return_self",
"(",
"return_cols",
"=",
"return_cols",
")"
] |
Construct a DataFrameHolder and then return either that or the DataFrame.
|
[
"Construct",
"a",
"DataFrameHolder",
"and",
"then",
"return",
"either",
"that",
"or",
"the",
"DataFrame",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L54-L57
|
train
|
hammerlab/cohorts
|
cohorts/provenance.py
|
compare_provenance
|
def compare_provenance(
this_provenance, other_provenance,
left_outer_diff = "In current but not comparison",
right_outer_diff = "In comparison but not current"):
"""Utility function to compare two abritrary provenance dicts
returns number of discrepancies.
Parameters
----------
this_provenance: provenance dict (to be compared to "other_provenance")
other_provenance: comparison provenance dict
(optional)
left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance
right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance
Returns
-----------
Number of discrepancies (0: None)
"""
## if either this or other items is null, return 0
if (not this_provenance or not other_provenance):
return 0
this_items = set(this_provenance.items())
other_items = set(other_provenance.items())
# Two-way diff: are any modules introduced, and are any modules lost?
new_diff = this_items.difference(other_items)
old_diff = other_items.difference(this_items)
warn_str = ""
if len(new_diff) > 0:
warn_str += "%s: %s" % (
left_outer_diff,
_provenance_str(new_diff))
if len(old_diff) > 0:
warn_str += "%s: %s" % (
right_outer_diff,
_provenance_str(old_diff))
if len(warn_str) > 0:
warnings.warn(warn_str, Warning)
return(len(new_diff)+len(old_diff))
|
python
|
def compare_provenance(
this_provenance, other_provenance,
left_outer_diff = "In current but not comparison",
right_outer_diff = "In comparison but not current"):
"""Utility function to compare two abritrary provenance dicts
returns number of discrepancies.
Parameters
----------
this_provenance: provenance dict (to be compared to "other_provenance")
other_provenance: comparison provenance dict
(optional)
left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance
right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance
Returns
-----------
Number of discrepancies (0: None)
"""
## if either this or other items is null, return 0
if (not this_provenance or not other_provenance):
return 0
this_items = set(this_provenance.items())
other_items = set(other_provenance.items())
# Two-way diff: are any modules introduced, and are any modules lost?
new_diff = this_items.difference(other_items)
old_diff = other_items.difference(this_items)
warn_str = ""
if len(new_diff) > 0:
warn_str += "%s: %s" % (
left_outer_diff,
_provenance_str(new_diff))
if len(old_diff) > 0:
warn_str += "%s: %s" % (
right_outer_diff,
_provenance_str(old_diff))
if len(warn_str) > 0:
warnings.warn(warn_str, Warning)
return(len(new_diff)+len(old_diff))
|
[
"def",
"compare_provenance",
"(",
"this_provenance",
",",
"other_provenance",
",",
"left_outer_diff",
"=",
"\"In current but not comparison\"",
",",
"right_outer_diff",
"=",
"\"In comparison but not current\"",
")",
":",
"## if either this or other items is null, return 0",
"if",
"(",
"not",
"this_provenance",
"or",
"not",
"other_provenance",
")",
":",
"return",
"0",
"this_items",
"=",
"set",
"(",
"this_provenance",
".",
"items",
"(",
")",
")",
"other_items",
"=",
"set",
"(",
"other_provenance",
".",
"items",
"(",
")",
")",
"# Two-way diff: are any modules introduced, and are any modules lost?",
"new_diff",
"=",
"this_items",
".",
"difference",
"(",
"other_items",
")",
"old_diff",
"=",
"other_items",
".",
"difference",
"(",
"this_items",
")",
"warn_str",
"=",
"\"\"",
"if",
"len",
"(",
"new_diff",
")",
">",
"0",
":",
"warn_str",
"+=",
"\"%s: %s\"",
"%",
"(",
"left_outer_diff",
",",
"_provenance_str",
"(",
"new_diff",
")",
")",
"if",
"len",
"(",
"old_diff",
")",
">",
"0",
":",
"warn_str",
"+=",
"\"%s: %s\"",
"%",
"(",
"right_outer_diff",
",",
"_provenance_str",
"(",
"old_diff",
")",
")",
"if",
"len",
"(",
"warn_str",
")",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"warn_str",
",",
"Warning",
")",
"return",
"(",
"len",
"(",
"new_diff",
")",
"+",
"len",
"(",
"old_diff",
")",
")"
] |
Utility function to compare two abritrary provenance dicts
returns number of discrepancies.
Parameters
----------
this_provenance: provenance dict (to be compared to "other_provenance")
other_provenance: comparison provenance dict
(optional)
left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance
right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance
Returns
-----------
Number of discrepancies (0: None)
|
[
"Utility",
"function",
"to",
"compare",
"two",
"abritrary",
"provenance",
"dicts",
"returns",
"number",
"of",
"discrepancies",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/provenance.py#L22-L65
|
train
|
hammerlab/cohorts
|
cohorts/survival.py
|
_plot_kmf_single
|
def _plot_kmf_single(df,
condition_col,
survival_col,
censor_col,
threshold,
title,
xlabel,
ylabel,
ax,
with_condition_color,
no_condition_color,
with_condition_label,
no_condition_label,
color_map,
label_map,
color_palette,
ci_show,
print_as_title):
"""
Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col.
All inputs are required - this function is intended to be called by `plot_kmf`.
"""
# make color inputs consistent hex format
if colors.is_color_like(with_condition_color):
with_condition_color = colors.to_hex(with_condition_color)
if colors.is_color_like(no_condition_color):
no_condition_color = colors.to_hex(no_condition_color)
## prepare data to be plotted; producing 3 outputs:
# - `condition`, series containing category labels to be plotted
# - `label_map` (mapping condition values to plot labels)
# - `color_map` (mapping condition values to plotted colors)
if threshold is not None:
is_median = threshold == "median"
if is_median:
threshold = df[condition_col].median()
label_suffix = float_str(threshold)
condition = df[condition_col] > threshold
default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix)
if is_median:
label_suffix += " (median)"
default_label_with_condition = "%s > %s" % (condition_col, label_suffix)
with_condition_label = with_condition_label or default_label_with_condition
no_condition_label = no_condition_label or default_label_no_condition
if not label_map:
label_map = {False: no_condition_label,
True: with_condition_label}
if not color_map:
color_map = {False: no_condition_color,
True: with_condition_color}
elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category":
condition = df[condition_col].astype("category")
if not label_map:
label_map = dict()
[label_map.update({condition_value: '{} = {}'.format(condition_col,
condition_value)})
for condition_value in condition.unique()]
if not color_map:
rgb_values = sb.color_palette(color_palette, len(label_map.keys()))
hex_values = [colors.to_hex(col) for col in rgb_values]
color_map = dict(zip(label_map.keys(), hex_values))
elif df[condition_col].dtype == 'bool':
condition = df[condition_col]
default_label_with_condition = "= {}".format(condition_col)
default_label_no_condition = "¬ {}".format(condition_col)
with_condition_label = with_condition_label or default_label_with_condition
no_condition_label = no_condition_label or default_label_no_condition
if not label_map:
label_map = {False: no_condition_label,
True: with_condition_label}
if not color_map:
color_map = {False: no_condition_color,
True: with_condition_color}
else:
raise ValueError('Don\'t know how to plot data of type\
{}'.format(df[condition_col].dtype))
# produce kmf plot for each category (group) identified above
kmf = KaplanMeierFitter()
grp_desc = list()
grp_survival_data = dict()
grp_event_data = dict()
grp_names = list(condition.unique())
for grp_name, grp_df in df.groupby(condition):
grp_survival = grp_df[survival_col]
grp_event = (grp_df[censor_col].astype(bool))
grp_label = label_map[grp_name]
grp_color = color_map[grp_name]
kmf.fit(grp_survival, grp_event, label=grp_label)
desc_str = "# {}: {}".format(grp_label, len(grp_survival))
grp_desc.append(desc_str)
grp_survival_data[grp_name] = grp_survival
grp_event_data[grp_name] = grp_event
if ax:
ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color)
else:
ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color)
## format the plot
# Set the y-axis to range 0 to 1
ax.set_ylim(0, 1)
y_tick_vals = ax.get_yticks()
ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals])
# plot title
if title:
ax.set_title(title)
elif print_as_title:
ax.set_title(' | '.join(grp_desc))
else:
[print(desc) for desc in grp_desc]
# axis labels
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
## summarize analytical version of results
## again using same groups as are plotted
if len(grp_names) == 2:
# use log-rank test for 2 groups
results = logrank_test(grp_survival_data[grp_names[0]],
grp_survival_data[grp_names[1]],
event_observed_A=grp_event_data[grp_names[0]],
event_observed_B=grp_event_data[grp_names[1]])
elif len(grp_names) == 1:
# no analytical result for 1 or 0 groups
results = NullSurvivalResults()
else:
# cox PH fitter for >2 groups
cf = CoxPHFitter()
cox_df = patsy.dmatrix('+'.join([condition_col, survival_col,
censor_col]),
df, return_type='dataframe')
del cox_df['Intercept']
results = cf.fit(cox_df, survival_col, event_col=censor_col)
results.print_summary()
# add metadata to results object so caller can print them
results.survival_data_series = grp_survival_data
results.event_data_series = grp_event_data
results.desc = grp_desc
return results
|
python
|
def _plot_kmf_single(df,
condition_col,
survival_col,
censor_col,
threshold,
title,
xlabel,
ylabel,
ax,
with_condition_color,
no_condition_color,
with_condition_label,
no_condition_label,
color_map,
label_map,
color_palette,
ci_show,
print_as_title):
"""
Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col.
All inputs are required - this function is intended to be called by `plot_kmf`.
"""
# make color inputs consistent hex format
if colors.is_color_like(with_condition_color):
with_condition_color = colors.to_hex(with_condition_color)
if colors.is_color_like(no_condition_color):
no_condition_color = colors.to_hex(no_condition_color)
## prepare data to be plotted; producing 3 outputs:
# - `condition`, series containing category labels to be plotted
# - `label_map` (mapping condition values to plot labels)
# - `color_map` (mapping condition values to plotted colors)
if threshold is not None:
is_median = threshold == "median"
if is_median:
threshold = df[condition_col].median()
label_suffix = float_str(threshold)
condition = df[condition_col] > threshold
default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix)
if is_median:
label_suffix += " (median)"
default_label_with_condition = "%s > %s" % (condition_col, label_suffix)
with_condition_label = with_condition_label or default_label_with_condition
no_condition_label = no_condition_label or default_label_no_condition
if not label_map:
label_map = {False: no_condition_label,
True: with_condition_label}
if not color_map:
color_map = {False: no_condition_color,
True: with_condition_color}
elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category":
condition = df[condition_col].astype("category")
if not label_map:
label_map = dict()
[label_map.update({condition_value: '{} = {}'.format(condition_col,
condition_value)})
for condition_value in condition.unique()]
if not color_map:
rgb_values = sb.color_palette(color_palette, len(label_map.keys()))
hex_values = [colors.to_hex(col) for col in rgb_values]
color_map = dict(zip(label_map.keys(), hex_values))
elif df[condition_col].dtype == 'bool':
condition = df[condition_col]
default_label_with_condition = "= {}".format(condition_col)
default_label_no_condition = "¬ {}".format(condition_col)
with_condition_label = with_condition_label or default_label_with_condition
no_condition_label = no_condition_label or default_label_no_condition
if not label_map:
label_map = {False: no_condition_label,
True: with_condition_label}
if not color_map:
color_map = {False: no_condition_color,
True: with_condition_color}
else:
raise ValueError('Don\'t know how to plot data of type\
{}'.format(df[condition_col].dtype))
# produce kmf plot for each category (group) identified above
kmf = KaplanMeierFitter()
grp_desc = list()
grp_survival_data = dict()
grp_event_data = dict()
grp_names = list(condition.unique())
for grp_name, grp_df in df.groupby(condition):
grp_survival = grp_df[survival_col]
grp_event = (grp_df[censor_col].astype(bool))
grp_label = label_map[grp_name]
grp_color = color_map[grp_name]
kmf.fit(grp_survival, grp_event, label=grp_label)
desc_str = "# {}: {}".format(grp_label, len(grp_survival))
grp_desc.append(desc_str)
grp_survival_data[grp_name] = grp_survival
grp_event_data[grp_name] = grp_event
if ax:
ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color)
else:
ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color)
## format the plot
# Set the y-axis to range 0 to 1
ax.set_ylim(0, 1)
y_tick_vals = ax.get_yticks()
ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals])
# plot title
if title:
ax.set_title(title)
elif print_as_title:
ax.set_title(' | '.join(grp_desc))
else:
[print(desc) for desc in grp_desc]
# axis labels
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
## summarize analytical version of results
## again using same groups as are plotted
if len(grp_names) == 2:
# use log-rank test for 2 groups
results = logrank_test(grp_survival_data[grp_names[0]],
grp_survival_data[grp_names[1]],
event_observed_A=grp_event_data[grp_names[0]],
event_observed_B=grp_event_data[grp_names[1]])
elif len(grp_names) == 1:
# no analytical result for 1 or 0 groups
results = NullSurvivalResults()
else:
# cox PH fitter for >2 groups
cf = CoxPHFitter()
cox_df = patsy.dmatrix('+'.join([condition_col, survival_col,
censor_col]),
df, return_type='dataframe')
del cox_df['Intercept']
results = cf.fit(cox_df, survival_col, event_col=censor_col)
results.print_summary()
# add metadata to results object so caller can print them
results.survival_data_series = grp_survival_data
results.event_data_series = grp_event_data
results.desc = grp_desc
return results
|
[
"def",
"_plot_kmf_single",
"(",
"df",
",",
"condition_col",
",",
"survival_col",
",",
"censor_col",
",",
"threshold",
",",
"title",
",",
"xlabel",
",",
"ylabel",
",",
"ax",
",",
"with_condition_color",
",",
"no_condition_color",
",",
"with_condition_label",
",",
"no_condition_label",
",",
"color_map",
",",
"label_map",
",",
"color_palette",
",",
"ci_show",
",",
"print_as_title",
")",
":",
"# make color inputs consistent hex format",
"if",
"colors",
".",
"is_color_like",
"(",
"with_condition_color",
")",
":",
"with_condition_color",
"=",
"colors",
".",
"to_hex",
"(",
"with_condition_color",
")",
"if",
"colors",
".",
"is_color_like",
"(",
"no_condition_color",
")",
":",
"no_condition_color",
"=",
"colors",
".",
"to_hex",
"(",
"no_condition_color",
")",
"## prepare data to be plotted; producing 3 outputs:",
"# - `condition`, series containing category labels to be plotted",
"# - `label_map` (mapping condition values to plot labels)",
"# - `color_map` (mapping condition values to plotted colors)",
"if",
"threshold",
"is",
"not",
"None",
":",
"is_median",
"=",
"threshold",
"==",
"\"median\"",
"if",
"is_median",
":",
"threshold",
"=",
"df",
"[",
"condition_col",
"]",
".",
"median",
"(",
")",
"label_suffix",
"=",
"float_str",
"(",
"threshold",
")",
"condition",
"=",
"df",
"[",
"condition_col",
"]",
">",
"threshold",
"default_label_no_condition",
"=",
"\"%s ≤ %s\" %",
"(",
"o",
"ndition_col, ",
"l",
"bel_suffix)",
"",
"if",
"is_median",
":",
"label_suffix",
"+=",
"\" (median)\"",
"default_label_with_condition",
"=",
"\"%s > %s\"",
"%",
"(",
"condition_col",
",",
"label_suffix",
")",
"with_condition_label",
"=",
"with_condition_label",
"or",
"default_label_with_condition",
"no_condition_label",
"=",
"no_condition_label",
"or",
"default_label_no_condition",
"if",
"not",
"label_map",
":",
"label_map",
"=",
"{",
"False",
":",
"no_condition_label",
",",
"True",
":",
"with_condition_label",
"}",
"if",
"not",
"color_map",
":",
"color_map",
"=",
"{",
"False",
":",
"no_condition_color",
",",
"True",
":",
"with_condition_color",
"}",
"elif",
"df",
"[",
"condition_col",
"]",
".",
"dtype",
"==",
"'O'",
"or",
"df",
"[",
"condition_col",
"]",
".",
"dtype",
".",
"name",
"==",
"\"category\"",
":",
"condition",
"=",
"df",
"[",
"condition_col",
"]",
".",
"astype",
"(",
"\"category\"",
")",
"if",
"not",
"label_map",
":",
"label_map",
"=",
"dict",
"(",
")",
"[",
"label_map",
".",
"update",
"(",
"{",
"condition_value",
":",
"'{} = {}'",
".",
"format",
"(",
"condition_col",
",",
"condition_value",
")",
"}",
")",
"for",
"condition_value",
"in",
"condition",
".",
"unique",
"(",
")",
"]",
"if",
"not",
"color_map",
":",
"rgb_values",
"=",
"sb",
".",
"color_palette",
"(",
"color_palette",
",",
"len",
"(",
"label_map",
".",
"keys",
"(",
")",
")",
")",
"hex_values",
"=",
"[",
"colors",
".",
"to_hex",
"(",
"col",
")",
"for",
"col",
"in",
"rgb_values",
"]",
"color_map",
"=",
"dict",
"(",
"zip",
"(",
"label_map",
".",
"keys",
"(",
")",
",",
"hex_values",
")",
")",
"elif",
"df",
"[",
"condition_col",
"]",
".",
"dtype",
"==",
"'bool'",
":",
"condition",
"=",
"df",
"[",
"condition_col",
"]",
"default_label_with_condition",
"=",
"\"= {}\"",
".",
"format",
"(",
"condition_col",
")",
"default_label_no_condition",
"=",
"\"¬ {}\".",
"f",
"ormat(",
"c",
"ondition_col)",
"",
"with_condition_label",
"=",
"with_condition_label",
"or",
"default_label_with_condition",
"no_condition_label",
"=",
"no_condition_label",
"or",
"default_label_no_condition",
"if",
"not",
"label_map",
":",
"label_map",
"=",
"{",
"False",
":",
"no_condition_label",
",",
"True",
":",
"with_condition_label",
"}",
"if",
"not",
"color_map",
":",
"color_map",
"=",
"{",
"False",
":",
"no_condition_color",
",",
"True",
":",
"with_condition_color",
"}",
"else",
":",
"raise",
"ValueError",
"(",
"'Don\\'t know how to plot data of type\\\n {}'",
".",
"format",
"(",
"df",
"[",
"condition_col",
"]",
".",
"dtype",
")",
")",
"# produce kmf plot for each category (group) identified above",
"kmf",
"=",
"KaplanMeierFitter",
"(",
")",
"grp_desc",
"=",
"list",
"(",
")",
"grp_survival_data",
"=",
"dict",
"(",
")",
"grp_event_data",
"=",
"dict",
"(",
")",
"grp_names",
"=",
"list",
"(",
"condition",
".",
"unique",
"(",
")",
")",
"for",
"grp_name",
",",
"grp_df",
"in",
"df",
".",
"groupby",
"(",
"condition",
")",
":",
"grp_survival",
"=",
"grp_df",
"[",
"survival_col",
"]",
"grp_event",
"=",
"(",
"grp_df",
"[",
"censor_col",
"]",
".",
"astype",
"(",
"bool",
")",
")",
"grp_label",
"=",
"label_map",
"[",
"grp_name",
"]",
"grp_color",
"=",
"color_map",
"[",
"grp_name",
"]",
"kmf",
".",
"fit",
"(",
"grp_survival",
",",
"grp_event",
",",
"label",
"=",
"grp_label",
")",
"desc_str",
"=",
"\"# {}: {}\"",
".",
"format",
"(",
"grp_label",
",",
"len",
"(",
"grp_survival",
")",
")",
"grp_desc",
".",
"append",
"(",
"desc_str",
")",
"grp_survival_data",
"[",
"grp_name",
"]",
"=",
"grp_survival",
"grp_event_data",
"[",
"grp_name",
"]",
"=",
"grp_event",
"if",
"ax",
":",
"ax",
"=",
"kmf",
".",
"plot",
"(",
"ax",
"=",
"ax",
",",
"show_censors",
"=",
"True",
",",
"ci_show",
"=",
"ci_show",
",",
"color",
"=",
"grp_color",
")",
"else",
":",
"ax",
"=",
"kmf",
".",
"plot",
"(",
"show_censors",
"=",
"True",
",",
"ci_show",
"=",
"ci_show",
",",
"color",
"=",
"grp_color",
")",
"## format the plot",
"# Set the y-axis to range 0 to 1",
"ax",
".",
"set_ylim",
"(",
"0",
",",
"1",
")",
"y_tick_vals",
"=",
"ax",
".",
"get_yticks",
"(",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"\"%d\"",
"%",
"int",
"(",
"y_tick_val",
"*",
"100",
")",
"for",
"y_tick_val",
"in",
"y_tick_vals",
"]",
")",
"# plot title",
"if",
"title",
":",
"ax",
".",
"set_title",
"(",
"title",
")",
"elif",
"print_as_title",
":",
"ax",
".",
"set_title",
"(",
"' | '",
".",
"join",
"(",
"grp_desc",
")",
")",
"else",
":",
"[",
"print",
"(",
"desc",
")",
"for",
"desc",
"in",
"grp_desc",
"]",
"# axis labels",
"if",
"xlabel",
":",
"ax",
".",
"set_xlabel",
"(",
"xlabel",
")",
"if",
"ylabel",
":",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"## summarize analytical version of results",
"## again using same groups as are plotted",
"if",
"len",
"(",
"grp_names",
")",
"==",
"2",
":",
"# use log-rank test for 2 groups",
"results",
"=",
"logrank_test",
"(",
"grp_survival_data",
"[",
"grp_names",
"[",
"0",
"]",
"]",
",",
"grp_survival_data",
"[",
"grp_names",
"[",
"1",
"]",
"]",
",",
"event_observed_A",
"=",
"grp_event_data",
"[",
"grp_names",
"[",
"0",
"]",
"]",
",",
"event_observed_B",
"=",
"grp_event_data",
"[",
"grp_names",
"[",
"1",
"]",
"]",
")",
"elif",
"len",
"(",
"grp_names",
")",
"==",
"1",
":",
"# no analytical result for 1 or 0 groups",
"results",
"=",
"NullSurvivalResults",
"(",
")",
"else",
":",
"# cox PH fitter for >2 groups",
"cf",
"=",
"CoxPHFitter",
"(",
")",
"cox_df",
"=",
"patsy",
".",
"dmatrix",
"(",
"'+'",
".",
"join",
"(",
"[",
"condition_col",
",",
"survival_col",
",",
"censor_col",
"]",
")",
",",
"df",
",",
"return_type",
"=",
"'dataframe'",
")",
"del",
"cox_df",
"[",
"'Intercept'",
"]",
"results",
"=",
"cf",
".",
"fit",
"(",
"cox_df",
",",
"survival_col",
",",
"event_col",
"=",
"censor_col",
")",
"results",
".",
"print_summary",
"(",
")",
"# add metadata to results object so caller can print them",
"results",
".",
"survival_data_series",
"=",
"grp_survival_data",
"results",
".",
"event_data_series",
"=",
"grp_event_data",
"results",
".",
"desc",
"=",
"grp_desc",
"return",
"results"
] |
Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col.
All inputs are required - this function is intended to be called by `plot_kmf`.
|
[
"Helper",
"function",
"to",
"produce",
"a",
"single",
"KM",
"survival",
"plot",
"among",
"observations",
"in",
"df",
"by",
"groups",
"defined",
"by",
"condition_col",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/survival.py#L31-L171
|
train
|
hammerlab/cohorts
|
cohorts/survival.py
|
plot_kmf
|
def plot_kmf(df,
condition_col,
censor_col,
survival_col,
strata_col=None,
threshold=None,
title=None,
xlabel=None,
ylabel=None,
ax=None,
with_condition_color="#B38600",
no_condition_color="#A941AC",
with_condition_label=None,
no_condition_label=None,
color_map=None,
label_map=None,
color_palette="Set1",
ci_show=False,
print_as_title=False):
"""
Plot survival curves by splitting the dataset into two groups based on
condition_col. Report results for a log-rank test (if two groups are plotted)
or CoxPH survival analysis (if >2 groups) for association with survival.
Regarding definition of groups:
If condition_col is numeric, values are split into 2 groups.
- if threshold is defined, the groups are split on being > or < condition_col
- if threshold == 'median', the threshold is set to the median of condition_col
If condition_col is categorical or string, results are plotted for each unique value in the dataset.
If condition_col is None, results are plotted for all observations
Currently, if `strata_col` is given, the results are repeated among each stratum of the df.
A truly "stratified" analysis is not yet supported by may be soon.
Parameters
----------
df: dataframe
condition_col: string, column which contains the condition to split on
survival_col: string, column which contains the survival time
censor_col: string,
strata_col: optional string, denoting column containing data to
stratify by (default: None)
threshold: int or string, if int, condition_col is thresholded at int,
if 'median', condition_col thresholded
at its median
if 'median-per-strata', & if stratified analysis
then condition_col thresholded by strata
title: Title for the plot, default None
ax: an existing matplotlib ax, optional, default None
note: not currently supported when `strata_col` is not None
with_condition_color: str, hex code color for the with-condition curve
no_condition_color: str, hex code color for the no-condition curve
with_condition_label: str, optional, label for True condition case
no_condition_label: str, optional, label for False condition case
color_map: dict, optional, mapping of hex-values to condition text
in the form of {value_name: color_hex_code}.
defaults to `sb.color_palette` using `default_color_palette` name,
or *_condition_color options in case of boolean operators.
label_map: dict, optional, mapping of labels to condition text.
defaults to "condition_name = condition_value", or *_condition_label
options in case of boolean operators.
color_palette: str, optional, name of sb.color_palette to use
if color_map not provided.
print_as_title: bool, optional, whether or not to print text
within the plot's title vs. stdout, default False
"""
# set reasonable default threshold value depending on type of condition_col
if threshold is None:
if df[condition_col].dtype != "bool" and \
np.issubdtype(df[condition_col].dtype, np.number):
threshold = "median"
# check inputs for threshold for validity
elif isinstance(threshold, numbers.Number):
logger.debug("threshold value is numeric")
elif threshold not in ("median", "median-per-strata"):
raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.")
elif threshold == "median-per-strata" and strata_col is None:
raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?")
# construct kwarg dict to pass to _plot_kmf_single.
# start with args that do not vary according to strata_col
arglist = dict(
condition_col=condition_col,
survival_col=survival_col,
censor_col=censor_col,
threshold=threshold,
with_condition_color=with_condition_color,
no_condition_color=no_condition_color,
with_condition_label=with_condition_label,
no_condition_label=no_condition_label,
color_map=color_map,
label_map=label_map,
xlabel=xlabel,
ylabel=ylabel,
ci_show=ci_show,
color_palette=color_palette,
print_as_title=print_as_title)
# if strata_col is None, pass all parameters to _plot_kmf_single
if strata_col is None:
arglist.update(dict(
df=df,
title=title,
ax=ax))
return _plot_kmf_single(**arglist)
else:
# prepare for stratified analysis
if threshold == "median":
# by default, "median" threshold should be intra-strata median
arglist["threshold"] = df[condition_col].dropna().median()
elif threshold == "median-per-strata":
arglist["threshold"] = "median"
# create axis / subplots for stratified results
if ax is not None:
raise ValueError("ax not supported with stratified analysis.")
n_strata = len(df[strata_col].unique())
f, ax = plt.subplots(n_strata, sharex=True)
# create results dict to hold per-strata results
results = dict()
# call _plot_kmf_single for each of the strata
for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)):
if n_strata == 1:
arglist["ax"] = ax
else:
arglist["ax"] = ax[i]
subtitle = "{}: {}".format(strata_col, strat_name)
arglist["title"] = subtitle
arglist["df"] = strat_df
results[subtitle] = plot_kmf(**arglist)
[print(desc) for desc in results[subtitle].desc]
if title:
f.suptitle(title)
return results
|
python
|
def plot_kmf(df,
condition_col,
censor_col,
survival_col,
strata_col=None,
threshold=None,
title=None,
xlabel=None,
ylabel=None,
ax=None,
with_condition_color="#B38600",
no_condition_color="#A941AC",
with_condition_label=None,
no_condition_label=None,
color_map=None,
label_map=None,
color_palette="Set1",
ci_show=False,
print_as_title=False):
"""
Plot survival curves by splitting the dataset into two groups based on
condition_col. Report results for a log-rank test (if two groups are plotted)
or CoxPH survival analysis (if >2 groups) for association with survival.
Regarding definition of groups:
If condition_col is numeric, values are split into 2 groups.
- if threshold is defined, the groups are split on being > or < condition_col
- if threshold == 'median', the threshold is set to the median of condition_col
If condition_col is categorical or string, results are plotted for each unique value in the dataset.
If condition_col is None, results are plotted for all observations
Currently, if `strata_col` is given, the results are repeated among each stratum of the df.
A truly "stratified" analysis is not yet supported by may be soon.
Parameters
----------
df: dataframe
condition_col: string, column which contains the condition to split on
survival_col: string, column which contains the survival time
censor_col: string,
strata_col: optional string, denoting column containing data to
stratify by (default: None)
threshold: int or string, if int, condition_col is thresholded at int,
if 'median', condition_col thresholded
at its median
if 'median-per-strata', & if stratified analysis
then condition_col thresholded by strata
title: Title for the plot, default None
ax: an existing matplotlib ax, optional, default None
note: not currently supported when `strata_col` is not None
with_condition_color: str, hex code color for the with-condition curve
no_condition_color: str, hex code color for the no-condition curve
with_condition_label: str, optional, label for True condition case
no_condition_label: str, optional, label for False condition case
color_map: dict, optional, mapping of hex-values to condition text
in the form of {value_name: color_hex_code}.
defaults to `sb.color_palette` using `default_color_palette` name,
or *_condition_color options in case of boolean operators.
label_map: dict, optional, mapping of labels to condition text.
defaults to "condition_name = condition_value", or *_condition_label
options in case of boolean operators.
color_palette: str, optional, name of sb.color_palette to use
if color_map not provided.
print_as_title: bool, optional, whether or not to print text
within the plot's title vs. stdout, default False
"""
# set reasonable default threshold value depending on type of condition_col
if threshold is None:
if df[condition_col].dtype != "bool" and \
np.issubdtype(df[condition_col].dtype, np.number):
threshold = "median"
# check inputs for threshold for validity
elif isinstance(threshold, numbers.Number):
logger.debug("threshold value is numeric")
elif threshold not in ("median", "median-per-strata"):
raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.")
elif threshold == "median-per-strata" and strata_col is None:
raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?")
# construct kwarg dict to pass to _plot_kmf_single.
# start with args that do not vary according to strata_col
arglist = dict(
condition_col=condition_col,
survival_col=survival_col,
censor_col=censor_col,
threshold=threshold,
with_condition_color=with_condition_color,
no_condition_color=no_condition_color,
with_condition_label=with_condition_label,
no_condition_label=no_condition_label,
color_map=color_map,
label_map=label_map,
xlabel=xlabel,
ylabel=ylabel,
ci_show=ci_show,
color_palette=color_palette,
print_as_title=print_as_title)
# if strata_col is None, pass all parameters to _plot_kmf_single
if strata_col is None:
arglist.update(dict(
df=df,
title=title,
ax=ax))
return _plot_kmf_single(**arglist)
else:
# prepare for stratified analysis
if threshold == "median":
# by default, "median" threshold should be intra-strata median
arglist["threshold"] = df[condition_col].dropna().median()
elif threshold == "median-per-strata":
arglist["threshold"] = "median"
# create axis / subplots for stratified results
if ax is not None:
raise ValueError("ax not supported with stratified analysis.")
n_strata = len(df[strata_col].unique())
f, ax = plt.subplots(n_strata, sharex=True)
# create results dict to hold per-strata results
results = dict()
# call _plot_kmf_single for each of the strata
for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)):
if n_strata == 1:
arglist["ax"] = ax
else:
arglist["ax"] = ax[i]
subtitle = "{}: {}".format(strata_col, strat_name)
arglist["title"] = subtitle
arglist["df"] = strat_df
results[subtitle] = plot_kmf(**arglist)
[print(desc) for desc in results[subtitle].desc]
if title:
f.suptitle(title)
return results
|
[
"def",
"plot_kmf",
"(",
"df",
",",
"condition_col",
",",
"censor_col",
",",
"survival_col",
",",
"strata_col",
"=",
"None",
",",
"threshold",
"=",
"None",
",",
"title",
"=",
"None",
",",
"xlabel",
"=",
"None",
",",
"ylabel",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"with_condition_color",
"=",
"\"#B38600\"",
",",
"no_condition_color",
"=",
"\"#A941AC\"",
",",
"with_condition_label",
"=",
"None",
",",
"no_condition_label",
"=",
"None",
",",
"color_map",
"=",
"None",
",",
"label_map",
"=",
"None",
",",
"color_palette",
"=",
"\"Set1\"",
",",
"ci_show",
"=",
"False",
",",
"print_as_title",
"=",
"False",
")",
":",
"# set reasonable default threshold value depending on type of condition_col",
"if",
"threshold",
"is",
"None",
":",
"if",
"df",
"[",
"condition_col",
"]",
".",
"dtype",
"!=",
"\"bool\"",
"and",
"np",
".",
"issubdtype",
"(",
"df",
"[",
"condition_col",
"]",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"threshold",
"=",
"\"median\"",
"# check inputs for threshold for validity",
"elif",
"isinstance",
"(",
"threshold",
",",
"numbers",
".",
"Number",
")",
":",
"logger",
".",
"debug",
"(",
"\"threshold value is numeric\"",
")",
"elif",
"threshold",
"not",
"in",
"(",
"\"median\"",
",",
"\"median-per-strata\"",
")",
":",
"raise",
"ValueError",
"(",
"\"invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.\"",
")",
"elif",
"threshold",
"==",
"\"median-per-strata\"",
"and",
"strata_col",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?\"",
")",
"# construct kwarg dict to pass to _plot_kmf_single.",
"# start with args that do not vary according to strata_col",
"arglist",
"=",
"dict",
"(",
"condition_col",
"=",
"condition_col",
",",
"survival_col",
"=",
"survival_col",
",",
"censor_col",
"=",
"censor_col",
",",
"threshold",
"=",
"threshold",
",",
"with_condition_color",
"=",
"with_condition_color",
",",
"no_condition_color",
"=",
"no_condition_color",
",",
"with_condition_label",
"=",
"with_condition_label",
",",
"no_condition_label",
"=",
"no_condition_label",
",",
"color_map",
"=",
"color_map",
",",
"label_map",
"=",
"label_map",
",",
"xlabel",
"=",
"xlabel",
",",
"ylabel",
"=",
"ylabel",
",",
"ci_show",
"=",
"ci_show",
",",
"color_palette",
"=",
"color_palette",
",",
"print_as_title",
"=",
"print_as_title",
")",
"# if strata_col is None, pass all parameters to _plot_kmf_single",
"if",
"strata_col",
"is",
"None",
":",
"arglist",
".",
"update",
"(",
"dict",
"(",
"df",
"=",
"df",
",",
"title",
"=",
"title",
",",
"ax",
"=",
"ax",
")",
")",
"return",
"_plot_kmf_single",
"(",
"*",
"*",
"arglist",
")",
"else",
":",
"# prepare for stratified analysis",
"if",
"threshold",
"==",
"\"median\"",
":",
"# by default, \"median\" threshold should be intra-strata median",
"arglist",
"[",
"\"threshold\"",
"]",
"=",
"df",
"[",
"condition_col",
"]",
".",
"dropna",
"(",
")",
".",
"median",
"(",
")",
"elif",
"threshold",
"==",
"\"median-per-strata\"",
":",
"arglist",
"[",
"\"threshold\"",
"]",
"=",
"\"median\"",
"# create axis / subplots for stratified results",
"if",
"ax",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"ax not supported with stratified analysis.\"",
")",
"n_strata",
"=",
"len",
"(",
"df",
"[",
"strata_col",
"]",
".",
"unique",
"(",
")",
")",
"f",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"n_strata",
",",
"sharex",
"=",
"True",
")",
"# create results dict to hold per-strata results",
"results",
"=",
"dict",
"(",
")",
"# call _plot_kmf_single for each of the strata",
"for",
"i",
",",
"(",
"strat_name",
",",
"strat_df",
")",
"in",
"enumerate",
"(",
"df",
".",
"groupby",
"(",
"strata_col",
")",
")",
":",
"if",
"n_strata",
"==",
"1",
":",
"arglist",
"[",
"\"ax\"",
"]",
"=",
"ax",
"else",
":",
"arglist",
"[",
"\"ax\"",
"]",
"=",
"ax",
"[",
"i",
"]",
"subtitle",
"=",
"\"{}: {}\"",
".",
"format",
"(",
"strata_col",
",",
"strat_name",
")",
"arglist",
"[",
"\"title\"",
"]",
"=",
"subtitle",
"arglist",
"[",
"\"df\"",
"]",
"=",
"strat_df",
"results",
"[",
"subtitle",
"]",
"=",
"plot_kmf",
"(",
"*",
"*",
"arglist",
")",
"[",
"print",
"(",
"desc",
")",
"for",
"desc",
"in",
"results",
"[",
"subtitle",
"]",
".",
"desc",
"]",
"if",
"title",
":",
"f",
".",
"suptitle",
"(",
"title",
")",
"return",
"results"
] |
Plot survival curves by splitting the dataset into two groups based on
condition_col. Report results for a log-rank test (if two groups are plotted)
or CoxPH survival analysis (if >2 groups) for association with survival.
Regarding definition of groups:
If condition_col is numeric, values are split into 2 groups.
- if threshold is defined, the groups are split on being > or < condition_col
- if threshold == 'median', the threshold is set to the median of condition_col
If condition_col is categorical or string, results are plotted for each unique value in the dataset.
If condition_col is None, results are plotted for all observations
Currently, if `strata_col` is given, the results are repeated among each stratum of the df.
A truly "stratified" analysis is not yet supported by may be soon.
Parameters
----------
df: dataframe
condition_col: string, column which contains the condition to split on
survival_col: string, column which contains the survival time
censor_col: string,
strata_col: optional string, denoting column containing data to
stratify by (default: None)
threshold: int or string, if int, condition_col is thresholded at int,
if 'median', condition_col thresholded
at its median
if 'median-per-strata', & if stratified analysis
then condition_col thresholded by strata
title: Title for the plot, default None
ax: an existing matplotlib ax, optional, default None
note: not currently supported when `strata_col` is not None
with_condition_color: str, hex code color for the with-condition curve
no_condition_color: str, hex code color for the no-condition curve
with_condition_label: str, optional, label for True condition case
no_condition_label: str, optional, label for False condition case
color_map: dict, optional, mapping of hex-values to condition text
in the form of {value_name: color_hex_code}.
defaults to `sb.color_palette` using `default_color_palette` name,
or *_condition_color options in case of boolean operators.
label_map: dict, optional, mapping of labels to condition text.
defaults to "condition_name = condition_value", or *_condition_label
options in case of boolean operators.
color_palette: str, optional, name of sb.color_palette to use
if color_map not provided.
print_as_title: bool, optional, whether or not to print text
within the plot's title vs. stdout, default False
|
[
"Plot",
"survival",
"curves",
"by",
"splitting",
"the",
"dataset",
"into",
"two",
"groups",
"based",
"on",
"condition_col",
".",
"Report",
"results",
"for",
"a",
"log",
"-",
"rank",
"test",
"(",
"if",
"two",
"groups",
"are",
"plotted",
")",
"or",
"CoxPH",
"survival",
"analysis",
"(",
"if",
">",
"2",
"groups",
")",
"for",
"association",
"with",
"survival",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/survival.py#L174-L307
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/util/textformat.py
|
FormattedText.concat
|
def concat(self, formatted_text):
""":type formatted_text: FormattedText"""
assert self._is_compatible(formatted_text), "Cannot concat text with different modes"
self.text += formatted_text.text
return self
|
python
|
def concat(self, formatted_text):
""":type formatted_text: FormattedText"""
assert self._is_compatible(formatted_text), "Cannot concat text with different modes"
self.text += formatted_text.text
return self
|
[
"def",
"concat",
"(",
"self",
",",
"formatted_text",
")",
":",
"assert",
"self",
".",
"_is_compatible",
"(",
"formatted_text",
")",
",",
"\"Cannot concat text with different modes\"",
"self",
".",
"text",
"+=",
"formatted_text",
".",
"text",
"return",
"self"
] |
:type formatted_text: FormattedText
|
[
":",
"type",
"formatted_text",
":",
"FormattedText"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L42-L46
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/util/textformat.py
|
FormattedText.join
|
def join(self, formatted_texts):
""":type formatted_texts: list[FormattedText]"""
formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator
for formatted_text in formatted_texts:
assert self._is_compatible(formatted_text), "Cannot join text with different modes"
self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))
return self
|
python
|
def join(self, formatted_texts):
""":type formatted_texts: list[FormattedText]"""
formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator
for formatted_text in formatted_texts:
assert self._is_compatible(formatted_text), "Cannot join text with different modes"
self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts))
return self
|
[
"def",
"join",
"(",
"self",
",",
"formatted_texts",
")",
":",
"formatted_texts",
"=",
"list",
"(",
"formatted_texts",
")",
"# so that after the first iteration elements are not lost if generator",
"for",
"formatted_text",
"in",
"formatted_texts",
":",
"assert",
"self",
".",
"_is_compatible",
"(",
"formatted_text",
")",
",",
"\"Cannot join text with different modes\"",
"self",
".",
"text",
"=",
"self",
".",
"text",
".",
"join",
"(",
"(",
"formatted_text",
".",
"text",
"for",
"formatted_text",
"in",
"formatted_texts",
")",
")",
"return",
"self"
] |
:type formatted_texts: list[FormattedText]
|
[
":",
"type",
"formatted_texts",
":",
"list",
"[",
"FormattedText",
"]"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L48-L54
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/util/textformat.py
|
FormattedTextStringFormat.concat
|
def concat(self, *args, **kwargs):
"""
:type args: FormattedText
:type kwargs: FormattedText
"""
for arg in args:
assert self.formatted_text._is_compatible(arg), "Cannot concat text with different modes"
self.format_args.append(arg.text)
for kwarg in kwargs:
value = kwargs[kwarg]
assert self.formatted_text._is_compatible(value), "Cannot concat text with different modes"
self.format_kwargs[kwarg] = value.text
return self
|
python
|
def concat(self, *args, **kwargs):
"""
:type args: FormattedText
:type kwargs: FormattedText
"""
for arg in args:
assert self.formatted_text._is_compatible(arg), "Cannot concat text with different modes"
self.format_args.append(arg.text)
for kwarg in kwargs:
value = kwargs[kwarg]
assert self.formatted_text._is_compatible(value), "Cannot concat text with different modes"
self.format_kwargs[kwarg] = value.text
return self
|
[
"def",
"concat",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"arg",
"in",
"args",
":",
"assert",
"self",
".",
"formatted_text",
".",
"_is_compatible",
"(",
"arg",
")",
",",
"\"Cannot concat text with different modes\"",
"self",
".",
"format_args",
".",
"append",
"(",
"arg",
".",
"text",
")",
"for",
"kwarg",
"in",
"kwargs",
":",
"value",
"=",
"kwargs",
"[",
"kwarg",
"]",
"assert",
"self",
".",
"formatted_text",
".",
"_is_compatible",
"(",
"value",
")",
",",
"\"Cannot concat text with different modes\"",
"self",
".",
"format_kwargs",
"[",
"kwarg",
"]",
"=",
"value",
".",
"text",
"return",
"self"
] |
:type args: FormattedText
:type kwargs: FormattedText
|
[
":",
"type",
"args",
":",
"FormattedText",
":",
"type",
"kwargs",
":",
"FormattedText"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L251-L263
|
train
|
hammerlab/cohorts
|
cohorts/random.py
|
random_cohort
|
def random_cohort(size, cache_dir, data_dir=None,
min_random_variants=None,
max_random_variants=None,
seed_val=1234):
"""
Parameters
----------
min_random_variants: optional, int
Minimum number of random variants to be generated per patient.
max_random_variants: optional, int
Maximum number of random variants to be generated per patient.
"""
seed(seed_val)
d = {}
d["id"] = [str(id) for id in range(size)]
d["age"] = choice([10, 15, 28, 32, 59, 62, 64, 66, 68], size)
d["smoker"] = choice([False, True], size)
d["OS"] = [randint(10, 1000) for i in range(size)]
d["PFS"] = [int(os * 0.6) for os in d["OS"]]
d["benefit"] = [pfs < 50 for pfs in d["PFS"]]
d["random"] = [randint(100) for i in range(size)]
d["random_boolean"] = choice([False, True], size)
d["benefit_correlate"] = [randint(50) if benefit else randint(20) for benefit in d["benefit"]]
d["benefit_correlate_boolean"] = [True if corr > 10 else False for corr in d["benefit_correlate"]]
d["deceased"] = choice([False, True], size)
d["progressed_or_deceased"] = [deceased or choice([False, True]) for deceased in d["deceased"]]
df = pd.DataFrame(d)
patients = []
for i, row in df.iterrows():
snv_vcf_paths = None
if max_random_variants is not None and min_random_variants is not None:
if data_dir is None:
raise ValueError("data_dir must be provided if random variants are being generated.")
vcf_path = path.join(data_dir, "patient_%s_mutect.vcf" % row["id"])
generate_simple_vcf(
vcf_path, generate_random_missense_variants(num_variants=randint(min_random_variants, max_random_variants)))
snv_vcf_paths = [vcf_path]
patient = Patient(
id=row["id"],
os=row["OS"],
pfs=row["PFS"],
benefit=row["benefit"],
deceased=row["deceased"],
progressed_or_deceased=row["progressed_or_deceased"],
hla_alleles=["HLA-A02:01"],
variants={"snv": snv_vcf_paths},
additional_data=row)
patients.append(patient)
return Cohort(
patients=patients,
cache_dir=cache_dir,
mhc_class=RandomBindingPredictor)
|
python
|
def random_cohort(size, cache_dir, data_dir=None,
min_random_variants=None,
max_random_variants=None,
seed_val=1234):
"""
Parameters
----------
min_random_variants: optional, int
Minimum number of random variants to be generated per patient.
max_random_variants: optional, int
Maximum number of random variants to be generated per patient.
"""
seed(seed_val)
d = {}
d["id"] = [str(id) for id in range(size)]
d["age"] = choice([10, 15, 28, 32, 59, 62, 64, 66, 68], size)
d["smoker"] = choice([False, True], size)
d["OS"] = [randint(10, 1000) for i in range(size)]
d["PFS"] = [int(os * 0.6) for os in d["OS"]]
d["benefit"] = [pfs < 50 for pfs in d["PFS"]]
d["random"] = [randint(100) for i in range(size)]
d["random_boolean"] = choice([False, True], size)
d["benefit_correlate"] = [randint(50) if benefit else randint(20) for benefit in d["benefit"]]
d["benefit_correlate_boolean"] = [True if corr > 10 else False for corr in d["benefit_correlate"]]
d["deceased"] = choice([False, True], size)
d["progressed_or_deceased"] = [deceased or choice([False, True]) for deceased in d["deceased"]]
df = pd.DataFrame(d)
patients = []
for i, row in df.iterrows():
snv_vcf_paths = None
if max_random_variants is not None and min_random_variants is not None:
if data_dir is None:
raise ValueError("data_dir must be provided if random variants are being generated.")
vcf_path = path.join(data_dir, "patient_%s_mutect.vcf" % row["id"])
generate_simple_vcf(
vcf_path, generate_random_missense_variants(num_variants=randint(min_random_variants, max_random_variants)))
snv_vcf_paths = [vcf_path]
patient = Patient(
id=row["id"],
os=row["OS"],
pfs=row["PFS"],
benefit=row["benefit"],
deceased=row["deceased"],
progressed_or_deceased=row["progressed_or_deceased"],
hla_alleles=["HLA-A02:01"],
variants={"snv": snv_vcf_paths},
additional_data=row)
patients.append(patient)
return Cohort(
patients=patients,
cache_dir=cache_dir,
mhc_class=RandomBindingPredictor)
|
[
"def",
"random_cohort",
"(",
"size",
",",
"cache_dir",
",",
"data_dir",
"=",
"None",
",",
"min_random_variants",
"=",
"None",
",",
"max_random_variants",
"=",
"None",
",",
"seed_val",
"=",
"1234",
")",
":",
"seed",
"(",
"seed_val",
")",
"d",
"=",
"{",
"}",
"d",
"[",
"\"id\"",
"]",
"=",
"[",
"str",
"(",
"id",
")",
"for",
"id",
"in",
"range",
"(",
"size",
")",
"]",
"d",
"[",
"\"age\"",
"]",
"=",
"choice",
"(",
"[",
"10",
",",
"15",
",",
"28",
",",
"32",
",",
"59",
",",
"62",
",",
"64",
",",
"66",
",",
"68",
"]",
",",
"size",
")",
"d",
"[",
"\"smoker\"",
"]",
"=",
"choice",
"(",
"[",
"False",
",",
"True",
"]",
",",
"size",
")",
"d",
"[",
"\"OS\"",
"]",
"=",
"[",
"randint",
"(",
"10",
",",
"1000",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
"]",
"d",
"[",
"\"PFS\"",
"]",
"=",
"[",
"int",
"(",
"os",
"*",
"0.6",
")",
"for",
"os",
"in",
"d",
"[",
"\"OS\"",
"]",
"]",
"d",
"[",
"\"benefit\"",
"]",
"=",
"[",
"pfs",
"<",
"50",
"for",
"pfs",
"in",
"d",
"[",
"\"PFS\"",
"]",
"]",
"d",
"[",
"\"random\"",
"]",
"=",
"[",
"randint",
"(",
"100",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
"]",
"d",
"[",
"\"random_boolean\"",
"]",
"=",
"choice",
"(",
"[",
"False",
",",
"True",
"]",
",",
"size",
")",
"d",
"[",
"\"benefit_correlate\"",
"]",
"=",
"[",
"randint",
"(",
"50",
")",
"if",
"benefit",
"else",
"randint",
"(",
"20",
")",
"for",
"benefit",
"in",
"d",
"[",
"\"benefit\"",
"]",
"]",
"d",
"[",
"\"benefit_correlate_boolean\"",
"]",
"=",
"[",
"True",
"if",
"corr",
">",
"10",
"else",
"False",
"for",
"corr",
"in",
"d",
"[",
"\"benefit_correlate\"",
"]",
"]",
"d",
"[",
"\"deceased\"",
"]",
"=",
"choice",
"(",
"[",
"False",
",",
"True",
"]",
",",
"size",
")",
"d",
"[",
"\"progressed_or_deceased\"",
"]",
"=",
"[",
"deceased",
"or",
"choice",
"(",
"[",
"False",
",",
"True",
"]",
")",
"for",
"deceased",
"in",
"d",
"[",
"\"deceased\"",
"]",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"d",
")",
"patients",
"=",
"[",
"]",
"for",
"i",
",",
"row",
"in",
"df",
".",
"iterrows",
"(",
")",
":",
"snv_vcf_paths",
"=",
"None",
"if",
"max_random_variants",
"is",
"not",
"None",
"and",
"min_random_variants",
"is",
"not",
"None",
":",
"if",
"data_dir",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"data_dir must be provided if random variants are being generated.\"",
")",
"vcf_path",
"=",
"path",
".",
"join",
"(",
"data_dir",
",",
"\"patient_%s_mutect.vcf\"",
"%",
"row",
"[",
"\"id\"",
"]",
")",
"generate_simple_vcf",
"(",
"vcf_path",
",",
"generate_random_missense_variants",
"(",
"num_variants",
"=",
"randint",
"(",
"min_random_variants",
",",
"max_random_variants",
")",
")",
")",
"snv_vcf_paths",
"=",
"[",
"vcf_path",
"]",
"patient",
"=",
"Patient",
"(",
"id",
"=",
"row",
"[",
"\"id\"",
"]",
",",
"os",
"=",
"row",
"[",
"\"OS\"",
"]",
",",
"pfs",
"=",
"row",
"[",
"\"PFS\"",
"]",
",",
"benefit",
"=",
"row",
"[",
"\"benefit\"",
"]",
",",
"deceased",
"=",
"row",
"[",
"\"deceased\"",
"]",
",",
"progressed_or_deceased",
"=",
"row",
"[",
"\"progressed_or_deceased\"",
"]",
",",
"hla_alleles",
"=",
"[",
"\"HLA-A02:01\"",
"]",
",",
"variants",
"=",
"{",
"\"snv\"",
":",
"snv_vcf_paths",
"}",
",",
"additional_data",
"=",
"row",
")",
"patients",
".",
"append",
"(",
"patient",
")",
"return",
"Cohort",
"(",
"patients",
"=",
"patients",
",",
"cache_dir",
"=",
"cache_dir",
",",
"mhc_class",
"=",
"RandomBindingPredictor",
")"
] |
Parameters
----------
min_random_variants: optional, int
Minimum number of random variants to be generated per patient.
max_random_variants: optional, int
Maximum number of random variants to be generated per patient.
|
[
"Parameters",
"----------",
"min_random_variants",
":",
"optional",
"int",
"Minimum",
"number",
"of",
"random",
"variants",
"to",
"be",
"generated",
"per",
"patient",
".",
"max_random_variants",
":",
"optional",
"int",
"Maximum",
"number",
"of",
"random",
"variants",
"to",
"be",
"generated",
"per",
"patient",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/random.py#L24-L75
|
train
|
hammerlab/cohorts
|
cohorts/random.py
|
generate_random_missense_variants
|
def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"):
"""
Generate a random collection of missense variants by trying random variants repeatedly.
"""
variants = []
for i in range(max_search):
bases = ["A", "C", "T", "G"]
random_ref = choice(bases)
bases.remove(random_ref)
random_alt = choice(bases)
random_contig = choice(["1", "2", "3", "4", "5"])
random_variant = Variant(contig=random_contig, start=randint(1, 1000000),
ref=random_ref, alt=random_alt, ensembl=reference)
try:
effects = random_variant.effects()
for effect in effects:
if isinstance(effect, Substitution):
variants.append(random_variant)
break
except:
continue
if len(variants) == num_variants:
break
return VariantCollection(variants)
|
python
|
def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"):
"""
Generate a random collection of missense variants by trying random variants repeatedly.
"""
variants = []
for i in range(max_search):
bases = ["A", "C", "T", "G"]
random_ref = choice(bases)
bases.remove(random_ref)
random_alt = choice(bases)
random_contig = choice(["1", "2", "3", "4", "5"])
random_variant = Variant(contig=random_contig, start=randint(1, 1000000),
ref=random_ref, alt=random_alt, ensembl=reference)
try:
effects = random_variant.effects()
for effect in effects:
if isinstance(effect, Substitution):
variants.append(random_variant)
break
except:
continue
if len(variants) == num_variants:
break
return VariantCollection(variants)
|
[
"def",
"generate_random_missense_variants",
"(",
"num_variants",
"=",
"10",
",",
"max_search",
"=",
"100000",
",",
"reference",
"=",
"\"GRCh37\"",
")",
":",
"variants",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"max_search",
")",
":",
"bases",
"=",
"[",
"\"A\"",
",",
"\"C\"",
",",
"\"T\"",
",",
"\"G\"",
"]",
"random_ref",
"=",
"choice",
"(",
"bases",
")",
"bases",
".",
"remove",
"(",
"random_ref",
")",
"random_alt",
"=",
"choice",
"(",
"bases",
")",
"random_contig",
"=",
"choice",
"(",
"[",
"\"1\"",
",",
"\"2\"",
",",
"\"3\"",
",",
"\"4\"",
",",
"\"5\"",
"]",
")",
"random_variant",
"=",
"Variant",
"(",
"contig",
"=",
"random_contig",
",",
"start",
"=",
"randint",
"(",
"1",
",",
"1000000",
")",
",",
"ref",
"=",
"random_ref",
",",
"alt",
"=",
"random_alt",
",",
"ensembl",
"=",
"reference",
")",
"try",
":",
"effects",
"=",
"random_variant",
".",
"effects",
"(",
")",
"for",
"effect",
"in",
"effects",
":",
"if",
"isinstance",
"(",
"effect",
",",
"Substitution",
")",
":",
"variants",
".",
"append",
"(",
"random_variant",
")",
"break",
"except",
":",
"continue",
"if",
"len",
"(",
"variants",
")",
"==",
"num_variants",
":",
"break",
"return",
"VariantCollection",
"(",
"variants",
")"
] |
Generate a random collection of missense variants by trying random variants repeatedly.
|
[
"Generate",
"a",
"random",
"collection",
"of",
"missense",
"variants",
"by",
"trying",
"random",
"variants",
"repeatedly",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/random.py#L77-L100
|
train
|
hammerlab/cohorts
|
cohorts/random.py
|
generate_simple_vcf
|
def generate_simple_vcf(filename, variant_collection):
"""
Output a very simple metadata-free VCF for each variant in a variant_collection.
"""
contigs = []
positions = []
refs = []
alts = []
for variant in variant_collection:
contigs.append("chr" + variant.contig)
positions.append(variant.start)
refs.append(variant.ref)
alts.append(variant.alt)
df = pd.DataFrame()
df["contig"] = contigs
df["position"] = positions
df["id"] = ["."] * len(variant_collection)
df["ref"] = refs
df["alt"] = alts
df["qual"] = ["."] * len(variant_collection)
df["filter"] = ["."] * len(variant_collection)
df["info"] = ["."] * len(variant_collection)
df["format"] = ["GT:AD:DP"] * len(variant_collection)
normal_ref_depths = [randint(1, 10) for v in variant_collection]
normal_alt_depths = [randint(1, 10) for v in variant_collection]
df["n1"] = ["0:%d,%d:%d" % (normal_ref_depths[i], normal_alt_depths[i],
normal_ref_depths[i] + normal_alt_depths[i])
for i in range(len(variant_collection))]
tumor_ref_depths = [randint(1, 10) for v in variant_collection]
tumor_alt_depths = [randint(1, 10) for v in variant_collection]
df["t1"] = ["0/1:%d,%d:%d" % (tumor_ref_depths[i], tumor_alt_depths[i], tumor_ref_depths[i] + tumor_alt_depths[i])
for i in range(len(variant_collection))]
with open(filename, "w") as f:
f.write("##fileformat=VCFv4.1\n")
f.write("##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\n")
with open(filename, "a") as f:
df.to_csv(f, sep="\t", index=None, header=None)
|
python
|
def generate_simple_vcf(filename, variant_collection):
"""
Output a very simple metadata-free VCF for each variant in a variant_collection.
"""
contigs = []
positions = []
refs = []
alts = []
for variant in variant_collection:
contigs.append("chr" + variant.contig)
positions.append(variant.start)
refs.append(variant.ref)
alts.append(variant.alt)
df = pd.DataFrame()
df["contig"] = contigs
df["position"] = positions
df["id"] = ["."] * len(variant_collection)
df["ref"] = refs
df["alt"] = alts
df["qual"] = ["."] * len(variant_collection)
df["filter"] = ["."] * len(variant_collection)
df["info"] = ["."] * len(variant_collection)
df["format"] = ["GT:AD:DP"] * len(variant_collection)
normal_ref_depths = [randint(1, 10) for v in variant_collection]
normal_alt_depths = [randint(1, 10) for v in variant_collection]
df["n1"] = ["0:%d,%d:%d" % (normal_ref_depths[i], normal_alt_depths[i],
normal_ref_depths[i] + normal_alt_depths[i])
for i in range(len(variant_collection))]
tumor_ref_depths = [randint(1, 10) for v in variant_collection]
tumor_alt_depths = [randint(1, 10) for v in variant_collection]
df["t1"] = ["0/1:%d,%d:%d" % (tumor_ref_depths[i], tumor_alt_depths[i], tumor_ref_depths[i] + tumor_alt_depths[i])
for i in range(len(variant_collection))]
with open(filename, "w") as f:
f.write("##fileformat=VCFv4.1\n")
f.write("##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\n")
with open(filename, "a") as f:
df.to_csv(f, sep="\t", index=None, header=None)
|
[
"def",
"generate_simple_vcf",
"(",
"filename",
",",
"variant_collection",
")",
":",
"contigs",
"=",
"[",
"]",
"positions",
"=",
"[",
"]",
"refs",
"=",
"[",
"]",
"alts",
"=",
"[",
"]",
"for",
"variant",
"in",
"variant_collection",
":",
"contigs",
".",
"append",
"(",
"\"chr\"",
"+",
"variant",
".",
"contig",
")",
"positions",
".",
"append",
"(",
"variant",
".",
"start",
")",
"refs",
".",
"append",
"(",
"variant",
".",
"ref",
")",
"alts",
".",
"append",
"(",
"variant",
".",
"alt",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"df",
"[",
"\"contig\"",
"]",
"=",
"contigs",
"df",
"[",
"\"position\"",
"]",
"=",
"positions",
"df",
"[",
"\"id\"",
"]",
"=",
"[",
"\".\"",
"]",
"*",
"len",
"(",
"variant_collection",
")",
"df",
"[",
"\"ref\"",
"]",
"=",
"refs",
"df",
"[",
"\"alt\"",
"]",
"=",
"alts",
"df",
"[",
"\"qual\"",
"]",
"=",
"[",
"\".\"",
"]",
"*",
"len",
"(",
"variant_collection",
")",
"df",
"[",
"\"filter\"",
"]",
"=",
"[",
"\".\"",
"]",
"*",
"len",
"(",
"variant_collection",
")",
"df",
"[",
"\"info\"",
"]",
"=",
"[",
"\".\"",
"]",
"*",
"len",
"(",
"variant_collection",
")",
"df",
"[",
"\"format\"",
"]",
"=",
"[",
"\"GT:AD:DP\"",
"]",
"*",
"len",
"(",
"variant_collection",
")",
"normal_ref_depths",
"=",
"[",
"randint",
"(",
"1",
",",
"10",
")",
"for",
"v",
"in",
"variant_collection",
"]",
"normal_alt_depths",
"=",
"[",
"randint",
"(",
"1",
",",
"10",
")",
"for",
"v",
"in",
"variant_collection",
"]",
"df",
"[",
"\"n1\"",
"]",
"=",
"[",
"\"0:%d,%d:%d\"",
"%",
"(",
"normal_ref_depths",
"[",
"i",
"]",
",",
"normal_alt_depths",
"[",
"i",
"]",
",",
"normal_ref_depths",
"[",
"i",
"]",
"+",
"normal_alt_depths",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"variant_collection",
")",
")",
"]",
"tumor_ref_depths",
"=",
"[",
"randint",
"(",
"1",
",",
"10",
")",
"for",
"v",
"in",
"variant_collection",
"]",
"tumor_alt_depths",
"=",
"[",
"randint",
"(",
"1",
",",
"10",
")",
"for",
"v",
"in",
"variant_collection",
"]",
"df",
"[",
"\"t1\"",
"]",
"=",
"[",
"\"0/1:%d,%d:%d\"",
"%",
"(",
"tumor_ref_depths",
"[",
"i",
"]",
",",
"tumor_alt_depths",
"[",
"i",
"]",
",",
"tumor_ref_depths",
"[",
"i",
"]",
"+",
"tumor_alt_depths",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"variant_collection",
")",
")",
"]",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"##fileformat=VCFv4.1\\n\"",
")",
"f",
".",
"write",
"(",
"\"##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\\n\"",
")",
"with",
"open",
"(",
"filename",
",",
"\"a\"",
")",
"as",
"f",
":",
"df",
".",
"to_csv",
"(",
"f",
",",
"sep",
"=",
"\"\\t\"",
",",
"index",
"=",
"None",
",",
"header",
"=",
"None",
")"
] |
Output a very simple metadata-free VCF for each variant in a variant_collection.
|
[
"Output",
"a",
"very",
"simple",
"metadata",
"-",
"free",
"VCF",
"for",
"each",
"variant",
"in",
"a",
"variant_collection",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/random.py#L102-L140
|
train
|
carletes/mock-ssh-server
|
mockssh/sftp.py
|
SFTPServerInterface.list_folder
|
def list_folder(self, path):
"""Looks up folder contents of `path.`"""
# Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70
try:
folder_contents = []
for f in os.listdir(path):
attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, f)))
attr.filename = f
folder_contents.append(attr)
return folder_contents
except OSError as e:
return SFTPServer.convert_errno(e.errno)
|
python
|
def list_folder(self, path):
"""Looks up folder contents of `path.`"""
# Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70
try:
folder_contents = []
for f in os.listdir(path):
attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, f)))
attr.filename = f
folder_contents.append(attr)
return folder_contents
except OSError as e:
return SFTPServer.convert_errno(e.errno)
|
[
"def",
"list_folder",
"(",
"self",
",",
"path",
")",
":",
"# Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70",
"try",
":",
"folder_contents",
"=",
"[",
"]",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"attr",
"=",
"paramiko",
".",
"SFTPAttributes",
".",
"from_stat",
"(",
"os",
".",
"stat",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"f",
")",
")",
")",
"attr",
".",
"filename",
"=",
"f",
"folder_contents",
".",
"append",
"(",
"attr",
")",
"return",
"folder_contents",
"except",
"OSError",
"as",
"e",
":",
"return",
"SFTPServer",
".",
"convert_errno",
"(",
"e",
".",
"errno",
")"
] |
Looks up folder contents of `path.`
|
[
"Looks",
"up",
"folder",
"contents",
"of",
"path",
"."
] |
0d724ad4a43bafcb6a4bbe28b52383528f3460cc
|
https://github.com/carletes/mock-ssh-server/blob/0d724ad4a43bafcb6a4bbe28b52383528f3460cc/mockssh/sftp.py#L142-L153
|
train
|
hammerlab/cohorts
|
cohorts/varcode_utils.py
|
filter_variants
|
def filter_variants(variant_collection, patient, filter_fn, **kwargs):
"""Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter
"""
if filter_fn:
return variant_collection.clone_with_new_elements([
variant
for variant in variant_collection
if filter_fn(FilterableVariant(
variant=variant,
variant_collection=variant_collection,
patient=patient,
), **kwargs)
])
else:
return variant_collection
|
python
|
def filter_variants(variant_collection, patient, filter_fn, **kwargs):
"""Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter
"""
if filter_fn:
return variant_collection.clone_with_new_elements([
variant
for variant in variant_collection
if filter_fn(FilterableVariant(
variant=variant,
variant_collection=variant_collection,
patient=patient,
), **kwargs)
])
else:
return variant_collection
|
[
"def",
"filter_variants",
"(",
"variant_collection",
",",
"patient",
",",
"filter_fn",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"filter_fn",
":",
"return",
"variant_collection",
".",
"clone_with_new_elements",
"(",
"[",
"variant",
"for",
"variant",
"in",
"variant_collection",
"if",
"filter_fn",
"(",
"FilterableVariant",
"(",
"variant",
"=",
"variant",
",",
"variant_collection",
"=",
"variant_collection",
",",
"patient",
"=",
"patient",
",",
")",
",",
"*",
"*",
"kwargs",
")",
"]",
")",
"else",
":",
"return",
"variant_collection"
] |
Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter
|
[
"Filter",
"variants",
"from",
"the",
"Variant",
"Collection"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/varcode_utils.py#L79-L105
|
train
|
hammerlab/cohorts
|
cohorts/varcode_utils.py
|
filter_effects
|
def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs):
"""Filter variants from the Effect Collection
Parameters
----------
effect_collection : varcode.EffectCollection
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn : function
Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved.
all_effects : boolean
Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority).
Returns
-------
varcode.EffectCollection
Filtered effect collection, with only the variants passing the filter
"""
def top_priority_maybe(effect_collection):
"""
Always (unless all_effects=True) take the top priority effect per variant
so we end up with a single effect per variant.
"""
if all_effects:
return effect_collection
return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values()))
def apply_filter_fn(filter_fn, effect):
"""
Return True if filter_fn is true for the effect or its alternate_effect.
If no alternate_effect, then just return True if filter_fn is True.
"""
applied = filter_fn(FilterableEffect(
effect=effect,
variant_collection=variant_collection,
patient=patient), **kwargs)
if hasattr(effect, "alternate_effect"):
applied_alternate = filter_fn(FilterableEffect(
effect=effect.alternate_effect,
variant_collection=variant_collection,
patient=patient), **kwargs)
return applied or applied_alternate
return applied
if filter_fn:
return top_priority_maybe(EffectCollection([
effect
for effect in effect_collection
if apply_filter_fn(filter_fn, effect)]))
else:
return top_priority_maybe(effect_collection)
|
python
|
def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs):
"""Filter variants from the Effect Collection
Parameters
----------
effect_collection : varcode.EffectCollection
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn : function
Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved.
all_effects : boolean
Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority).
Returns
-------
varcode.EffectCollection
Filtered effect collection, with only the variants passing the filter
"""
def top_priority_maybe(effect_collection):
"""
Always (unless all_effects=True) take the top priority effect per variant
so we end up with a single effect per variant.
"""
if all_effects:
return effect_collection
return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values()))
def apply_filter_fn(filter_fn, effect):
"""
Return True if filter_fn is true for the effect or its alternate_effect.
If no alternate_effect, then just return True if filter_fn is True.
"""
applied = filter_fn(FilterableEffect(
effect=effect,
variant_collection=variant_collection,
patient=patient), **kwargs)
if hasattr(effect, "alternate_effect"):
applied_alternate = filter_fn(FilterableEffect(
effect=effect.alternate_effect,
variant_collection=variant_collection,
patient=patient), **kwargs)
return applied or applied_alternate
return applied
if filter_fn:
return top_priority_maybe(EffectCollection([
effect
for effect in effect_collection
if apply_filter_fn(filter_fn, effect)]))
else:
return top_priority_maybe(effect_collection)
|
[
"def",
"filter_effects",
"(",
"effect_collection",
",",
"variant_collection",
",",
"patient",
",",
"filter_fn",
",",
"all_effects",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"top_priority_maybe",
"(",
"effect_collection",
")",
":",
"\"\"\"\n Always (unless all_effects=True) take the top priority effect per variant\n so we end up with a single effect per variant.\n \"\"\"",
"if",
"all_effects",
":",
"return",
"effect_collection",
"return",
"EffectCollection",
"(",
"list",
"(",
"effect_collection",
".",
"top_priority_effect_per_variant",
"(",
")",
".",
"values",
"(",
")",
")",
")",
"def",
"apply_filter_fn",
"(",
"filter_fn",
",",
"effect",
")",
":",
"\"\"\"\n Return True if filter_fn is true for the effect or its alternate_effect.\n If no alternate_effect, then just return True if filter_fn is True.\n \"\"\"",
"applied",
"=",
"filter_fn",
"(",
"FilterableEffect",
"(",
"effect",
"=",
"effect",
",",
"variant_collection",
"=",
"variant_collection",
",",
"patient",
"=",
"patient",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"hasattr",
"(",
"effect",
",",
"\"alternate_effect\"",
")",
":",
"applied_alternate",
"=",
"filter_fn",
"(",
"FilterableEffect",
"(",
"effect",
"=",
"effect",
".",
"alternate_effect",
",",
"variant_collection",
"=",
"variant_collection",
",",
"patient",
"=",
"patient",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"applied",
"or",
"applied_alternate",
"return",
"applied",
"if",
"filter_fn",
":",
"return",
"top_priority_maybe",
"(",
"EffectCollection",
"(",
"[",
"effect",
"for",
"effect",
"in",
"effect_collection",
"if",
"apply_filter_fn",
"(",
"filter_fn",
",",
"effect",
")",
"]",
")",
")",
"else",
":",
"return",
"top_priority_maybe",
"(",
"effect_collection",
")"
] |
Filter variants from the Effect Collection
Parameters
----------
effect_collection : varcode.EffectCollection
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn : function
Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved.
all_effects : boolean
Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority).
Returns
-------
varcode.EffectCollection
Filtered effect collection, with only the variants passing the filter
|
[
"Filter",
"variants",
"from",
"the",
"Effect",
"Collection"
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/varcode_utils.py#L107-L157
|
train
|
garethr/django-timelog
|
src/timelog/lib.py
|
count_lines_in
|
def count_lines_in(filename):
"Count lines in a file"
f = open(filename)
lines = 0
buf_size = 1024 * 1024
read_f = f.read # loop optimization
buf = read_f(buf_size)
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
return lines
|
python
|
def count_lines_in(filename):
"Count lines in a file"
f = open(filename)
lines = 0
buf_size = 1024 * 1024
read_f = f.read # loop optimization
buf = read_f(buf_size)
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
return lines
|
[
"def",
"count_lines_in",
"(",
"filename",
")",
":",
"f",
"=",
"open",
"(",
"filename",
")",
"lines",
"=",
"0",
"buf_size",
"=",
"1024",
"*",
"1024",
"read_f",
"=",
"f",
".",
"read",
"# loop optimization",
"buf",
"=",
"read_f",
"(",
"buf_size",
")",
"while",
"buf",
":",
"lines",
"+=",
"buf",
".",
"count",
"(",
"'\\n'",
")",
"buf",
"=",
"read_f",
"(",
"buf_size",
")",
"return",
"lines"
] |
Count lines in a file
|
[
"Count",
"lines",
"in",
"a",
"file"
] |
84c7015248a82faccb9d3fe4e6014645cc9ec103
|
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L16-L28
|
train
|
garethr/django-timelog
|
src/timelog/lib.py
|
view_name_from
|
def view_name_from(path):
"Resolve a path to the full python module name of the related view function"
try:
return CACHED_VIEWS[path]
except KeyError:
view = resolve(path)
module = path
name = ''
if hasattr(view.func, '__module__'):
module = resolve(path).func.__module__
if hasattr(view.func, '__name__'):
name = resolve(path).func.__name__
view = "%s.%s" % (module, name)
CACHED_VIEWS[path] = view
return view
|
python
|
def view_name_from(path):
"Resolve a path to the full python module name of the related view function"
try:
return CACHED_VIEWS[path]
except KeyError:
view = resolve(path)
module = path
name = ''
if hasattr(view.func, '__module__'):
module = resolve(path).func.__module__
if hasattr(view.func, '__name__'):
name = resolve(path).func.__name__
view = "%s.%s" % (module, name)
CACHED_VIEWS[path] = view
return view
|
[
"def",
"view_name_from",
"(",
"path",
")",
":",
"try",
":",
"return",
"CACHED_VIEWS",
"[",
"path",
"]",
"except",
"KeyError",
":",
"view",
"=",
"resolve",
"(",
"path",
")",
"module",
"=",
"path",
"name",
"=",
"''",
"if",
"hasattr",
"(",
"view",
".",
"func",
",",
"'__module__'",
")",
":",
"module",
"=",
"resolve",
"(",
"path",
")",
".",
"func",
".",
"__module__",
"if",
"hasattr",
"(",
"view",
".",
"func",
",",
"'__name__'",
")",
":",
"name",
"=",
"resolve",
"(",
"path",
")",
".",
"func",
".",
"__name__",
"view",
"=",
"\"%s.%s\"",
"%",
"(",
"module",
",",
"name",
")",
"CACHED_VIEWS",
"[",
"path",
"]",
"=",
"view",
"return",
"view"
] |
Resolve a path to the full python module name of the related view function
|
[
"Resolve",
"a",
"path",
"to",
"the",
"full",
"python",
"module",
"name",
"of",
"the",
"related",
"view",
"function"
] |
84c7015248a82faccb9d3fe4e6014645cc9ec103
|
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L30-L46
|
train
|
garethr/django-timelog
|
src/timelog/lib.py
|
generate_table_from
|
def generate_table_from(data):
"Output a nicely formatted ascii table"
table = Texttable(max_width=120)
table.add_row(["view", "method", "status", "count", "minimum", "maximum", "mean", "stdev", "queries", "querytime"])
table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"])
for item in sorted(data):
mean = round(sum(data[item]['times'])/data[item]['count'], 3)
mean_sql = round(sum(data[item]['sql'])/data[item]['count'], 3)
mean_sqltime = round(sum(data[item]['sqltime'])/data[item]['count'], 3)
sdsq = sum([(i - mean) ** 2 for i in data[item]['times']])
try:
stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5)
except ZeroDivisionError:
stdev = '0.00'
minimum = "%.2f" % min(data[item]['times'])
maximum = "%.2f" % max(data[item]['times'])
table.add_row([data[item]['view'], data[item]['method'], data[item]['status'], data[item]['count'], minimum, maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime])
return table.draw()
|
python
|
def generate_table_from(data):
"Output a nicely formatted ascii table"
table = Texttable(max_width=120)
table.add_row(["view", "method", "status", "count", "minimum", "maximum", "mean", "stdev", "queries", "querytime"])
table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"])
for item in sorted(data):
mean = round(sum(data[item]['times'])/data[item]['count'], 3)
mean_sql = round(sum(data[item]['sql'])/data[item]['count'], 3)
mean_sqltime = round(sum(data[item]['sqltime'])/data[item]['count'], 3)
sdsq = sum([(i - mean) ** 2 for i in data[item]['times']])
try:
stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5)
except ZeroDivisionError:
stdev = '0.00'
minimum = "%.2f" % min(data[item]['times'])
maximum = "%.2f" % max(data[item]['times'])
table.add_row([data[item]['view'], data[item]['method'], data[item]['status'], data[item]['count'], minimum, maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime])
return table.draw()
|
[
"def",
"generate_table_from",
"(",
"data",
")",
":",
"table",
"=",
"Texttable",
"(",
"max_width",
"=",
"120",
")",
"table",
".",
"add_row",
"(",
"[",
"\"view\"",
",",
"\"method\"",
",",
"\"status\"",
",",
"\"count\"",
",",
"\"minimum\"",
",",
"\"maximum\"",
",",
"\"mean\"",
",",
"\"stdev\"",
",",
"\"queries\"",
",",
"\"querytime\"",
"]",
")",
"table",
".",
"set_cols_align",
"(",
"[",
"\"l\"",
",",
"\"l\"",
",",
"\"l\"",
",",
"\"r\"",
",",
"\"r\"",
",",
"\"r\"",
",",
"\"r\"",
",",
"\"r\"",
",",
"\"r\"",
",",
"\"r\"",
"]",
")",
"for",
"item",
"in",
"sorted",
"(",
"data",
")",
":",
"mean",
"=",
"round",
"(",
"sum",
"(",
"data",
"[",
"item",
"]",
"[",
"'times'",
"]",
")",
"/",
"data",
"[",
"item",
"]",
"[",
"'count'",
"]",
",",
"3",
")",
"mean_sql",
"=",
"round",
"(",
"sum",
"(",
"data",
"[",
"item",
"]",
"[",
"'sql'",
"]",
")",
"/",
"data",
"[",
"item",
"]",
"[",
"'count'",
"]",
",",
"3",
")",
"mean_sqltime",
"=",
"round",
"(",
"sum",
"(",
"data",
"[",
"item",
"]",
"[",
"'sqltime'",
"]",
")",
"/",
"data",
"[",
"item",
"]",
"[",
"'count'",
"]",
",",
"3",
")",
"sdsq",
"=",
"sum",
"(",
"[",
"(",
"i",
"-",
"mean",
")",
"**",
"2",
"for",
"i",
"in",
"data",
"[",
"item",
"]",
"[",
"'times'",
"]",
"]",
")",
"try",
":",
"stdev",
"=",
"'%.2f'",
"%",
"(",
"(",
"sdsq",
"/",
"(",
"len",
"(",
"data",
"[",
"item",
"]",
"[",
"'times'",
"]",
")",
"-",
"1",
")",
")",
"**",
".5",
")",
"except",
"ZeroDivisionError",
":",
"stdev",
"=",
"'0.00'",
"minimum",
"=",
"\"%.2f\"",
"%",
"min",
"(",
"data",
"[",
"item",
"]",
"[",
"'times'",
"]",
")",
"maximum",
"=",
"\"%.2f\"",
"%",
"max",
"(",
"data",
"[",
"item",
"]",
"[",
"'times'",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"data",
"[",
"item",
"]",
"[",
"'view'",
"]",
",",
"data",
"[",
"item",
"]",
"[",
"'method'",
"]",
",",
"data",
"[",
"item",
"]",
"[",
"'status'",
"]",
",",
"data",
"[",
"item",
"]",
"[",
"'count'",
"]",
",",
"minimum",
",",
"maximum",
",",
"'%.3f'",
"%",
"mean",
",",
"stdev",
",",
"mean_sql",
",",
"mean_sqltime",
"]",
")",
"return",
"table",
".",
"draw",
"(",
")"
] |
Output a nicely formatted ascii table
|
[
"Output",
"a",
"nicely",
"formatted",
"ascii",
"table"
] |
84c7015248a82faccb9d3fe4e6014645cc9ec103
|
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L48-L70
|
train
|
garethr/django-timelog
|
src/timelog/lib.py
|
analyze_log_file
|
def analyze_log_file(logfile, pattern, reverse_paths=True, progress=True):
"Given a log file and regex group and extract the performance data"
if progress:
lines = count_lines_in(logfile)
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=lines+1).start()
counter = 0
data = {}
compiled_pattern = compile(pattern)
for line in fileinput.input([logfile]):
if progress:
counter = counter + 1
parsed = compiled_pattern.findall(line)[0]
date = parsed[0]
method = parsed[1]
path = parsed[2]
status = parsed[3]
time = parsed[4]
sql = parsed[5]
sqltime = parsed[6]
try:
ignore = False
for ignored_path in IGNORE_PATHS:
compiled_path = compile(ignored_path)
if compiled_path.match(path):
ignore = True
if not ignore:
if reverse_paths:
view = view_name_from(path)
else:
view = path
key = "%s-%s-%s" % (view, status, method)
try:
data[key]['count'] = data[key]['count'] + 1
data[key]['times'].append(float(time))
data[key]['sql'].append(int(sql))
data[key]['sqltime'].append(float(sqltime))
except KeyError:
data[key] = {
'count': 1,
'status': status,
'view': view,
'method': method,
'times': [float(time)],
'sql': [int(sql)],
'sqltime': [float(sqltime)],
}
except Resolver404:
pass
if progress:
pbar.update(counter)
if progress:
pbar.finish()
return data
|
python
|
def analyze_log_file(logfile, pattern, reverse_paths=True, progress=True):
"Given a log file and regex group and extract the performance data"
if progress:
lines = count_lines_in(logfile)
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=lines+1).start()
counter = 0
data = {}
compiled_pattern = compile(pattern)
for line in fileinput.input([logfile]):
if progress:
counter = counter + 1
parsed = compiled_pattern.findall(line)[0]
date = parsed[0]
method = parsed[1]
path = parsed[2]
status = parsed[3]
time = parsed[4]
sql = parsed[5]
sqltime = parsed[6]
try:
ignore = False
for ignored_path in IGNORE_PATHS:
compiled_path = compile(ignored_path)
if compiled_path.match(path):
ignore = True
if not ignore:
if reverse_paths:
view = view_name_from(path)
else:
view = path
key = "%s-%s-%s" % (view, status, method)
try:
data[key]['count'] = data[key]['count'] + 1
data[key]['times'].append(float(time))
data[key]['sql'].append(int(sql))
data[key]['sqltime'].append(float(sqltime))
except KeyError:
data[key] = {
'count': 1,
'status': status,
'view': view,
'method': method,
'times': [float(time)],
'sql': [int(sql)],
'sqltime': [float(sqltime)],
}
except Resolver404:
pass
if progress:
pbar.update(counter)
if progress:
pbar.finish()
return data
|
[
"def",
"analyze_log_file",
"(",
"logfile",
",",
"pattern",
",",
"reverse_paths",
"=",
"True",
",",
"progress",
"=",
"True",
")",
":",
"if",
"progress",
":",
"lines",
"=",
"count_lines_in",
"(",
"logfile",
")",
"pbar",
"=",
"ProgressBar",
"(",
"widgets",
"=",
"[",
"Percentage",
"(",
")",
",",
"Bar",
"(",
")",
"]",
",",
"maxval",
"=",
"lines",
"+",
"1",
")",
".",
"start",
"(",
")",
"counter",
"=",
"0",
"data",
"=",
"{",
"}",
"compiled_pattern",
"=",
"compile",
"(",
"pattern",
")",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"[",
"logfile",
"]",
")",
":",
"if",
"progress",
":",
"counter",
"=",
"counter",
"+",
"1",
"parsed",
"=",
"compiled_pattern",
".",
"findall",
"(",
"line",
")",
"[",
"0",
"]",
"date",
"=",
"parsed",
"[",
"0",
"]",
"method",
"=",
"parsed",
"[",
"1",
"]",
"path",
"=",
"parsed",
"[",
"2",
"]",
"status",
"=",
"parsed",
"[",
"3",
"]",
"time",
"=",
"parsed",
"[",
"4",
"]",
"sql",
"=",
"parsed",
"[",
"5",
"]",
"sqltime",
"=",
"parsed",
"[",
"6",
"]",
"try",
":",
"ignore",
"=",
"False",
"for",
"ignored_path",
"in",
"IGNORE_PATHS",
":",
"compiled_path",
"=",
"compile",
"(",
"ignored_path",
")",
"if",
"compiled_path",
".",
"match",
"(",
"path",
")",
":",
"ignore",
"=",
"True",
"if",
"not",
"ignore",
":",
"if",
"reverse_paths",
":",
"view",
"=",
"view_name_from",
"(",
"path",
")",
"else",
":",
"view",
"=",
"path",
"key",
"=",
"\"%s-%s-%s\"",
"%",
"(",
"view",
",",
"status",
",",
"method",
")",
"try",
":",
"data",
"[",
"key",
"]",
"[",
"'count'",
"]",
"=",
"data",
"[",
"key",
"]",
"[",
"'count'",
"]",
"+",
"1",
"data",
"[",
"key",
"]",
"[",
"'times'",
"]",
".",
"append",
"(",
"float",
"(",
"time",
")",
")",
"data",
"[",
"key",
"]",
"[",
"'sql'",
"]",
".",
"append",
"(",
"int",
"(",
"sql",
")",
")",
"data",
"[",
"key",
"]",
"[",
"'sqltime'",
"]",
".",
"append",
"(",
"float",
"(",
"sqltime",
")",
")",
"except",
"KeyError",
":",
"data",
"[",
"key",
"]",
"=",
"{",
"'count'",
":",
"1",
",",
"'status'",
":",
"status",
",",
"'view'",
":",
"view",
",",
"'method'",
":",
"method",
",",
"'times'",
":",
"[",
"float",
"(",
"time",
")",
"]",
",",
"'sql'",
":",
"[",
"int",
"(",
"sql",
")",
"]",
",",
"'sqltime'",
":",
"[",
"float",
"(",
"sqltime",
")",
"]",
",",
"}",
"except",
"Resolver404",
":",
"pass",
"if",
"progress",
":",
"pbar",
".",
"update",
"(",
"counter",
")",
"if",
"progress",
":",
"pbar",
".",
"finish",
"(",
")",
"return",
"data"
] |
Given a log file and regex group and extract the performance data
|
[
"Given",
"a",
"log",
"file",
"and",
"regex",
"group",
"and",
"extract",
"the",
"performance",
"data"
] |
84c7015248a82faccb9d3fe4e6014645cc9ec103
|
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L72-L132
|
train
|
hammerlab/cohorts
|
cohorts/collection.py
|
Collection.to_string
|
def to_string(self, limit=None):
"""
Create a string representation of this collection, showing up to
`limit` items.
"""
header = self.short_string()
if len(self) == 0:
return header
contents = ""
element_lines = [
" -- %s" % (element,)
for element in self.elements[:limit]
]
contents = "\n".join(element_lines)
if limit is not None and len(self.elements) > limit:
contents += "\n ... and %d more" % (len(self) - limit)
return "%s\n%s" % (header, contents)
|
python
|
def to_string(self, limit=None):
"""
Create a string representation of this collection, showing up to
`limit` items.
"""
header = self.short_string()
if len(self) == 0:
return header
contents = ""
element_lines = [
" -- %s" % (element,)
for element in self.elements[:limit]
]
contents = "\n".join(element_lines)
if limit is not None and len(self.elements) > limit:
contents += "\n ... and %d more" % (len(self) - limit)
return "%s\n%s" % (header, contents)
|
[
"def",
"to_string",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"header",
"=",
"self",
".",
"short_string",
"(",
")",
"if",
"len",
"(",
"self",
")",
"==",
"0",
":",
"return",
"header",
"contents",
"=",
"\"\"",
"element_lines",
"=",
"[",
"\" -- %s\"",
"%",
"(",
"element",
",",
")",
"for",
"element",
"in",
"self",
".",
"elements",
"[",
":",
"limit",
"]",
"]",
"contents",
"=",
"\"\\n\"",
".",
"join",
"(",
"element_lines",
")",
"if",
"limit",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"elements",
")",
">",
"limit",
":",
"contents",
"+=",
"\"\\n ... and %d more\"",
"%",
"(",
"len",
"(",
"self",
")",
"-",
"limit",
")",
"return",
"\"%s\\n%s\"",
"%",
"(",
"header",
",",
"contents",
")"
] |
Create a string representation of this collection, showing up to
`limit` items.
|
[
"Create",
"a",
"string",
"representation",
"of",
"this",
"collection",
"showing",
"up",
"to",
"limit",
"items",
"."
] |
278b05e609e6c4d4a77c57d49446460be53ea33e
|
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/collection.py#L29-L46
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/userinfo.py
|
UserStorageHandler.get_instance
|
def get_instance(cls, state):
""":rtype: UserStorageHandler"""
if cls.instance is None:
cls.instance = UserStorageHandler(state)
return cls.instance
|
python
|
def get_instance(cls, state):
""":rtype: UserStorageHandler"""
if cls.instance is None:
cls.instance = UserStorageHandler(state)
return cls.instance
|
[
"def",
"get_instance",
"(",
"cls",
",",
"state",
")",
":",
"if",
"cls",
".",
"instance",
"is",
"None",
":",
"cls",
".",
"instance",
"=",
"UserStorageHandler",
"(",
"state",
")",
"return",
"cls",
".",
"instance"
] |
:rtype: UserStorageHandler
|
[
":",
"rtype",
":",
"UserStorageHandler"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/userinfo.py#L26-L30
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/benchmark.py
|
WorkersAction._get_active_threads_names
|
def _get_active_threads_names():
"""May contain sensitive info (like user ids). Use with care."""
active_threads = threading.enumerate()
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format()
for thread in active_threads
]
)
|
python
|
def _get_active_threads_names():
"""May contain sensitive info (like user ids). Use with care."""
active_threads = threading.enumerate()
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format()
for thread in active_threads
]
)
|
[
"def",
"_get_active_threads_names",
"(",
")",
":",
"active_threads",
"=",
"threading",
".",
"enumerate",
"(",
")",
"return",
"FormattedText",
"(",
")",
".",
"join",
"(",
"[",
"FormattedText",
"(",
")",
".",
"newline",
"(",
")",
".",
"normal",
"(",
"\" - {name}\"",
")",
".",
"start_format",
"(",
")",
".",
"bold",
"(",
"name",
"=",
"thread",
".",
"name",
")",
".",
"end_format",
"(",
")",
"for",
"thread",
"in",
"active_threads",
"]",
")"
] |
May contain sensitive info (like user ids). Use with care.
|
[
"May",
"contain",
"sensitive",
"info",
"(",
"like",
"user",
"ids",
")",
".",
"Use",
"with",
"care",
"."
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L164-L172
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/benchmark.py
|
WorkersAction._get_running_workers_names
|
def _get_running_workers_names(running_workers: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in running_workers
]
)
|
python
|
def _get_running_workers_names(running_workers: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in running_workers
]
)
|
[
"def",
"_get_running_workers_names",
"(",
"running_workers",
":",
"list",
")",
":",
"return",
"FormattedText",
"(",
")",
".",
"join",
"(",
"[",
"FormattedText",
"(",
")",
".",
"newline",
"(",
")",
".",
"normal",
"(",
"\" - {name}\"",
")",
".",
"start_format",
"(",
")",
".",
"bold",
"(",
"name",
"=",
"worker",
".",
"name",
")",
".",
"end_format",
"(",
")",
"for",
"worker",
"in",
"running_workers",
"]",
")"
] |
May contain sensitive info (like user ids). Use with care.
|
[
"May",
"contain",
"sensitive",
"info",
"(",
"like",
"user",
"ids",
")",
".",
"Use",
"with",
"care",
"."
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L187-L194
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/benchmark.py
|
WorkersAction._get_worker_pools_names
|
def _get_worker_pools_names(worker_pools: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in worker_pools
]
)
|
python
|
def _get_worker_pools_names(worker_pools: list):
"""May contain sensitive info (like user ids). Use with care."""
return FormattedText().join(
[
FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format()
for worker in worker_pools
]
)
|
[
"def",
"_get_worker_pools_names",
"(",
"worker_pools",
":",
"list",
")",
":",
"return",
"FormattedText",
"(",
")",
".",
"join",
"(",
"[",
"FormattedText",
"(",
")",
".",
"newline",
"(",
")",
".",
"normal",
"(",
"\" - {name}\"",
")",
".",
"start_format",
"(",
")",
".",
"bold",
"(",
"name",
"=",
"worker",
".",
"name",
")",
".",
"end_format",
"(",
")",
"for",
"worker",
"in",
"worker_pools",
"]",
")"
] |
May contain sensitive info (like user ids). Use with care.
|
[
"May",
"contain",
"sensitive",
"info",
"(",
"like",
"user",
"ids",
")",
".",
"Use",
"with",
"care",
"."
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L209-L216
|
train
|
alvarogzp/telegram-bot-framework
|
bot/action/standard/info/formatter/user.py
|
UserInfoFormatter.format
|
def format(self, member_info: bool = False):
"""
:param member_info: If True, adds also chat member info. Please, note that this additional info requires
to make ONE api call.
"""
user = self.api_object
self.__format_user(user)
if member_info and self.chat.type != CHAT_TYPE_PRIVATE:
self._add_empty()
self.__format_member(user)
|
python
|
def format(self, member_info: bool = False):
"""
:param member_info: If True, adds also chat member info. Please, note that this additional info requires
to make ONE api call.
"""
user = self.api_object
self.__format_user(user)
if member_info and self.chat.type != CHAT_TYPE_PRIVATE:
self._add_empty()
self.__format_member(user)
|
[
"def",
"format",
"(",
"self",
",",
"member_info",
":",
"bool",
"=",
"False",
")",
":",
"user",
"=",
"self",
".",
"api_object",
"self",
".",
"__format_user",
"(",
"user",
")",
"if",
"member_info",
"and",
"self",
".",
"chat",
".",
"type",
"!=",
"CHAT_TYPE_PRIVATE",
":",
"self",
".",
"_add_empty",
"(",
")",
"self",
".",
"__format_member",
"(",
"user",
")"
] |
:param member_info: If True, adds also chat member info. Please, note that this additional info requires
to make ONE api call.
|
[
":",
"param",
"member_info",
":",
"If",
"True",
"adds",
"also",
"chat",
"member",
"info",
".",
"Please",
"note",
"that",
"this",
"additional",
"info",
"requires",
"to",
"make",
"ONE",
"api",
"call",
"."
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/user.py#L19-L28
|
train
|
alvarogzp/telegram-bot-framework
|
bot/bot.py
|
UpdatesProcessor.safe_log_error
|
def safe_log_error(self, error: Exception, *info: str):
"""Log error failing silently on error"""
self.__do_safe(lambda: self.logger.error(error, *info))
|
python
|
def safe_log_error(self, error: Exception, *info: str):
"""Log error failing silently on error"""
self.__do_safe(lambda: self.logger.error(error, *info))
|
[
"def",
"safe_log_error",
"(",
"self",
",",
"error",
":",
"Exception",
",",
"*",
"info",
":",
"str",
")",
":",
"self",
".",
"__do_safe",
"(",
"lambda",
":",
"self",
".",
"logger",
".",
"error",
"(",
"error",
",",
"*",
"info",
")",
")"
] |
Log error failing silently on error
|
[
"Log",
"error",
"failing",
"silently",
"on",
"error"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/bot.py#L173-L175
|
train
|
alvarogzp/telegram-bot-framework
|
bot/bot.py
|
UpdatesProcessor.safe_log_info
|
def safe_log_info(self, *info: str):
"""Log info failing silently on error"""
self.__do_safe(lambda: self.logger.info(*info))
|
python
|
def safe_log_info(self, *info: str):
"""Log info failing silently on error"""
self.__do_safe(lambda: self.logger.info(*info))
|
[
"def",
"safe_log_info",
"(",
"self",
",",
"*",
"info",
":",
"str",
")",
":",
"self",
".",
"__do_safe",
"(",
"lambda",
":",
"self",
".",
"logger",
".",
"info",
"(",
"*",
"info",
")",
")"
] |
Log info failing silently on error
|
[
"Log",
"info",
"failing",
"silently",
"on",
"error"
] |
7b597a415c1901901c677976cb13100fc3083107
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/bot.py#L177-L179
|
train
|
brentp/skidmarks
|
skidmarks.py
|
wald_wolfowitz
|
def wald_wolfowitz(sequence):
"""
implements the wald-wolfowitz runs test:
http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
http://support.sas.com/kb/33/092.html
:param sequence: any iterable with at most 2 values. e.g.
'1001001'
[1, 0, 1, 0, 1]
'abaaabbba'
:rtype: a dict with keys of
`n_runs`: the number of runs in the sequence
`p`: the support to reject the null-hypothesis that the number of runs
supports a random sequence
`z`: the z-score, used to calculate the p-value
`sd`, `mean`: the expected standard deviation, mean the number of runs,
given the ratio of numbers of 1's/0's in the sequence
>>> r = wald_wolfowitz('1000001')
>>> r['n_runs'] # should be 3, because 1, 0, 1
3
>>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
False
# this should show significance for non-randomness
>>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
>>> wald_wolfowitz(li)['p'] < 0.05
True
"""
R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))
n = float(sum(1 for s in sequence if s == sequence[0]))
m = float(sum(1 for s in sequence if s != sequence[0]))
# expected mean runs
ER = ((2 * n * m ) / (n + m)) + 1
# expected variance runs
VR = (2 * n * m * (2 * n * m - n - m )) / ((n + m)**2 * (n + m - 1))
O = (ER - 1) * (ER - 2) / (n + m - 1.)
assert VR - O < 0.001, (VR, O)
SD = math.sqrt(VR)
# Z-score
Z = (R - ER) / SD
return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
|
python
|
def wald_wolfowitz(sequence):
"""
implements the wald-wolfowitz runs test:
http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
http://support.sas.com/kb/33/092.html
:param sequence: any iterable with at most 2 values. e.g.
'1001001'
[1, 0, 1, 0, 1]
'abaaabbba'
:rtype: a dict with keys of
`n_runs`: the number of runs in the sequence
`p`: the support to reject the null-hypothesis that the number of runs
supports a random sequence
`z`: the z-score, used to calculate the p-value
`sd`, `mean`: the expected standard deviation, mean the number of runs,
given the ratio of numbers of 1's/0's in the sequence
>>> r = wald_wolfowitz('1000001')
>>> r['n_runs'] # should be 3, because 1, 0, 1
3
>>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
False
# this should show significance for non-randomness
>>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
>>> wald_wolfowitz(li)['p'] < 0.05
True
"""
R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))
n = float(sum(1 for s in sequence if s == sequence[0]))
m = float(sum(1 for s in sequence if s != sequence[0]))
# expected mean runs
ER = ((2 * n * m ) / (n + m)) + 1
# expected variance runs
VR = (2 * n * m * (2 * n * m - n - m )) / ((n + m)**2 * (n + m - 1))
O = (ER - 1) * (ER - 2) / (n + m - 1.)
assert VR - O < 0.001, (VR, O)
SD = math.sqrt(VR)
# Z-score
Z = (R - ER) / SD
return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
|
[
"def",
"wald_wolfowitz",
"(",
"sequence",
")",
":",
"R",
"=",
"n_runs",
"=",
"sum",
"(",
"1",
"for",
"s",
"in",
"groupby",
"(",
"sequence",
",",
"lambda",
"a",
":",
"a",
")",
")",
"n",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"s",
"in",
"sequence",
"if",
"s",
"==",
"sequence",
"[",
"0",
"]",
")",
")",
"m",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"s",
"in",
"sequence",
"if",
"s",
"!=",
"sequence",
"[",
"0",
"]",
")",
")",
"# expected mean runs",
"ER",
"=",
"(",
"(",
"2",
"*",
"n",
"*",
"m",
")",
"/",
"(",
"n",
"+",
"m",
")",
")",
"+",
"1",
"# expected variance runs",
"VR",
"=",
"(",
"2",
"*",
"n",
"*",
"m",
"*",
"(",
"2",
"*",
"n",
"*",
"m",
"-",
"n",
"-",
"m",
")",
")",
"/",
"(",
"(",
"n",
"+",
"m",
")",
"**",
"2",
"*",
"(",
"n",
"+",
"m",
"-",
"1",
")",
")",
"O",
"=",
"(",
"ER",
"-",
"1",
")",
"*",
"(",
"ER",
"-",
"2",
")",
"/",
"(",
"n",
"+",
"m",
"-",
"1.",
")",
"assert",
"VR",
"-",
"O",
"<",
"0.001",
",",
"(",
"VR",
",",
"O",
")",
"SD",
"=",
"math",
".",
"sqrt",
"(",
"VR",
")",
"# Z-score",
"Z",
"=",
"(",
"R",
"-",
"ER",
")",
"/",
"SD",
"return",
"{",
"'z'",
":",
"Z",
",",
"'mean'",
":",
"ER",
",",
"'sd'",
":",
"SD",
",",
"'p'",
":",
"zprob",
"(",
"Z",
")",
",",
"'n_runs'",
":",
"R",
"}"
] |
implements the wald-wolfowitz runs test:
http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
http://support.sas.com/kb/33/092.html
:param sequence: any iterable with at most 2 values. e.g.
'1001001'
[1, 0, 1, 0, 1]
'abaaabbba'
:rtype: a dict with keys of
`n_runs`: the number of runs in the sequence
`p`: the support to reject the null-hypothesis that the number of runs
supports a random sequence
`z`: the z-score, used to calculate the p-value
`sd`, `mean`: the expected standard deviation, mean the number of runs,
given the ratio of numbers of 1's/0's in the sequence
>>> r = wald_wolfowitz('1000001')
>>> r['n_runs'] # should be 3, because 1, 0, 1
3
>>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
False
# this should show significance for non-randomness
>>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
>>> wald_wolfowitz(li)['p'] < 0.05
True
|
[
"implements",
"the",
"wald",
"-",
"wolfowitz",
"runs",
"test",
":",
"http",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Wald",
"-",
"Wolfowitz_runs_test",
"http",
":",
"//",
"support",
".",
"sas",
".",
"com",
"/",
"kb",
"/",
"33",
"/",
"092",
".",
"html"
] |
f63b9f1b822cb47991215b655155b5041e86ea39
|
https://github.com/brentp/skidmarks/blob/f63b9f1b822cb47991215b655155b5041e86ea39/skidmarks.py#L51-L99
|
train
|
brentp/skidmarks
|
skidmarks.py
|
auto_correlation
|
def auto_correlation(sequence):
"""
test for the autocorrelation of a sequence between t and t - 1
as the 'auto_correlation' it is less likely that the sequence is
generated randomly.
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755
"""
if isinstance(sequence, basestring):
sequence = map(int, sequence)
seq = np.array(list(sequence), dtype=np.float)
dseq = np.column_stack((seq[1:], seq[:-1]))
slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
cc = np.corrcoef(dseq, rowvar=0)[0][1]
return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
'p': ttp, 'see': see, 'auto_correlation': cc}
|
python
|
def auto_correlation(sequence):
"""
test for the autocorrelation of a sequence between t and t - 1
as the 'auto_correlation' it is less likely that the sequence is
generated randomly.
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755
"""
if isinstance(sequence, basestring):
sequence = map(int, sequence)
seq = np.array(list(sequence), dtype=np.float)
dseq = np.column_stack((seq[1:], seq[:-1]))
slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
cc = np.corrcoef(dseq, rowvar=0)[0][1]
return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
'p': ttp, 'see': see, 'auto_correlation': cc}
|
[
"def",
"auto_correlation",
"(",
"sequence",
")",
":",
"if",
"isinstance",
"(",
"sequence",
",",
"basestring",
")",
":",
"sequence",
"=",
"map",
"(",
"int",
",",
"sequence",
")",
"seq",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"sequence",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"dseq",
"=",
"np",
".",
"column_stack",
"(",
"(",
"seq",
"[",
"1",
":",
"]",
",",
"seq",
"[",
":",
"-",
"1",
"]",
")",
")",
"slope",
",",
"intercept",
",",
"r",
",",
"ttp",
",",
"see",
"=",
"linregress",
"(",
"seq",
"[",
"1",
":",
"]",
",",
"seq",
"[",
":",
"-",
"1",
"]",
")",
"cc",
"=",
"np",
".",
"corrcoef",
"(",
"dseq",
",",
"rowvar",
"=",
"0",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"return",
"{",
"'slope'",
":",
"slope",
",",
"'intercept'",
":",
"intercept",
",",
"'r-squared'",
":",
"r",
"**",
"2",
",",
"'p'",
":",
"ttp",
",",
"'see'",
":",
"see",
",",
"'auto_correlation'",
":",
"cc",
"}"
] |
test for the autocorrelation of a sequence between t and t - 1
as the 'auto_correlation' it is less likely that the sequence is
generated randomly.
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755
|
[
"test",
"for",
"the",
"autocorrelation",
"of",
"a",
"sequence",
"between",
"t",
"and",
"t",
"-",
"1",
"as",
"the",
"auto_correlation",
"it",
"is",
"less",
"likely",
"that",
"the",
"sequence",
"is",
"generated",
"randomly",
".",
":",
"param",
"sequence",
":",
"any",
"iterable",
"with",
"at",
"most",
"2",
"values",
"that",
"can",
"be",
"turned",
"into",
"a",
"float",
"via",
"np",
".",
"float",
".",
"e",
".",
"g",
".",
"1001001",
"[",
"1",
"0",
"1",
"0",
"1",
"]",
"[",
"1",
".",
"2",
".",
"1",
".",
"5",
"1",
"]",
":",
"rtype",
":",
"returns",
"a",
"dict",
"of",
"the",
"linear",
"regression",
"stats",
"of",
"sequence",
"[",
"1",
":",
"]",
"vs",
".",
"sequence",
"[",
":",
"-",
"1",
"]"
] |
f63b9f1b822cb47991215b655155b5041e86ea39
|
https://github.com/brentp/skidmarks/blob/f63b9f1b822cb47991215b655155b5041e86ea39/skidmarks.py#L102-L129
|
train
|
twisted/txacme
|
src/txacme/client.py
|
_parse_header_links
|
def _parse_header_links(response):
"""
Parse the links from a Link: header field.
.. todo:: Links with the same relation collide at the moment.
:param bytes value: The header value.
:rtype: `dict`
:return: A dictionary of parsed links, keyed by ``rel`` or ``url``.
"""
values = response.headers.getRawHeaders(b'link', [b''])
value = b','.join(values).decode('ascii')
with LOG_HTTP_PARSE_LINKS(raw_link=value) as action:
links = {}
replace_chars = u' \'"'
for val in re.split(u', *<', value):
try:
url, params = val.split(u';', 1)
except ValueError:
url, params = val, u''
link = {}
link[u'url'] = url.strip(u'<> \'"')
for param in params.split(u';'):
try:
key, value = param.split(u'=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links[link.get(u'rel') or link.get(u'url')] = link
action.add_success_fields(parsed_links=links)
return links
|
python
|
def _parse_header_links(response):
"""
Parse the links from a Link: header field.
.. todo:: Links with the same relation collide at the moment.
:param bytes value: The header value.
:rtype: `dict`
:return: A dictionary of parsed links, keyed by ``rel`` or ``url``.
"""
values = response.headers.getRawHeaders(b'link', [b''])
value = b','.join(values).decode('ascii')
with LOG_HTTP_PARSE_LINKS(raw_link=value) as action:
links = {}
replace_chars = u' \'"'
for val in re.split(u', *<', value):
try:
url, params = val.split(u';', 1)
except ValueError:
url, params = val, u''
link = {}
link[u'url'] = url.strip(u'<> \'"')
for param in params.split(u';'):
try:
key, value = param.split(u'=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links[link.get(u'rel') or link.get(u'url')] = link
action.add_success_fields(parsed_links=links)
return links
|
[
"def",
"_parse_header_links",
"(",
"response",
")",
":",
"values",
"=",
"response",
".",
"headers",
".",
"getRawHeaders",
"(",
"b'link'",
",",
"[",
"b''",
"]",
")",
"value",
"=",
"b','",
".",
"join",
"(",
"values",
")",
".",
"decode",
"(",
"'ascii'",
")",
"with",
"LOG_HTTP_PARSE_LINKS",
"(",
"raw_link",
"=",
"value",
")",
"as",
"action",
":",
"links",
"=",
"{",
"}",
"replace_chars",
"=",
"u' \\'\"'",
"for",
"val",
"in",
"re",
".",
"split",
"(",
"u', *<'",
",",
"value",
")",
":",
"try",
":",
"url",
",",
"params",
"=",
"val",
".",
"split",
"(",
"u';'",
",",
"1",
")",
"except",
"ValueError",
":",
"url",
",",
"params",
"=",
"val",
",",
"u''",
"link",
"=",
"{",
"}",
"link",
"[",
"u'url'",
"]",
"=",
"url",
".",
"strip",
"(",
"u'<> \\'\"'",
")",
"for",
"param",
"in",
"params",
".",
"split",
"(",
"u';'",
")",
":",
"try",
":",
"key",
",",
"value",
"=",
"param",
".",
"split",
"(",
"u'='",
")",
"except",
"ValueError",
":",
"break",
"link",
"[",
"key",
".",
"strip",
"(",
"replace_chars",
")",
"]",
"=",
"value",
".",
"strip",
"(",
"replace_chars",
")",
"links",
"[",
"link",
".",
"get",
"(",
"u'rel'",
")",
"or",
"link",
".",
"get",
"(",
"u'url'",
")",
"]",
"=",
"link",
"action",
".",
"add_success_fields",
"(",
"parsed_links",
"=",
"links",
")",
"return",
"links"
] |
Parse the links from a Link: header field.
.. todo:: Links with the same relation collide at the moment.
:param bytes value: The header value.
:rtype: `dict`
:return: A dictionary of parsed links, keyed by ``rel`` or ``url``.
|
[
"Parse",
"the",
"links",
"from",
"a",
"Link",
":",
"header",
"field",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L37-L69
|
train
|
twisted/txacme
|
src/txacme/client.py
|
_default_client
|
def _default_client(jws_client, reactor, key, alg):
"""
Make a client if we didn't get one.
"""
if jws_client is None:
pool = HTTPConnectionPool(reactor)
agent = Agent(reactor, pool=pool)
jws_client = JWSClient(HTTPClient(agent=agent), key, alg)
return jws_client
|
python
|
def _default_client(jws_client, reactor, key, alg):
"""
Make a client if we didn't get one.
"""
if jws_client is None:
pool = HTTPConnectionPool(reactor)
agent = Agent(reactor, pool=pool)
jws_client = JWSClient(HTTPClient(agent=agent), key, alg)
return jws_client
|
[
"def",
"_default_client",
"(",
"jws_client",
",",
"reactor",
",",
"key",
",",
"alg",
")",
":",
"if",
"jws_client",
"is",
"None",
":",
"pool",
"=",
"HTTPConnectionPool",
"(",
"reactor",
")",
"agent",
"=",
"Agent",
"(",
"reactor",
",",
"pool",
"=",
"pool",
")",
"jws_client",
"=",
"JWSClient",
"(",
"HTTPClient",
"(",
"agent",
"=",
"agent",
")",
",",
"key",
",",
"alg",
")",
"return",
"jws_client"
] |
Make a client if we didn't get one.
|
[
"Make",
"a",
"client",
"if",
"we",
"didn",
"t",
"get",
"one",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L72-L80
|
train
|
twisted/txacme
|
src/txacme/client.py
|
_find_supported_challenge
|
def _find_supported_challenge(authzr, responders):
"""
Find a challenge combination that consists of a single challenge that the
responder can satisfy.
:param ~acme.messages.AuthorizationResource auth: The authorization to
examine.
:type responder: List[`~txacme.interfaces.IResponder`]
:param responder: The possible responders to use.
:raises NoSupportedChallenges: When a suitable challenge combination is not
found.
:rtype: Tuple[`~txacme.interfaces.IResponder`,
`~acme.messages.ChallengeBody`]
:return: The responder and challenge that were found.
"""
matches = [
(responder, challbs[0])
for challbs in authzr.body.resolved_combinations
for responder in responders
if [challb.typ for challb in challbs] == [responder.challenge_type]]
if len(matches) == 0:
raise NoSupportedChallenges(authzr)
else:
return matches[0]
|
python
|
def _find_supported_challenge(authzr, responders):
"""
Find a challenge combination that consists of a single challenge that the
responder can satisfy.
:param ~acme.messages.AuthorizationResource auth: The authorization to
examine.
:type responder: List[`~txacme.interfaces.IResponder`]
:param responder: The possible responders to use.
:raises NoSupportedChallenges: When a suitable challenge combination is not
found.
:rtype: Tuple[`~txacme.interfaces.IResponder`,
`~acme.messages.ChallengeBody`]
:return: The responder and challenge that were found.
"""
matches = [
(responder, challbs[0])
for challbs in authzr.body.resolved_combinations
for responder in responders
if [challb.typ for challb in challbs] == [responder.challenge_type]]
if len(matches) == 0:
raise NoSupportedChallenges(authzr)
else:
return matches[0]
|
[
"def",
"_find_supported_challenge",
"(",
"authzr",
",",
"responders",
")",
":",
"matches",
"=",
"[",
"(",
"responder",
",",
"challbs",
"[",
"0",
"]",
")",
"for",
"challbs",
"in",
"authzr",
".",
"body",
".",
"resolved_combinations",
"for",
"responder",
"in",
"responders",
"if",
"[",
"challb",
".",
"typ",
"for",
"challb",
"in",
"challbs",
"]",
"==",
"[",
"responder",
".",
"challenge_type",
"]",
"]",
"if",
"len",
"(",
"matches",
")",
"==",
"0",
":",
"raise",
"NoSupportedChallenges",
"(",
"authzr",
")",
"else",
":",
"return",
"matches",
"[",
"0",
"]"
] |
Find a challenge combination that consists of a single challenge that the
responder can satisfy.
:param ~acme.messages.AuthorizationResource auth: The authorization to
examine.
:type responder: List[`~txacme.interfaces.IResponder`]
:param responder: The possible responders to use.
:raises NoSupportedChallenges: When a suitable challenge combination is not
found.
:rtype: Tuple[`~txacme.interfaces.IResponder`,
`~acme.messages.ChallengeBody`]
:return: The responder and challenge that were found.
|
[
"Find",
"a",
"challenge",
"combination",
"that",
"consists",
"of",
"a",
"single",
"challenge",
"that",
"the",
"responder",
"can",
"satisfy",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L505-L531
|
train
|
twisted/txacme
|
src/txacme/client.py
|
answer_challenge
|
def answer_challenge(authzr, client, responders):
"""
Complete an authorization using a responder.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param .Client client: The ACME client.
:type responders: List[`~txacme.interfaces.IResponder`]
:param responders: A list of responders that can be used to complete the
challenge with.
:return: A deferred firing when the authorization is verified.
"""
responder, challb = _find_supported_challenge(authzr, responders)
response = challb.response(client.key)
def _stop_responding():
return maybeDeferred(
responder.stop_responding,
authzr.body.identifier.value,
challb.chall,
response)
return (
maybeDeferred(
responder.start_responding,
authzr.body.identifier.value,
challb.chall,
response)
.addCallback(lambda _: client.answer_challenge(challb, response))
.addCallback(lambda _: _stop_responding)
)
|
python
|
def answer_challenge(authzr, client, responders):
"""
Complete an authorization using a responder.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param .Client client: The ACME client.
:type responders: List[`~txacme.interfaces.IResponder`]
:param responders: A list of responders that can be used to complete the
challenge with.
:return: A deferred firing when the authorization is verified.
"""
responder, challb = _find_supported_challenge(authzr, responders)
response = challb.response(client.key)
def _stop_responding():
return maybeDeferred(
responder.stop_responding,
authzr.body.identifier.value,
challb.chall,
response)
return (
maybeDeferred(
responder.start_responding,
authzr.body.identifier.value,
challb.chall,
response)
.addCallback(lambda _: client.answer_challenge(challb, response))
.addCallback(lambda _: _stop_responding)
)
|
[
"def",
"answer_challenge",
"(",
"authzr",
",",
"client",
",",
"responders",
")",
":",
"responder",
",",
"challb",
"=",
"_find_supported_challenge",
"(",
"authzr",
",",
"responders",
")",
"response",
"=",
"challb",
".",
"response",
"(",
"client",
".",
"key",
")",
"def",
"_stop_responding",
"(",
")",
":",
"return",
"maybeDeferred",
"(",
"responder",
".",
"stop_responding",
",",
"authzr",
".",
"body",
".",
"identifier",
".",
"value",
",",
"challb",
".",
"chall",
",",
"response",
")",
"return",
"(",
"maybeDeferred",
"(",
"responder",
".",
"start_responding",
",",
"authzr",
".",
"body",
".",
"identifier",
".",
"value",
",",
"challb",
".",
"chall",
",",
"response",
")",
".",
"addCallback",
"(",
"lambda",
"_",
":",
"client",
".",
"answer_challenge",
"(",
"challb",
",",
"response",
")",
")",
".",
"addCallback",
"(",
"lambda",
"_",
":",
"_stop_responding",
")",
")"
] |
Complete an authorization using a responder.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param .Client client: The ACME client.
:type responders: List[`~txacme.interfaces.IResponder`]
:param responders: A list of responders that can be used to complete the
challenge with.
:return: A deferred firing when the authorization is verified.
|
[
"Complete",
"an",
"authorization",
"using",
"a",
"responder",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L534-L565
|
train
|
twisted/txacme
|
src/txacme/client.py
|
poll_until_valid
|
def poll_until_valid(authzr, clock, client, timeout=300.0):
"""
Poll an authorization until it is in a state other than pending or
processing.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param clock: The ``IReactorTime`` implementation to use; usually the
reactor, when not testing.
:param .Client client: The ACME client.
:param float timeout: Maximum time to poll in seconds, before giving up.
:raises txacme.client.AuthorizationFailed: if the authorization is no
longer in the pending, processing, or valid states.
:raises: ``twisted.internet.defer.CancelledError`` if the authorization was
still in pending or processing state when the timeout was reached.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
:return: A deferred firing when the authorization has completed/failed; if
the authorization is valid, the authorization resource will be
returned.
"""
def repoll(result):
authzr, retry_after = result
if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}:
return (
deferLater(clock, retry_after, lambda: None)
.addCallback(lambda _: client.poll(authzr))
.addCallback(repoll)
)
if authzr.body.status != STATUS_VALID:
raise AuthorizationFailed(authzr)
return authzr
def cancel_timeout(result):
if timeout_call.active():
timeout_call.cancel()
return result
d = client.poll(authzr).addCallback(repoll)
timeout_call = clock.callLater(timeout, d.cancel)
d.addBoth(cancel_timeout)
return d
|
python
|
def poll_until_valid(authzr, clock, client, timeout=300.0):
"""
Poll an authorization until it is in a state other than pending or
processing.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param clock: The ``IReactorTime`` implementation to use; usually the
reactor, when not testing.
:param .Client client: The ACME client.
:param float timeout: Maximum time to poll in seconds, before giving up.
:raises txacme.client.AuthorizationFailed: if the authorization is no
longer in the pending, processing, or valid states.
:raises: ``twisted.internet.defer.CancelledError`` if the authorization was
still in pending or processing state when the timeout was reached.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
:return: A deferred firing when the authorization has completed/failed; if
the authorization is valid, the authorization resource will be
returned.
"""
def repoll(result):
authzr, retry_after = result
if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}:
return (
deferLater(clock, retry_after, lambda: None)
.addCallback(lambda _: client.poll(authzr))
.addCallback(repoll)
)
if authzr.body.status != STATUS_VALID:
raise AuthorizationFailed(authzr)
return authzr
def cancel_timeout(result):
if timeout_call.active():
timeout_call.cancel()
return result
d = client.poll(authzr).addCallback(repoll)
timeout_call = clock.callLater(timeout, d.cancel)
d.addBoth(cancel_timeout)
return d
|
[
"def",
"poll_until_valid",
"(",
"authzr",
",",
"clock",
",",
"client",
",",
"timeout",
"=",
"300.0",
")",
":",
"def",
"repoll",
"(",
"result",
")",
":",
"authzr",
",",
"retry_after",
"=",
"result",
"if",
"authzr",
".",
"body",
".",
"status",
"in",
"{",
"STATUS_PENDING",
",",
"STATUS_PROCESSING",
"}",
":",
"return",
"(",
"deferLater",
"(",
"clock",
",",
"retry_after",
",",
"lambda",
":",
"None",
")",
".",
"addCallback",
"(",
"lambda",
"_",
":",
"client",
".",
"poll",
"(",
"authzr",
")",
")",
".",
"addCallback",
"(",
"repoll",
")",
")",
"if",
"authzr",
".",
"body",
".",
"status",
"!=",
"STATUS_VALID",
":",
"raise",
"AuthorizationFailed",
"(",
"authzr",
")",
"return",
"authzr",
"def",
"cancel_timeout",
"(",
"result",
")",
":",
"if",
"timeout_call",
".",
"active",
"(",
")",
":",
"timeout_call",
".",
"cancel",
"(",
")",
"return",
"result",
"d",
"=",
"client",
".",
"poll",
"(",
"authzr",
")",
".",
"addCallback",
"(",
"repoll",
")",
"timeout_call",
"=",
"clock",
".",
"callLater",
"(",
"timeout",
",",
"d",
".",
"cancel",
")",
"d",
".",
"addBoth",
"(",
"cancel_timeout",
")",
"return",
"d"
] |
Poll an authorization until it is in a state other than pending or
processing.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param clock: The ``IReactorTime`` implementation to use; usually the
reactor, when not testing.
:param .Client client: The ACME client.
:param float timeout: Maximum time to poll in seconds, before giving up.
:raises txacme.client.AuthorizationFailed: if the authorization is no
longer in the pending, processing, or valid states.
:raises: ``twisted.internet.defer.CancelledError`` if the authorization was
still in pending or processing state when the timeout was reached.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
:return: A deferred firing when the authorization has completed/failed; if
the authorization is valid, the authorization resource will be
returned.
|
[
"Poll",
"an",
"authorization",
"until",
"it",
"is",
"in",
"a",
"state",
"other",
"than",
"pending",
"or",
"processing",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L568-L609
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.from_url
|
def from_url(cls, reactor, url, key, alg=RS256, jws_client=None):
"""
Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`]
"""
action = LOG_ACME_CONSUME_DIRECTORY(
url=url, key_type=key.typ, alg=alg.name)
with action.context():
check_directory_url_type(url)
jws_client = _default_client(jws_client, reactor, key, alg)
return (
DeferredContext(jws_client.get(url.asText()))
.addCallback(json_content)
.addCallback(messages.Directory.from_json)
.addCallback(
tap(lambda d: action.add_success_fields(directory=d)))
.addCallback(cls, reactor, key, jws_client)
.addActionFinish())
|
python
|
def from_url(cls, reactor, url, key, alg=RS256, jws_client=None):
"""
Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`]
"""
action = LOG_ACME_CONSUME_DIRECTORY(
url=url, key_type=key.typ, alg=alg.name)
with action.context():
check_directory_url_type(url)
jws_client = _default_client(jws_client, reactor, key, alg)
return (
DeferredContext(jws_client.get(url.asText()))
.addCallback(json_content)
.addCallback(messages.Directory.from_json)
.addCallback(
tap(lambda d: action.add_success_fields(directory=d)))
.addCallback(cls, reactor, key, jws_client)
.addActionFinish())
|
[
"def",
"from_url",
"(",
"cls",
",",
"reactor",
",",
"url",
",",
"key",
",",
"alg",
"=",
"RS256",
",",
"jws_client",
"=",
"None",
")",
":",
"action",
"=",
"LOG_ACME_CONSUME_DIRECTORY",
"(",
"url",
"=",
"url",
",",
"key_type",
"=",
"key",
".",
"typ",
",",
"alg",
"=",
"alg",
".",
"name",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"check_directory_url_type",
"(",
"url",
")",
"jws_client",
"=",
"_default_client",
"(",
"jws_client",
",",
"reactor",
",",
"key",
",",
"alg",
")",
"return",
"(",
"DeferredContext",
"(",
"jws_client",
".",
"get",
"(",
"url",
".",
"asText",
"(",
")",
")",
")",
".",
"addCallback",
"(",
"json_content",
")",
".",
"addCallback",
"(",
"messages",
".",
"Directory",
".",
"from_json",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"d",
":",
"action",
".",
"add_success_fields",
"(",
"directory",
"=",
"d",
")",
")",
")",
".",
"addCallback",
"(",
"cls",
",",
"reactor",
",",
"key",
",",
"jws_client",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`]
|
[
"Construct",
"a",
"client",
"from",
"an",
"ACME",
"directory",
"at",
"a",
"given",
"URL",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L109-L138
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.register
|
def register(self, new_reg=None):
"""
Create a new registration with the ACME server.
:param ~acme.messages.NewRegistration new_reg: The registration message
to use, or ``None`` to construct one.
:return: The registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
"""
if new_reg is None:
new_reg = messages.NewRegistration()
action = LOG_ACME_REGISTER(registration=new_reg)
with action.context():
return (
DeferredContext(
self.update_registration(
new_reg, uri=self.directory[new_reg]))
.addErrback(self._maybe_registered, new_reg)
.addCallback(
tap(lambda r: action.add_success_fields(registration=r)))
.addActionFinish())
|
python
|
def register(self, new_reg=None):
"""
Create a new registration with the ACME server.
:param ~acme.messages.NewRegistration new_reg: The registration message
to use, or ``None`` to construct one.
:return: The registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
"""
if new_reg is None:
new_reg = messages.NewRegistration()
action = LOG_ACME_REGISTER(registration=new_reg)
with action.context():
return (
DeferredContext(
self.update_registration(
new_reg, uri=self.directory[new_reg]))
.addErrback(self._maybe_registered, new_reg)
.addCallback(
tap(lambda r: action.add_success_fields(registration=r)))
.addActionFinish())
|
[
"def",
"register",
"(",
"self",
",",
"new_reg",
"=",
"None",
")",
":",
"if",
"new_reg",
"is",
"None",
":",
"new_reg",
"=",
"messages",
".",
"NewRegistration",
"(",
")",
"action",
"=",
"LOG_ACME_REGISTER",
"(",
"registration",
"=",
"new_reg",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"update_registration",
"(",
"new_reg",
",",
"uri",
"=",
"self",
".",
"directory",
"[",
"new_reg",
"]",
")",
")",
".",
"addErrback",
"(",
"self",
".",
"_maybe_registered",
",",
"new_reg",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"r",
":",
"action",
".",
"add_success_fields",
"(",
"registration",
"=",
"r",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Create a new registration with the ACME server.
:param ~acme.messages.NewRegistration new_reg: The registration message
to use, or ``None`` to construct one.
:return: The registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
|
[
"Create",
"a",
"new",
"registration",
"with",
"the",
"ACME",
"server",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L140-L161
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._maybe_location
|
def _maybe_location(cls, response, uri=None):
"""
Get the Location: if there is one.
"""
location = response.headers.getRawHeaders(b'location', [None])[0]
if location is not None:
return location.decode('ascii')
return uri
|
python
|
def _maybe_location(cls, response, uri=None):
"""
Get the Location: if there is one.
"""
location = response.headers.getRawHeaders(b'location', [None])[0]
if location is not None:
return location.decode('ascii')
return uri
|
[
"def",
"_maybe_location",
"(",
"cls",
",",
"response",
",",
"uri",
"=",
"None",
")",
":",
"location",
"=",
"response",
".",
"headers",
".",
"getRawHeaders",
"(",
"b'location'",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"if",
"location",
"is",
"not",
"None",
":",
"return",
"location",
".",
"decode",
"(",
"'ascii'",
")",
"return",
"uri"
] |
Get the Location: if there is one.
|
[
"Get",
"the",
"Location",
":",
"if",
"there",
"is",
"one",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L164-L171
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._maybe_registered
|
def _maybe_registered(self, failure, new_reg):
"""
If the registration already exists, we should just load it.
"""
failure.trap(ServerError)
response = failure.value.response
if response.code == http.CONFLICT:
reg = new_reg.update(
resource=messages.UpdateRegistration.resource_type)
uri = self._maybe_location(response)
return self.update_registration(reg, uri=uri)
return failure
|
python
|
def _maybe_registered(self, failure, new_reg):
"""
If the registration already exists, we should just load it.
"""
failure.trap(ServerError)
response = failure.value.response
if response.code == http.CONFLICT:
reg = new_reg.update(
resource=messages.UpdateRegistration.resource_type)
uri = self._maybe_location(response)
return self.update_registration(reg, uri=uri)
return failure
|
[
"def",
"_maybe_registered",
"(",
"self",
",",
"failure",
",",
"new_reg",
")",
":",
"failure",
".",
"trap",
"(",
"ServerError",
")",
"response",
"=",
"failure",
".",
"value",
".",
"response",
"if",
"response",
".",
"code",
"==",
"http",
".",
"CONFLICT",
":",
"reg",
"=",
"new_reg",
".",
"update",
"(",
"resource",
"=",
"messages",
".",
"UpdateRegistration",
".",
"resource_type",
")",
"uri",
"=",
"self",
".",
"_maybe_location",
"(",
"response",
")",
"return",
"self",
".",
"update_registration",
"(",
"reg",
",",
"uri",
"=",
"uri",
")",
"return",
"failure"
] |
If the registration already exists, we should just load it.
|
[
"If",
"the",
"registration",
"already",
"exists",
"we",
"should",
"just",
"load",
"it",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L173-L184
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.agree_to_tos
|
def agree_to_tos(self, regr):
"""
Accept the terms-of-service for a registration.
:param ~acme.messages.RegistrationResource regr: The registration to
update.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
"""
return self.update_registration(
regr.update(
body=regr.body.update(
agreement=regr.terms_of_service)))
|
python
|
def agree_to_tos(self, regr):
"""
Accept the terms-of-service for a registration.
:param ~acme.messages.RegistrationResource regr: The registration to
update.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
"""
return self.update_registration(
regr.update(
body=regr.body.update(
agreement=regr.terms_of_service)))
|
[
"def",
"agree_to_tos",
"(",
"self",
",",
"regr",
")",
":",
"return",
"self",
".",
"update_registration",
"(",
"regr",
".",
"update",
"(",
"body",
"=",
"regr",
".",
"body",
".",
"update",
"(",
"agreement",
"=",
"regr",
".",
"terms_of_service",
")",
")",
")"
] |
Accept the terms-of-service for a registration.
:param ~acme.messages.RegistrationResource regr: The registration to
update.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
|
[
"Accept",
"the",
"terms",
"-",
"of",
"-",
"service",
"for",
"a",
"registration",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L186-L199
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.update_registration
|
def update_registration(self, regr, uri=None):
"""
Submit a registration to the server to update it.
:param ~acme.messages.RegistrationResource regr: The registration to
update. Can be a :class:`~acme.messages.NewRegistration` instead,
in order to create a new registration.
:param str uri: The url to submit to. Must be
specified if a :class:`~acme.messages.NewRegistration` is provided.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
"""
if uri is None:
uri = regr.uri
if isinstance(regr, messages.RegistrationResource):
message = messages.UpdateRegistration(**dict(regr.body))
else:
message = regr
action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
with action.context():
return (
DeferredContext(self._client.post(uri, message))
.addCallback(self._parse_regr_response, uri=uri)
.addCallback(self._check_regr, regr)
.addCallback(
tap(lambda r: action.add_success_fields(registration=r)))
.addActionFinish())
|
python
|
def update_registration(self, regr, uri=None):
"""
Submit a registration to the server to update it.
:param ~acme.messages.RegistrationResource regr: The registration to
update. Can be a :class:`~acme.messages.NewRegistration` instead,
in order to create a new registration.
:param str uri: The url to submit to. Must be
specified if a :class:`~acme.messages.NewRegistration` is provided.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
"""
if uri is None:
uri = regr.uri
if isinstance(regr, messages.RegistrationResource):
message = messages.UpdateRegistration(**dict(regr.body))
else:
message = regr
action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
with action.context():
return (
DeferredContext(self._client.post(uri, message))
.addCallback(self._parse_regr_response, uri=uri)
.addCallback(self._check_regr, regr)
.addCallback(
tap(lambda r: action.add_success_fields(registration=r)))
.addActionFinish())
|
[
"def",
"update_registration",
"(",
"self",
",",
"regr",
",",
"uri",
"=",
"None",
")",
":",
"if",
"uri",
"is",
"None",
":",
"uri",
"=",
"regr",
".",
"uri",
"if",
"isinstance",
"(",
"regr",
",",
"messages",
".",
"RegistrationResource",
")",
":",
"message",
"=",
"messages",
".",
"UpdateRegistration",
"(",
"*",
"*",
"dict",
"(",
"regr",
".",
"body",
")",
")",
"else",
":",
"message",
"=",
"regr",
"action",
"=",
"LOG_ACME_UPDATE_REGISTRATION",
"(",
"uri",
"=",
"uri",
",",
"registration",
"=",
"message",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"post",
"(",
"uri",
",",
"message",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_parse_regr_response",
",",
"uri",
"=",
"uri",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_regr",
",",
"regr",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"r",
":",
"action",
".",
"add_success_fields",
"(",
"registration",
"=",
"r",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Submit a registration to the server to update it.
:param ~acme.messages.RegistrationResource regr: The registration to
update. Can be a :class:`~acme.messages.NewRegistration` instead,
in order to create a new registration.
:param str uri: The url to submit to. Must be
specified if a :class:`~acme.messages.NewRegistration` is provided.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
|
[
"Submit",
"a",
"registration",
"to",
"the",
"server",
"to",
"update",
"it",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L201-L228
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._parse_regr_response
|
def _parse_regr_response(self, response, uri=None, new_authzr_uri=None,
terms_of_service=None):
"""
Parse a registration response from the server.
"""
links = _parse_header_links(response)
if u'terms-of-service' in links:
terms_of_service = links[u'terms-of-service'][u'url']
if u'next' in links:
new_authzr_uri = links[u'next'][u'url']
if new_authzr_uri is None:
raise errors.ClientError('"next" link missing')
return (
response.json()
.addCallback(
lambda body:
messages.RegistrationResource(
body=messages.Registration.from_json(body),
uri=self._maybe_location(response, uri=uri),
new_authzr_uri=new_authzr_uri,
terms_of_service=terms_of_service))
)
|
python
|
def _parse_regr_response(self, response, uri=None, new_authzr_uri=None,
terms_of_service=None):
"""
Parse a registration response from the server.
"""
links = _parse_header_links(response)
if u'terms-of-service' in links:
terms_of_service = links[u'terms-of-service'][u'url']
if u'next' in links:
new_authzr_uri = links[u'next'][u'url']
if new_authzr_uri is None:
raise errors.ClientError('"next" link missing')
return (
response.json()
.addCallback(
lambda body:
messages.RegistrationResource(
body=messages.Registration.from_json(body),
uri=self._maybe_location(response, uri=uri),
new_authzr_uri=new_authzr_uri,
terms_of_service=terms_of_service))
)
|
[
"def",
"_parse_regr_response",
"(",
"self",
",",
"response",
",",
"uri",
"=",
"None",
",",
"new_authzr_uri",
"=",
"None",
",",
"terms_of_service",
"=",
"None",
")",
":",
"links",
"=",
"_parse_header_links",
"(",
"response",
")",
"if",
"u'terms-of-service'",
"in",
"links",
":",
"terms_of_service",
"=",
"links",
"[",
"u'terms-of-service'",
"]",
"[",
"u'url'",
"]",
"if",
"u'next'",
"in",
"links",
":",
"new_authzr_uri",
"=",
"links",
"[",
"u'next'",
"]",
"[",
"u'url'",
"]",
"if",
"new_authzr_uri",
"is",
"None",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"'\"next\" link missing'",
")",
"return",
"(",
"response",
".",
"json",
"(",
")",
".",
"addCallback",
"(",
"lambda",
"body",
":",
"messages",
".",
"RegistrationResource",
"(",
"body",
"=",
"messages",
".",
"Registration",
".",
"from_json",
"(",
"body",
")",
",",
"uri",
"=",
"self",
".",
"_maybe_location",
"(",
"response",
",",
"uri",
"=",
"uri",
")",
",",
"new_authzr_uri",
"=",
"new_authzr_uri",
",",
"terms_of_service",
"=",
"terms_of_service",
")",
")",
")"
] |
Parse a registration response from the server.
|
[
"Parse",
"a",
"registration",
"response",
"from",
"the",
"server",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L230-L251
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._check_regr
|
def _check_regr(self, regr, new_reg):
"""
Check that a registration response contains the registration we were
expecting.
"""
body = getattr(new_reg, 'body', new_reg)
for k, v in body.items():
if k == 'resource' or not v:
continue
if regr.body[k] != v:
raise errors.UnexpectedUpdate(regr)
if regr.body.key != self.key.public_key():
raise errors.UnexpectedUpdate(regr)
return regr
|
python
|
def _check_regr(self, regr, new_reg):
"""
Check that a registration response contains the registration we were
expecting.
"""
body = getattr(new_reg, 'body', new_reg)
for k, v in body.items():
if k == 'resource' or not v:
continue
if regr.body[k] != v:
raise errors.UnexpectedUpdate(regr)
if regr.body.key != self.key.public_key():
raise errors.UnexpectedUpdate(regr)
return regr
|
[
"def",
"_check_regr",
"(",
"self",
",",
"regr",
",",
"new_reg",
")",
":",
"body",
"=",
"getattr",
"(",
"new_reg",
",",
"'body'",
",",
"new_reg",
")",
"for",
"k",
",",
"v",
"in",
"body",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'resource'",
"or",
"not",
"v",
":",
"continue",
"if",
"regr",
".",
"body",
"[",
"k",
"]",
"!=",
"v",
":",
"raise",
"errors",
".",
"UnexpectedUpdate",
"(",
"regr",
")",
"if",
"regr",
".",
"body",
".",
"key",
"!=",
"self",
".",
"key",
".",
"public_key",
"(",
")",
":",
"raise",
"errors",
".",
"UnexpectedUpdate",
"(",
"regr",
")",
"return",
"regr"
] |
Check that a registration response contains the registration we were
expecting.
|
[
"Check",
"that",
"a",
"registration",
"response",
"contains",
"the",
"registration",
"we",
"were",
"expecting",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L253-L266
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.request_challenges
|
def request_challenges(self, identifier):
"""
Create a new authorization.
:param ~acme.messages.Identifier identifier: The identifier to
authorize.
:return: The new authorization resource.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
"""
action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
with action.context():
message = messages.NewAuthorization(identifier=identifier)
return (
DeferredContext(
self._client.post(self.directory[message], message))
.addCallback(self._expect_response, http.CREATED)
.addCallback(self._parse_authorization)
.addCallback(self._check_authorization, identifier)
.addCallback(
tap(lambda a: action.add_success_fields(authorization=a)))
.addActionFinish())
|
python
|
def request_challenges(self, identifier):
"""
Create a new authorization.
:param ~acme.messages.Identifier identifier: The identifier to
authorize.
:return: The new authorization resource.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
"""
action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
with action.context():
message = messages.NewAuthorization(identifier=identifier)
return (
DeferredContext(
self._client.post(self.directory[message], message))
.addCallback(self._expect_response, http.CREATED)
.addCallback(self._parse_authorization)
.addCallback(self._check_authorization, identifier)
.addCallback(
tap(lambda a: action.add_success_fields(authorization=a)))
.addActionFinish())
|
[
"def",
"request_challenges",
"(",
"self",
",",
"identifier",
")",
":",
"action",
"=",
"LOG_ACME_CREATE_AUTHORIZATION",
"(",
"identifier",
"=",
"identifier",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"message",
"=",
"messages",
".",
"NewAuthorization",
"(",
"identifier",
"=",
"identifier",
")",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"post",
"(",
"self",
".",
"directory",
"[",
"message",
"]",
",",
"message",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_expect_response",
",",
"http",
".",
"CREATED",
")",
".",
"addCallback",
"(",
"self",
".",
"_parse_authorization",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_authorization",
",",
"identifier",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"a",
":",
"action",
".",
"add_success_fields",
"(",
"authorization",
"=",
"a",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Create a new authorization.
:param ~acme.messages.Identifier identifier: The identifier to
authorize.
:return: The new authorization resource.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
|
[
"Create",
"a",
"new",
"authorization",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L268-L289
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._expect_response
|
def _expect_response(cls, response, code):
"""
Ensure we got the expected response code.
"""
if response.code != code:
raise errors.ClientError(
'Expected {!r} response but got {!r}'.format(
code, response.code))
return response
|
python
|
def _expect_response(cls, response, code):
"""
Ensure we got the expected response code.
"""
if response.code != code:
raise errors.ClientError(
'Expected {!r} response but got {!r}'.format(
code, response.code))
return response
|
[
"def",
"_expect_response",
"(",
"cls",
",",
"response",
",",
"code",
")",
":",
"if",
"response",
".",
"code",
"!=",
"code",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"'Expected {!r} response but got {!r}'",
".",
"format",
"(",
"code",
",",
"response",
".",
"code",
")",
")",
"return",
"response"
] |
Ensure we got the expected response code.
|
[
"Ensure",
"we",
"got",
"the",
"expected",
"response",
"code",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L292-L300
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._parse_authorization
|
def _parse_authorization(cls, response, uri=None):
"""
Parse an authorization resource.
"""
links = _parse_header_links(response)
try:
new_cert_uri = links[u'next'][u'url']
except KeyError:
raise errors.ClientError('"next" link missing')
return (
response.json()
.addCallback(
lambda body: messages.AuthorizationResource(
body=messages.Authorization.from_json(body),
uri=cls._maybe_location(response, uri=uri),
new_cert_uri=new_cert_uri))
)
|
python
|
def _parse_authorization(cls, response, uri=None):
"""
Parse an authorization resource.
"""
links = _parse_header_links(response)
try:
new_cert_uri = links[u'next'][u'url']
except KeyError:
raise errors.ClientError('"next" link missing')
return (
response.json()
.addCallback(
lambda body: messages.AuthorizationResource(
body=messages.Authorization.from_json(body),
uri=cls._maybe_location(response, uri=uri),
new_cert_uri=new_cert_uri))
)
|
[
"def",
"_parse_authorization",
"(",
"cls",
",",
"response",
",",
"uri",
"=",
"None",
")",
":",
"links",
"=",
"_parse_header_links",
"(",
"response",
")",
"try",
":",
"new_cert_uri",
"=",
"links",
"[",
"u'next'",
"]",
"[",
"u'url'",
"]",
"except",
"KeyError",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"'\"next\" link missing'",
")",
"return",
"(",
"response",
".",
"json",
"(",
")",
".",
"addCallback",
"(",
"lambda",
"body",
":",
"messages",
".",
"AuthorizationResource",
"(",
"body",
"=",
"messages",
".",
"Authorization",
".",
"from_json",
"(",
"body",
")",
",",
"uri",
"=",
"cls",
".",
"_maybe_location",
"(",
"response",
",",
"uri",
"=",
"uri",
")",
",",
"new_cert_uri",
"=",
"new_cert_uri",
")",
")",
")"
] |
Parse an authorization resource.
|
[
"Parse",
"an",
"authorization",
"resource",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L303-L319
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._check_authorization
|
def _check_authorization(cls, authzr, identifier):
"""
Check that the authorization we got is the one we expected.
"""
if authzr.body.identifier != identifier:
raise errors.UnexpectedUpdate(authzr)
return authzr
|
python
|
def _check_authorization(cls, authzr, identifier):
"""
Check that the authorization we got is the one we expected.
"""
if authzr.body.identifier != identifier:
raise errors.UnexpectedUpdate(authzr)
return authzr
|
[
"def",
"_check_authorization",
"(",
"cls",
",",
"authzr",
",",
"identifier",
")",
":",
"if",
"authzr",
".",
"body",
".",
"identifier",
"!=",
"identifier",
":",
"raise",
"errors",
".",
"UnexpectedUpdate",
"(",
"authzr",
")",
"return",
"authzr"
] |
Check that the authorization we got is the one we expected.
|
[
"Check",
"that",
"the",
"authorization",
"we",
"got",
"is",
"the",
"one",
"we",
"expected",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L322-L328
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.answer_challenge
|
def answer_challenge(self, challenge_body, response):
"""
Respond to an authorization challenge.
:param ~acme.messages.ChallengeBody challenge_body: The challenge being
responded to.
:param ~acme.challenges.ChallengeResponse response: The response to the
challenge.
:return: The updated challenge resource.
:rtype: Deferred[`~acme.messages.ChallengeResource`]
"""
action = LOG_ACME_ANSWER_CHALLENGE(
challenge_body=challenge_body, response=response)
with action.context():
return (
DeferredContext(
self._client.post(challenge_body.uri, response))
.addCallback(self._parse_challenge)
.addCallback(self._check_challenge, challenge_body)
.addCallback(
tap(lambda c:
action.add_success_fields(challenge_resource=c)))
.addActionFinish())
|
python
|
def answer_challenge(self, challenge_body, response):
"""
Respond to an authorization challenge.
:param ~acme.messages.ChallengeBody challenge_body: The challenge being
responded to.
:param ~acme.challenges.ChallengeResponse response: The response to the
challenge.
:return: The updated challenge resource.
:rtype: Deferred[`~acme.messages.ChallengeResource`]
"""
action = LOG_ACME_ANSWER_CHALLENGE(
challenge_body=challenge_body, response=response)
with action.context():
return (
DeferredContext(
self._client.post(challenge_body.uri, response))
.addCallback(self._parse_challenge)
.addCallback(self._check_challenge, challenge_body)
.addCallback(
tap(lambda c:
action.add_success_fields(challenge_resource=c)))
.addActionFinish())
|
[
"def",
"answer_challenge",
"(",
"self",
",",
"challenge_body",
",",
"response",
")",
":",
"action",
"=",
"LOG_ACME_ANSWER_CHALLENGE",
"(",
"challenge_body",
"=",
"challenge_body",
",",
"response",
"=",
"response",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"post",
"(",
"challenge_body",
".",
"uri",
",",
"response",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_parse_challenge",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_challenge",
",",
"challenge_body",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"c",
":",
"action",
".",
"add_success_fields",
"(",
"challenge_resource",
"=",
"c",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Respond to an authorization challenge.
:param ~acme.messages.ChallengeBody challenge_body: The challenge being
responded to.
:param ~acme.challenges.ChallengeResponse response: The response to the
challenge.
:return: The updated challenge resource.
:rtype: Deferred[`~acme.messages.ChallengeResource`]
|
[
"Respond",
"to",
"an",
"authorization",
"challenge",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L330-L353
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._parse_challenge
|
def _parse_challenge(cls, response):
"""
Parse a challenge resource.
"""
links = _parse_header_links(response)
try:
authzr_uri = links['up']['url']
except KeyError:
raise errors.ClientError('"up" link missing')
return (
response.json()
.addCallback(
lambda body: messages.ChallengeResource(
authzr_uri=authzr_uri,
body=messages.ChallengeBody.from_json(body)))
)
|
python
|
def _parse_challenge(cls, response):
"""
Parse a challenge resource.
"""
links = _parse_header_links(response)
try:
authzr_uri = links['up']['url']
except KeyError:
raise errors.ClientError('"up" link missing')
return (
response.json()
.addCallback(
lambda body: messages.ChallengeResource(
authzr_uri=authzr_uri,
body=messages.ChallengeBody.from_json(body)))
)
|
[
"def",
"_parse_challenge",
"(",
"cls",
",",
"response",
")",
":",
"links",
"=",
"_parse_header_links",
"(",
"response",
")",
"try",
":",
"authzr_uri",
"=",
"links",
"[",
"'up'",
"]",
"[",
"'url'",
"]",
"except",
"KeyError",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"'\"up\" link missing'",
")",
"return",
"(",
"response",
".",
"json",
"(",
")",
".",
"addCallback",
"(",
"lambda",
"body",
":",
"messages",
".",
"ChallengeResource",
"(",
"authzr_uri",
"=",
"authzr_uri",
",",
"body",
"=",
"messages",
".",
"ChallengeBody",
".",
"from_json",
"(",
"body",
")",
")",
")",
")"
] |
Parse a challenge resource.
|
[
"Parse",
"a",
"challenge",
"resource",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L356-L371
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._check_challenge
|
def _check_challenge(cls, challenge, challenge_body):
"""
Check that the challenge resource we got is the one we expected.
"""
if challenge.uri != challenge_body.uri:
raise errors.UnexpectedUpdate(challenge.uri)
return challenge
|
python
|
def _check_challenge(cls, challenge, challenge_body):
"""
Check that the challenge resource we got is the one we expected.
"""
if challenge.uri != challenge_body.uri:
raise errors.UnexpectedUpdate(challenge.uri)
return challenge
|
[
"def",
"_check_challenge",
"(",
"cls",
",",
"challenge",
",",
"challenge_body",
")",
":",
"if",
"challenge",
".",
"uri",
"!=",
"challenge_body",
".",
"uri",
":",
"raise",
"errors",
".",
"UnexpectedUpdate",
"(",
"challenge",
".",
"uri",
")",
"return",
"challenge"
] |
Check that the challenge resource we got is the one we expected.
|
[
"Check",
"that",
"the",
"challenge",
"resource",
"we",
"got",
"is",
"the",
"one",
"we",
"expected",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L374-L380
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.poll
|
def poll(self, authzr):
"""
Update an authorization from the server (usually to check its status).
"""
action = LOG_ACME_POLL_AUTHORIZATION(authorization=authzr)
with action.context():
return (
DeferredContext(self._client.get(authzr.uri))
# Spec says we should get 202 while pending, Boulder actually
# sends us 200 always, so just don't check.
# .addCallback(self._expect_response, http.ACCEPTED)
.addCallback(
lambda res:
self._parse_authorization(res, uri=authzr.uri)
.addCallback(
self._check_authorization, authzr.body.identifier)
.addCallback(
lambda authzr:
(authzr,
self.retry_after(res, _now=self._clock.seconds)))
)
.addCallback(tap(
lambda a_r: action.add_success_fields(
authorization=a_r[0], retry_after=a_r[1])))
.addActionFinish())
|
python
|
def poll(self, authzr):
"""
Update an authorization from the server (usually to check its status).
"""
action = LOG_ACME_POLL_AUTHORIZATION(authorization=authzr)
with action.context():
return (
DeferredContext(self._client.get(authzr.uri))
# Spec says we should get 202 while pending, Boulder actually
# sends us 200 always, so just don't check.
# .addCallback(self._expect_response, http.ACCEPTED)
.addCallback(
lambda res:
self._parse_authorization(res, uri=authzr.uri)
.addCallback(
self._check_authorization, authzr.body.identifier)
.addCallback(
lambda authzr:
(authzr,
self.retry_after(res, _now=self._clock.seconds)))
)
.addCallback(tap(
lambda a_r: action.add_success_fields(
authorization=a_r[0], retry_after=a_r[1])))
.addActionFinish())
|
[
"def",
"poll",
"(",
"self",
",",
"authzr",
")",
":",
"action",
"=",
"LOG_ACME_POLL_AUTHORIZATION",
"(",
"authorization",
"=",
"authzr",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"get",
"(",
"authzr",
".",
"uri",
")",
")",
"# Spec says we should get 202 while pending, Boulder actually",
"# sends us 200 always, so just don't check.",
"# .addCallback(self._expect_response, http.ACCEPTED)",
".",
"addCallback",
"(",
"lambda",
"res",
":",
"self",
".",
"_parse_authorization",
"(",
"res",
",",
"uri",
"=",
"authzr",
".",
"uri",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_authorization",
",",
"authzr",
".",
"body",
".",
"identifier",
")",
".",
"addCallback",
"(",
"lambda",
"authzr",
":",
"(",
"authzr",
",",
"self",
".",
"retry_after",
"(",
"res",
",",
"_now",
"=",
"self",
".",
"_clock",
".",
"seconds",
")",
")",
")",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"a_r",
":",
"action",
".",
"add_success_fields",
"(",
"authorization",
"=",
"a_r",
"[",
"0",
"]",
",",
"retry_after",
"=",
"a_r",
"[",
"1",
"]",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Update an authorization from the server (usually to check its status).
|
[
"Update",
"an",
"authorization",
"from",
"the",
"server",
"(",
"usually",
"to",
"check",
"its",
"status",
")",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L382-L406
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.retry_after
|
def retry_after(cls, response, default=5, _now=time.time):
"""
Parse the Retry-After value from a response.
"""
val = response.headers.getRawHeaders(b'retry-after', [default])[0]
try:
return int(val)
except ValueError:
return http.stringToDatetime(val) - _now()
|
python
|
def retry_after(cls, response, default=5, _now=time.time):
"""
Parse the Retry-After value from a response.
"""
val = response.headers.getRawHeaders(b'retry-after', [default])[0]
try:
return int(val)
except ValueError:
return http.stringToDatetime(val) - _now()
|
[
"def",
"retry_after",
"(",
"cls",
",",
"response",
",",
"default",
"=",
"5",
",",
"_now",
"=",
"time",
".",
"time",
")",
":",
"val",
"=",
"response",
".",
"headers",
".",
"getRawHeaders",
"(",
"b'retry-after'",
",",
"[",
"default",
"]",
")",
"[",
"0",
"]",
"try",
":",
"return",
"int",
"(",
"val",
")",
"except",
"ValueError",
":",
"return",
"http",
".",
"stringToDatetime",
"(",
"val",
")",
"-",
"_now",
"(",
")"
] |
Parse the Retry-After value from a response.
|
[
"Parse",
"the",
"Retry",
"-",
"After",
"value",
"from",
"a",
"response",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L409-L417
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.request_issuance
|
def request_issuance(self, csr):
"""
Request a certificate.
Authorizations should have already been completed for all of the names
requested in the CSR.
Note that unlike `acme.client.Client.request_issuance`, the certificate
resource will have the body data as raw bytes.
.. seealso:: `txacme.util.csr_for_names`
.. todo:: Delayed issuance is not currently supported, the server must
issue the requested certificate immediately.
:param csr: A certificate request message: normally
`txacme.messages.CertificateRequest` or
`acme.messages.CertificateRequest`.
:rtype: Deferred[`acme.messages.CertificateResource`]
:return: The issued certificate.
"""
action = LOG_ACME_REQUEST_CERTIFICATE()
with action.context():
return (
DeferredContext(
self._client.post(
self.directory[csr], csr,
content_type=DER_CONTENT_TYPE,
headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
.addCallback(self._expect_response, http.CREATED)
.addCallback(self._parse_certificate)
.addActionFinish())
|
python
|
def request_issuance(self, csr):
"""
Request a certificate.
Authorizations should have already been completed for all of the names
requested in the CSR.
Note that unlike `acme.client.Client.request_issuance`, the certificate
resource will have the body data as raw bytes.
.. seealso:: `txacme.util.csr_for_names`
.. todo:: Delayed issuance is not currently supported, the server must
issue the requested certificate immediately.
:param csr: A certificate request message: normally
`txacme.messages.CertificateRequest` or
`acme.messages.CertificateRequest`.
:rtype: Deferred[`acme.messages.CertificateResource`]
:return: The issued certificate.
"""
action = LOG_ACME_REQUEST_CERTIFICATE()
with action.context():
return (
DeferredContext(
self._client.post(
self.directory[csr], csr,
content_type=DER_CONTENT_TYPE,
headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
.addCallback(self._expect_response, http.CREATED)
.addCallback(self._parse_certificate)
.addActionFinish())
|
[
"def",
"request_issuance",
"(",
"self",
",",
"csr",
")",
":",
"action",
"=",
"LOG_ACME_REQUEST_CERTIFICATE",
"(",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"post",
"(",
"self",
".",
"directory",
"[",
"csr",
"]",
",",
"csr",
",",
"content_type",
"=",
"DER_CONTENT_TYPE",
",",
"headers",
"=",
"Headers",
"(",
"{",
"b'Accept'",
":",
"[",
"DER_CONTENT_TYPE",
"]",
"}",
")",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_expect_response",
",",
"http",
".",
"CREATED",
")",
".",
"addCallback",
"(",
"self",
".",
"_parse_certificate",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Request a certificate.
Authorizations should have already been completed for all of the names
requested in the CSR.
Note that unlike `acme.client.Client.request_issuance`, the certificate
resource will have the body data as raw bytes.
.. seealso:: `txacme.util.csr_for_names`
.. todo:: Delayed issuance is not currently supported, the server must
issue the requested certificate immediately.
:param csr: A certificate request message: normally
`txacme.messages.CertificateRequest` or
`acme.messages.CertificateRequest`.
:rtype: Deferred[`acme.messages.CertificateResource`]
:return: The issued certificate.
|
[
"Request",
"a",
"certificate",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L419-L451
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client._parse_certificate
|
def _parse_certificate(cls, response):
"""
Parse a response containing a certificate resource.
"""
links = _parse_header_links(response)
try:
cert_chain_uri = links[u'up'][u'url']
except KeyError:
cert_chain_uri = None
return (
response.content()
.addCallback(
lambda body: messages.CertificateResource(
uri=cls._maybe_location(response),
cert_chain_uri=cert_chain_uri,
body=body))
)
|
python
|
def _parse_certificate(cls, response):
"""
Parse a response containing a certificate resource.
"""
links = _parse_header_links(response)
try:
cert_chain_uri = links[u'up'][u'url']
except KeyError:
cert_chain_uri = None
return (
response.content()
.addCallback(
lambda body: messages.CertificateResource(
uri=cls._maybe_location(response),
cert_chain_uri=cert_chain_uri,
body=body))
)
|
[
"def",
"_parse_certificate",
"(",
"cls",
",",
"response",
")",
":",
"links",
"=",
"_parse_header_links",
"(",
"response",
")",
"try",
":",
"cert_chain_uri",
"=",
"links",
"[",
"u'up'",
"]",
"[",
"u'url'",
"]",
"except",
"KeyError",
":",
"cert_chain_uri",
"=",
"None",
"return",
"(",
"response",
".",
"content",
"(",
")",
".",
"addCallback",
"(",
"lambda",
"body",
":",
"messages",
".",
"CertificateResource",
"(",
"uri",
"=",
"cls",
".",
"_maybe_location",
"(",
"response",
")",
",",
"cert_chain_uri",
"=",
"cert_chain_uri",
",",
"body",
"=",
"body",
")",
")",
")"
] |
Parse a response containing a certificate resource.
|
[
"Parse",
"a",
"response",
"containing",
"a",
"certificate",
"resource",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L454-L470
|
train
|
twisted/txacme
|
src/txacme/client.py
|
Client.fetch_chain
|
def fetch_chain(self, certr, max_length=10):
"""
Fetch the intermediary chain for a certificate.
:param acme.messages.CertificateResource certr: The certificate to
fetch the chain for.
:param int max_length: The maximum length of the chain that will be
fetched.
:rtype: Deferred[List[`acme.messages.CertificateResource`]]
:return: The issuer certificate chain, ordered with the trust anchor
last.
"""
action = LOG_ACME_FETCH_CHAIN()
with action.context():
if certr.cert_chain_uri is None:
return succeed([])
elif max_length < 1:
raise errors.ClientError('chain too long')
return (
DeferredContext(
self._client.get(
certr.cert_chain_uri,
content_type=DER_CONTENT_TYPE,
headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
.addCallback(self._parse_certificate)
.addCallback(
lambda issuer:
self.fetch_chain(issuer, max_length=max_length - 1)
.addCallback(lambda chain: [issuer] + chain))
.addActionFinish())
|
python
|
def fetch_chain(self, certr, max_length=10):
"""
Fetch the intermediary chain for a certificate.
:param acme.messages.CertificateResource certr: The certificate to
fetch the chain for.
:param int max_length: The maximum length of the chain that will be
fetched.
:rtype: Deferred[List[`acme.messages.CertificateResource`]]
:return: The issuer certificate chain, ordered with the trust anchor
last.
"""
action = LOG_ACME_FETCH_CHAIN()
with action.context():
if certr.cert_chain_uri is None:
return succeed([])
elif max_length < 1:
raise errors.ClientError('chain too long')
return (
DeferredContext(
self._client.get(
certr.cert_chain_uri,
content_type=DER_CONTENT_TYPE,
headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
.addCallback(self._parse_certificate)
.addCallback(
lambda issuer:
self.fetch_chain(issuer, max_length=max_length - 1)
.addCallback(lambda chain: [issuer] + chain))
.addActionFinish())
|
[
"def",
"fetch_chain",
"(",
"self",
",",
"certr",
",",
"max_length",
"=",
"10",
")",
":",
"action",
"=",
"LOG_ACME_FETCH_CHAIN",
"(",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"if",
"certr",
".",
"cert_chain_uri",
"is",
"None",
":",
"return",
"succeed",
"(",
"[",
"]",
")",
"elif",
"max_length",
"<",
"1",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"'chain too long'",
")",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"get",
"(",
"certr",
".",
"cert_chain_uri",
",",
"content_type",
"=",
"DER_CONTENT_TYPE",
",",
"headers",
"=",
"Headers",
"(",
"{",
"b'Accept'",
":",
"[",
"DER_CONTENT_TYPE",
"]",
"}",
")",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_parse_certificate",
")",
".",
"addCallback",
"(",
"lambda",
"issuer",
":",
"self",
".",
"fetch_chain",
"(",
"issuer",
",",
"max_length",
"=",
"max_length",
"-",
"1",
")",
".",
"addCallback",
"(",
"lambda",
"chain",
":",
"[",
"issuer",
"]",
"+",
"chain",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Fetch the intermediary chain for a certificate.
:param acme.messages.CertificateResource certr: The certificate to
fetch the chain for.
:param int max_length: The maximum length of the chain that will be
fetched.
:rtype: Deferred[List[`acme.messages.CertificateResource`]]
:return: The issuer certificate chain, ordered with the trust anchor
last.
|
[
"Fetch",
"the",
"intermediary",
"chain",
"for",
"a",
"certificate",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L472-L502
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient._wrap_in_jws
|
def _wrap_in_jws(self, nonce, obj):
"""
Wrap ``JSONDeSerializable`` object in JWS.
.. todo:: Implement ``acmePath``.
:param ~josepy.interfaces.JSONDeSerializable obj:
:param bytes nonce:
:rtype: `bytes`
:return: JSON-encoded data
"""
with LOG_JWS_SIGN(key_type=self._key.typ, alg=self._alg.name,
nonce=nonce):
jobj = obj.json_dumps().encode()
return (
JWS.sign(
payload=jobj, key=self._key, alg=self._alg, nonce=nonce)
.json_dumps()
.encode())
|
python
|
def _wrap_in_jws(self, nonce, obj):
"""
Wrap ``JSONDeSerializable`` object in JWS.
.. todo:: Implement ``acmePath``.
:param ~josepy.interfaces.JSONDeSerializable obj:
:param bytes nonce:
:rtype: `bytes`
:return: JSON-encoded data
"""
with LOG_JWS_SIGN(key_type=self._key.typ, alg=self._alg.name,
nonce=nonce):
jobj = obj.json_dumps().encode()
return (
JWS.sign(
payload=jobj, key=self._key, alg=self._alg, nonce=nonce)
.json_dumps()
.encode())
|
[
"def",
"_wrap_in_jws",
"(",
"self",
",",
"nonce",
",",
"obj",
")",
":",
"with",
"LOG_JWS_SIGN",
"(",
"key_type",
"=",
"self",
".",
"_key",
".",
"typ",
",",
"alg",
"=",
"self",
".",
"_alg",
".",
"name",
",",
"nonce",
"=",
"nonce",
")",
":",
"jobj",
"=",
"obj",
".",
"json_dumps",
"(",
")",
".",
"encode",
"(",
")",
"return",
"(",
"JWS",
".",
"sign",
"(",
"payload",
"=",
"jobj",
",",
"key",
"=",
"self",
".",
"_key",
",",
"alg",
"=",
"self",
".",
"_alg",
",",
"nonce",
"=",
"nonce",
")",
".",
"json_dumps",
"(",
")",
".",
"encode",
"(",
")",
")"
] |
Wrap ``JSONDeSerializable`` object in JWS.
.. todo:: Implement ``acmePath``.
:param ~josepy.interfaces.JSONDeSerializable obj:
:param bytes nonce:
:rtype: `bytes`
:return: JSON-encoded data
|
[
"Wrap",
"JSONDeSerializable",
"object",
"in",
"JWS",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L678-L697
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient._check_response
|
def _check_response(cls, response, content_type=JSON_CONTENT_TYPE):
"""
Check response content and its type.
.. note::
Unlike :mod:`acme.client`, checking is strict.
:param bytes content_type: Expected Content-Type response header. If
the response Content-Type does not match, :exc:`ClientError` is
raised.
:raises .ServerError: If server response body carries HTTP Problem
(draft-ietf-appsawg-http-problem-00).
:raises ~acme.errors.ClientError: In case of other networking errors.
"""
def _got_failure(f):
f.trap(ValueError)
return None
def _got_json(jobj):
if 400 <= response.code < 600:
if response_ct == JSON_ERROR_CONTENT_TYPE and jobj is not None:
raise ServerError(
messages.Error.from_json(jobj), response)
else:
# response is not JSON object
raise errors.ClientError(response)
elif response_ct != content_type:
raise errors.ClientError(
'Unexpected response Content-Type: {0!r}'.format(
response_ct))
elif content_type == JSON_CONTENT_TYPE and jobj is None:
raise errors.ClientError(response)
return response
response_ct = response.headers.getRawHeaders(
b'Content-Type', [None])[0]
action = LOG_JWS_CHECK_RESPONSE(
expected_content_type=content_type,
response_content_type=response_ct)
with action.context():
# TODO: response.json() is called twice, once here, and
# once in _get and _post clients
return (
DeferredContext(response.json())
.addErrback(_got_failure)
.addCallback(_got_json)
.addActionFinish())
|
python
|
def _check_response(cls, response, content_type=JSON_CONTENT_TYPE):
"""
Check response content and its type.
.. note::
Unlike :mod:`acme.client`, checking is strict.
:param bytes content_type: Expected Content-Type response header. If
the response Content-Type does not match, :exc:`ClientError` is
raised.
:raises .ServerError: If server response body carries HTTP Problem
(draft-ietf-appsawg-http-problem-00).
:raises ~acme.errors.ClientError: In case of other networking errors.
"""
def _got_failure(f):
f.trap(ValueError)
return None
def _got_json(jobj):
if 400 <= response.code < 600:
if response_ct == JSON_ERROR_CONTENT_TYPE and jobj is not None:
raise ServerError(
messages.Error.from_json(jobj), response)
else:
# response is not JSON object
raise errors.ClientError(response)
elif response_ct != content_type:
raise errors.ClientError(
'Unexpected response Content-Type: {0!r}'.format(
response_ct))
elif content_type == JSON_CONTENT_TYPE and jobj is None:
raise errors.ClientError(response)
return response
response_ct = response.headers.getRawHeaders(
b'Content-Type', [None])[0]
action = LOG_JWS_CHECK_RESPONSE(
expected_content_type=content_type,
response_content_type=response_ct)
with action.context():
# TODO: response.json() is called twice, once here, and
# once in _get and _post clients
return (
DeferredContext(response.json())
.addErrback(_got_failure)
.addCallback(_got_json)
.addActionFinish())
|
[
"def",
"_check_response",
"(",
"cls",
",",
"response",
",",
"content_type",
"=",
"JSON_CONTENT_TYPE",
")",
":",
"def",
"_got_failure",
"(",
"f",
")",
":",
"f",
".",
"trap",
"(",
"ValueError",
")",
"return",
"None",
"def",
"_got_json",
"(",
"jobj",
")",
":",
"if",
"400",
"<=",
"response",
".",
"code",
"<",
"600",
":",
"if",
"response_ct",
"==",
"JSON_ERROR_CONTENT_TYPE",
"and",
"jobj",
"is",
"not",
"None",
":",
"raise",
"ServerError",
"(",
"messages",
".",
"Error",
".",
"from_json",
"(",
"jobj",
")",
",",
"response",
")",
"else",
":",
"# response is not JSON object",
"raise",
"errors",
".",
"ClientError",
"(",
"response",
")",
"elif",
"response_ct",
"!=",
"content_type",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"'Unexpected response Content-Type: {0!r}'",
".",
"format",
"(",
"response_ct",
")",
")",
"elif",
"content_type",
"==",
"JSON_CONTENT_TYPE",
"and",
"jobj",
"is",
"None",
":",
"raise",
"errors",
".",
"ClientError",
"(",
"response",
")",
"return",
"response",
"response_ct",
"=",
"response",
".",
"headers",
".",
"getRawHeaders",
"(",
"b'Content-Type'",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"action",
"=",
"LOG_JWS_CHECK_RESPONSE",
"(",
"expected_content_type",
"=",
"content_type",
",",
"response_content_type",
"=",
"response_ct",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"# TODO: response.json() is called twice, once here, and",
"# once in _get and _post clients",
"return",
"(",
"DeferredContext",
"(",
"response",
".",
"json",
"(",
")",
")",
".",
"addErrback",
"(",
"_got_failure",
")",
".",
"addCallback",
"(",
"_got_json",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Check response content and its type.
.. note::
Unlike :mod:`acme.client`, checking is strict.
:param bytes content_type: Expected Content-Type response header. If
the response Content-Type does not match, :exc:`ClientError` is
raised.
:raises .ServerError: If server response body carries HTTP Problem
(draft-ietf-appsawg-http-problem-00).
:raises ~acme.errors.ClientError: In case of other networking errors.
|
[
"Check",
"response",
"content",
"and",
"its",
"type",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L700-L748
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient._send_request
|
def _send_request(self, method, url, *args, **kwargs):
"""
Send HTTP request.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:return: Deferred firing with the HTTP response.
"""
action = LOG_JWS_REQUEST(url=url)
with action.context():
headers = kwargs.setdefault('headers', Headers())
headers.setRawHeaders(b'user-agent', [self._user_agent])
kwargs.setdefault('timeout', self.timeout)
return (
DeferredContext(
self._treq.request(method, url, *args, **kwargs))
.addCallback(
tap(lambda r: action.add_success_fields(
code=r.code,
content_type=r.headers.getRawHeaders(
b'content-type', [None])[0])))
.addActionFinish())
|
python
|
def _send_request(self, method, url, *args, **kwargs):
"""
Send HTTP request.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:return: Deferred firing with the HTTP response.
"""
action = LOG_JWS_REQUEST(url=url)
with action.context():
headers = kwargs.setdefault('headers', Headers())
headers.setRawHeaders(b'user-agent', [self._user_agent])
kwargs.setdefault('timeout', self.timeout)
return (
DeferredContext(
self._treq.request(method, url, *args, **kwargs))
.addCallback(
tap(lambda r: action.add_success_fields(
code=r.code,
content_type=r.headers.getRawHeaders(
b'content-type', [None])[0])))
.addActionFinish())
|
[
"def",
"_send_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"action",
"=",
"LOG_JWS_REQUEST",
"(",
"url",
"=",
"url",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"headers",
"=",
"kwargs",
".",
"setdefault",
"(",
"'headers'",
",",
"Headers",
"(",
")",
")",
"headers",
".",
"setRawHeaders",
"(",
"b'user-agent'",
",",
"[",
"self",
".",
"_user_agent",
"]",
")",
"kwargs",
".",
"setdefault",
"(",
"'timeout'",
",",
"self",
".",
"timeout",
")",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_treq",
".",
"request",
"(",
"method",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"r",
":",
"action",
".",
"add_success_fields",
"(",
"code",
"=",
"r",
".",
"code",
",",
"content_type",
"=",
"r",
".",
"headers",
".",
"getRawHeaders",
"(",
"b'content-type'",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Send HTTP request.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:return: Deferred firing with the HTTP response.
|
[
"Send",
"HTTP",
"request",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L750-L772
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient.head
|
def head(self, url, *args, **kwargs):
"""
Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to.
"""
with LOG_JWS_HEAD().context():
return DeferredContext(
self._send_request(u'HEAD', url, *args, **kwargs)
).addActionFinish()
|
python
|
def head(self, url, *args, **kwargs):
"""
Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to.
"""
with LOG_JWS_HEAD().context():
return DeferredContext(
self._send_request(u'HEAD', url, *args, **kwargs)
).addActionFinish()
|
[
"def",
"head",
"(",
"self",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"LOG_JWS_HEAD",
"(",
")",
".",
"context",
"(",
")",
":",
"return",
"DeferredContext",
"(",
"self",
".",
"_send_request",
"(",
"u'HEAD'",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
".",
"addActionFinish",
"(",
")"
] |
Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to.
|
[
"Send",
"HEAD",
"request",
"without",
"checking",
"the",
"response",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L774-L786
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient.get
|
def get(self, url, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
Send GET request and check response.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
:return: Deferred firing with the checked HTTP response.
"""
with LOG_JWS_GET().context():
return (
DeferredContext(self._send_request(u'GET', url, **kwargs))
.addCallback(self._check_response, content_type=content_type)
.addActionFinish())
|
python
|
def get(self, url, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
Send GET request and check response.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
:return: Deferred firing with the checked HTTP response.
"""
with LOG_JWS_GET().context():
return (
DeferredContext(self._send_request(u'GET', url, **kwargs))
.addCallback(self._check_response, content_type=content_type)
.addActionFinish())
|
[
"def",
"get",
"(",
"self",
",",
"url",
",",
"content_type",
"=",
"JSON_CONTENT_TYPE",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"LOG_JWS_GET",
"(",
")",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_send_request",
"(",
"u'GET'",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_response",
",",
"content_type",
"=",
"content_type",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Send GET request and check response.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
:return: Deferred firing with the checked HTTP response.
|
[
"Send",
"GET",
"request",
"and",
"check",
"response",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L788-L805
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient._add_nonce
|
def _add_nonce(self, response):
"""
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
"""
nonce = response.headers.getRawHeaders(
REPLAY_NONCE_HEADER, [None])[0]
with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action:
if nonce is None:
raise errors.MissingNonce(response)
else:
try:
decoded_nonce = Header._fields['nonce'].decode(
nonce.decode('ascii')
)
action.add_success_fields(nonce=decoded_nonce)
except DeserializationError as error:
raise errors.BadNonce(nonce, error)
self._nonces.add(decoded_nonce)
return response
|
python
|
def _add_nonce(self, response):
"""
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
"""
nonce = response.headers.getRawHeaders(
REPLAY_NONCE_HEADER, [None])[0]
with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action:
if nonce is None:
raise errors.MissingNonce(response)
else:
try:
decoded_nonce = Header._fields['nonce'].decode(
nonce.decode('ascii')
)
action.add_success_fields(nonce=decoded_nonce)
except DeserializationError as error:
raise errors.BadNonce(nonce, error)
self._nonces.add(decoded_nonce)
return response
|
[
"def",
"_add_nonce",
"(",
"self",
",",
"response",
")",
":",
"nonce",
"=",
"response",
".",
"headers",
".",
"getRawHeaders",
"(",
"REPLAY_NONCE_HEADER",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"with",
"LOG_JWS_ADD_NONCE",
"(",
"raw_nonce",
"=",
"nonce",
")",
"as",
"action",
":",
"if",
"nonce",
"is",
"None",
":",
"raise",
"errors",
".",
"MissingNonce",
"(",
"response",
")",
"else",
":",
"try",
":",
"decoded_nonce",
"=",
"Header",
".",
"_fields",
"[",
"'nonce'",
"]",
".",
"decode",
"(",
"nonce",
".",
"decode",
"(",
"'ascii'",
")",
")",
"action",
".",
"add_success_fields",
"(",
"nonce",
"=",
"decoded_nonce",
")",
"except",
"DeserializationError",
"as",
"error",
":",
"raise",
"errors",
".",
"BadNonce",
"(",
"nonce",
",",
"error",
")",
"self",
".",
"_nonces",
".",
"add",
"(",
"decoded_nonce",
")",
"return",
"response"
] |
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
|
[
"Store",
"a",
"nonce",
"from",
"a",
"response",
"we",
"received",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L807-L829
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient._get_nonce
|
def _get_nonce(self, url):
"""
Get a nonce to use in a request, removing it from the nonces on hand.
"""
action = LOG_JWS_GET_NONCE()
if len(self._nonces) > 0:
with action:
nonce = self._nonces.pop()
action.add_success_fields(nonce=nonce)
return succeed(nonce)
else:
with action.context():
return (
DeferredContext(self.head(url))
.addCallback(self._add_nonce)
.addCallback(lambda _: self._nonces.pop())
.addCallback(tap(
lambda nonce: action.add_success_fields(nonce=nonce)))
.addActionFinish())
|
python
|
def _get_nonce(self, url):
"""
Get a nonce to use in a request, removing it from the nonces on hand.
"""
action = LOG_JWS_GET_NONCE()
if len(self._nonces) > 0:
with action:
nonce = self._nonces.pop()
action.add_success_fields(nonce=nonce)
return succeed(nonce)
else:
with action.context():
return (
DeferredContext(self.head(url))
.addCallback(self._add_nonce)
.addCallback(lambda _: self._nonces.pop())
.addCallback(tap(
lambda nonce: action.add_success_fields(nonce=nonce)))
.addActionFinish())
|
[
"def",
"_get_nonce",
"(",
"self",
",",
"url",
")",
":",
"action",
"=",
"LOG_JWS_GET_NONCE",
"(",
")",
"if",
"len",
"(",
"self",
".",
"_nonces",
")",
">",
"0",
":",
"with",
"action",
":",
"nonce",
"=",
"self",
".",
"_nonces",
".",
"pop",
"(",
")",
"action",
".",
"add_success_fields",
"(",
"nonce",
"=",
"nonce",
")",
"return",
"succeed",
"(",
"nonce",
")",
"else",
":",
"with",
"action",
".",
"context",
"(",
")",
":",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"head",
"(",
"url",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_add_nonce",
")",
".",
"addCallback",
"(",
"lambda",
"_",
":",
"self",
".",
"_nonces",
".",
"pop",
"(",
")",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"nonce",
":",
"action",
".",
"add_success_fields",
"(",
"nonce",
"=",
"nonce",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
Get a nonce to use in a request, removing it from the nonces on hand.
|
[
"Get",
"a",
"nonce",
"to",
"use",
"in",
"a",
"request",
"removing",
"it",
"from",
"the",
"nonces",
"on",
"hand",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L831-L849
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient._post
|
def _post(self, url, obj, content_type, **kwargs):
"""
POST an object and check the response.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
"""
with LOG_JWS_POST().context():
headers = kwargs.setdefault('headers', Headers())
headers.setRawHeaders(b'content-type', [JSON_CONTENT_TYPE])
return (
DeferredContext(self._get_nonce(url))
.addCallback(self._wrap_in_jws, obj)
.addCallback(
lambda data: self._send_request(
u'POST', url, data=data, **kwargs))
.addCallback(self._add_nonce)
.addCallback(self._check_response, content_type=content_type)
.addActionFinish())
|
python
|
def _post(self, url, obj, content_type, **kwargs):
"""
POST an object and check the response.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
"""
with LOG_JWS_POST().context():
headers = kwargs.setdefault('headers', Headers())
headers.setRawHeaders(b'content-type', [JSON_CONTENT_TYPE])
return (
DeferredContext(self._get_nonce(url))
.addCallback(self._wrap_in_jws, obj)
.addCallback(
lambda data: self._send_request(
u'POST', url, data=data, **kwargs))
.addCallback(self._add_nonce)
.addCallback(self._check_response, content_type=content_type)
.addActionFinish())
|
[
"def",
"_post",
"(",
"self",
",",
"url",
",",
"obj",
",",
"content_type",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"LOG_JWS_POST",
"(",
")",
".",
"context",
"(",
")",
":",
"headers",
"=",
"kwargs",
".",
"setdefault",
"(",
"'headers'",
",",
"Headers",
"(",
")",
")",
"headers",
".",
"setRawHeaders",
"(",
"b'content-type'",
",",
"[",
"JSON_CONTENT_TYPE",
"]",
")",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_get_nonce",
"(",
"url",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_wrap_in_jws",
",",
"obj",
")",
".",
"addCallback",
"(",
"lambda",
"data",
":",
"self",
".",
"_send_request",
"(",
"u'POST'",
",",
"url",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_add_nonce",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_response",
",",
"content_type",
"=",
"content_type",
")",
".",
"addActionFinish",
"(",
")",
")"
] |
POST an object and check the response.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
|
[
"POST",
"an",
"object",
"and",
"check",
"the",
"response",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L851-L875
|
train
|
twisted/txacme
|
src/txacme/client.py
|
JWSClient.post
|
def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
"""
def retry_bad_nonce(f):
f.trap(ServerError)
# The current RFC draft defines the namespace as
# urn:ietf:params:acme:error:<code>, but earlier drafts (and some
# current implementations) use urn:acme:error:<code> instead. We
# don't really care about the namespace here, just the error code.
if f.value.message.typ.split(':')[-1] == 'badNonce':
# If one nonce is bad, others likely are too. Let's clear them
# and re-add the one we just got.
self._nonces.clear()
self._add_nonce(f.value.response)
return self._post(url, obj, content_type, **kwargs)
return f
return (
self._post(url, obj, content_type, **kwargs)
.addErrback(retry_bad_nonce))
|
python
|
def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
"""
def retry_bad_nonce(f):
f.trap(ServerError)
# The current RFC draft defines the namespace as
# urn:ietf:params:acme:error:<code>, but earlier drafts (and some
# current implementations) use urn:acme:error:<code> instead. We
# don't really care about the namespace here, just the error code.
if f.value.message.typ.split(':')[-1] == 'badNonce':
# If one nonce is bad, others likely are too. Let's clear them
# and re-add the one we just got.
self._nonces.clear()
self._add_nonce(f.value.response)
return self._post(url, obj, content_type, **kwargs)
return f
return (
self._post(url, obj, content_type, **kwargs)
.addErrback(retry_bad_nonce))
|
[
"def",
"post",
"(",
"self",
",",
"url",
",",
"obj",
",",
"content_type",
"=",
"JSON_CONTENT_TYPE",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"retry_bad_nonce",
"(",
"f",
")",
":",
"f",
".",
"trap",
"(",
"ServerError",
")",
"# The current RFC draft defines the namespace as",
"# urn:ietf:params:acme:error:<code>, but earlier drafts (and some",
"# current implementations) use urn:acme:error:<code> instead. We",
"# don't really care about the namespace here, just the error code.",
"if",
"f",
".",
"value",
".",
"message",
".",
"typ",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
"==",
"'badNonce'",
":",
"# If one nonce is bad, others likely are too. Let's clear them",
"# and re-add the one we just got.",
"self",
".",
"_nonces",
".",
"clear",
"(",
")",
"self",
".",
"_add_nonce",
"(",
"f",
".",
"value",
".",
"response",
")",
"return",
"self",
".",
"_post",
"(",
"url",
",",
"obj",
",",
"content_type",
",",
"*",
"*",
"kwargs",
")",
"return",
"f",
"return",
"(",
"self",
".",
"_post",
"(",
"url",
",",
"obj",
",",
"content_type",
",",
"*",
"*",
"kwargs",
")",
".",
"addErrback",
"(",
"retry_bad_nonce",
")",
")"
] |
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
|
[
"POST",
"an",
"object",
"and",
"check",
"the",
"response",
".",
"Retry",
"once",
"if",
"a",
"badNonce",
"error",
"is",
"received",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L877-L907
|
train
|
twisted/txacme
|
src/txacme/challenges/_libcloud.py
|
_daemon_thread
|
def _daemon_thread(*a, **kw):
"""
Create a `threading.Thread`, but always set ``daemon``.
"""
thread = Thread(*a, **kw)
thread.daemon = True
return thread
|
python
|
def _daemon_thread(*a, **kw):
"""
Create a `threading.Thread`, but always set ``daemon``.
"""
thread = Thread(*a, **kw)
thread.daemon = True
return thread
|
[
"def",
"_daemon_thread",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"thread",
"=",
"Thread",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"thread",
".",
"daemon",
"=",
"True",
"return",
"thread"
] |
Create a `threading.Thread`, but always set ``daemon``.
|
[
"Create",
"a",
"threading",
".",
"Thread",
"but",
"always",
"set",
"daemon",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/challenges/_libcloud.py#L18-L24
|
train
|
twisted/txacme
|
src/txacme/challenges/_libcloud.py
|
_defer_to_worker
|
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
"""
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
"""
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred
|
python
|
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
"""
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
"""
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred
|
[
"def",
"_defer_to_worker",
"(",
"deliver",
",",
"worker",
",",
"work",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"deferred",
"=",
"Deferred",
"(",
")",
"def",
"wrapped_work",
"(",
")",
":",
"try",
":",
"result",
"=",
"work",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"BaseException",
":",
"f",
"=",
"Failure",
"(",
")",
"deliver",
"(",
"lambda",
":",
"deferred",
".",
"errback",
"(",
"f",
")",
")",
"else",
":",
"deliver",
"(",
"lambda",
":",
"deferred",
".",
"callback",
"(",
"result",
")",
")",
"worker",
".",
"do",
"(",
"wrapped_work",
")",
"return",
"deferred"
] |
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
|
[
"Run",
"a",
"task",
"in",
"a",
"worker",
"delivering",
"the",
"result",
"as",
"a",
"Deferred",
"in",
"the",
"reactor",
"thread",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/challenges/_libcloud.py#L27-L43
|
train
|
twisted/txacme
|
src/txacme/challenges/_libcloud.py
|
_split_zone
|
def _split_zone(server_name, zone_name):
"""
Split the zone portion off from a DNS label.
:param str server_name: The full DNS label.
:param str zone_name: The zone name suffix.
"""
server_name = server_name.rstrip(u'.')
zone_name = zone_name.rstrip(u'.')
if not (server_name == zone_name or
server_name.endswith(u'.' + zone_name)):
raise NotInZone(server_name=server_name, zone_name=zone_name)
return server_name[:-len(zone_name)].rstrip(u'.')
|
python
|
def _split_zone(server_name, zone_name):
"""
Split the zone portion off from a DNS label.
:param str server_name: The full DNS label.
:param str zone_name: The zone name suffix.
"""
server_name = server_name.rstrip(u'.')
zone_name = zone_name.rstrip(u'.')
if not (server_name == zone_name or
server_name.endswith(u'.' + zone_name)):
raise NotInZone(server_name=server_name, zone_name=zone_name)
return server_name[:-len(zone_name)].rstrip(u'.')
|
[
"def",
"_split_zone",
"(",
"server_name",
",",
"zone_name",
")",
":",
"server_name",
"=",
"server_name",
".",
"rstrip",
"(",
"u'.'",
")",
"zone_name",
"=",
"zone_name",
".",
"rstrip",
"(",
"u'.'",
")",
"if",
"not",
"(",
"server_name",
"==",
"zone_name",
"or",
"server_name",
".",
"endswith",
"(",
"u'.'",
"+",
"zone_name",
")",
")",
":",
"raise",
"NotInZone",
"(",
"server_name",
"=",
"server_name",
",",
"zone_name",
"=",
"zone_name",
")",
"return",
"server_name",
"[",
":",
"-",
"len",
"(",
"zone_name",
")",
"]",
".",
"rstrip",
"(",
"u'.'",
")"
] |
Split the zone portion off from a DNS label.
:param str server_name: The full DNS label.
:param str zone_name: The zone name suffix.
|
[
"Split",
"the",
"zone",
"portion",
"off",
"from",
"a",
"DNS",
"label",
"."
] |
9478381cc63c6d53d14bf8db8407c923f472989a
|
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/challenges/_libcloud.py#L46-L58
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.