def _load_single_patient_kallisto(self, patient):
    """
    Load Kallisto gene quantification given a patient

    Parameters
    ----------
    patient : Patient

    Returns
    -------
    data: Pandas dataframe
        Pandas dataframe of sample's Kallisto data
        columns include patient_id, target_id, length, eff_length, est_counts, tpm
    """
    data = pd.read_csv(patient.tumor_sample.kallisto_path, sep="\t")
    data["patient_id"] = patient.id
    return data
def load_cufflinks(self, filter_ok=True):
    """
    Load Cufflinks gene expression data for a cohort

    Parameters
    ----------
    filter_ok : bool, optional
        If true, filter Cufflinks data to rows with FPKM_status == "OK"

    Returns
    -------
    cufflinks_data : Pandas dataframe
        Pandas dataframe with Cufflinks data for all patients
        columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
    """
    return \
        pd.concat(
            [self._load_single_patient_cufflinks(patient, filter_ok) for patient in self],
            copy=False
        )
def _load_single_patient_cufflinks(self, patient, filter_ok):
    """
    Load Cufflinks gene quantification given a patient

    Parameters
    ----------
    patient : Patient
    filter_ok : bool, optional
        If true, filter Cufflinks data to rows with FPKM_status == "OK"

    Returns
    -------
    data: Pandas dataframe
        Pandas dataframe of sample's Cufflinks data
        columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
    """
    data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t")
    data["patient_id"] = patient.id
    if filter_ok:
        # Filter to OK FPKM counts
        data = data[data["FPKM_status"] == "OK"]
    return data
def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff):
    """
    Mostly replicates topiary.build_epitope_collection_from_binding_predictions

    Note: topiary needs to do fancy stuff like subsequence_protein_offset +
    binding_prediction.offset in order to figure out whether a variant is in
    the peptide, because it only has the variant's offset into the full
    protein; but isovar gives us the variant's offset into the protein
    subsequence (dictated by protein_sequence_length), so all we need to do
    is map that onto the smaller 8-11mer peptides generated by mhctools.
    """
    mutant_binding_predictions = []
    for binding_prediction in epitopes:
        peptide = binding_prediction.peptide
        peptide_offset = binding_prediction.offset
        isovar_row = dict(binding_prediction.source_sequence_key)
        is_mutant = contains_mutant_residues(
            peptide_start_in_protein=peptide_offset,
            peptide_length=len(peptide),
            mutation_start_in_protein=isovar_row["variant_aa_interval_start"],
            mutation_end_in_protein=isovar_row["variant_aa_interval_end"])
        if is_mutant and binding_prediction.value <= ic50_cutoff:
            mutant_binding_predictions.append(binding_prediction)
    return EpitopeCollection(mutant_binding_predictions)
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
    """Plot an ROC curve for benefit and a given variable

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    bootstrap_samples : int, optional
        Number of bootstrap samples to use to compute the AUC
    ax : Axes, default None
        Axes to plot on

    Returns
    -------
    (mean_auc_score, plot) : (float, matplotlib plot)
        Returns the average AUC for the given predictor over `bootstrap_samples`
        and the associated ROC curve
    """
    plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs)
    df = filter_not_null(df, "benefit")
    df = filter_not_null(df, plot_col)
    df.benefit = df.benefit.astype(bool)
    return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
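A minimal usage sketch (not from the source): it assumes a constructed Cohort instance named `cohort` whose dataframe carries a boolean `benefit` column, and a hypothetical numeric predictor column `snv_count`.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# "snv_count" is a hypothetical column; substitute any numeric predictor.
mean_auc, roc_plot = cohort.plot_roc_curve(on="snv_count",
                                           bootstrap_samples=200,
                                           ax=ax)
print("Mean bootstrap AUC: %.3f" % mean_auc)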
def plot_benefit(self, on, benefit_col="benefit", label="Response",
                 ax=None, alternative="two-sided", boolean_value_map={},
                 order=None, **kwargs):
    """Plot a comparison of benefit/response in the cohort on a given variable
    """
    no_benefit_plot_name = "No %s" % self.benefit_plot_name
    boolean_value_map = boolean_value_map or {
        True: self.benefit_plot_name,
        False: no_benefit_plot_name}
    order = order or [no_benefit_plot_name, self.benefit_plot_name]
    return self.plot_boolean(on=on,
                             boolean_col=benefit_col,
                             alternative=alternative,
                             boolean_label=label,
                             boolean_value_map=boolean_value_map,
                             order=order,
                             ax=ax,
                             **kwargs)
def plot_boolean(self,
                 on,
                 boolean_col,
                 plot_col=None,
                 boolean_label=None,
                 boolean_value_map={},
                 order=None,
                 ax=None,
                 alternative="two-sided",
                 **kwargs):
    """Plot a comparison of `boolean_col` in the cohort on a given variable
    via `on` or `col`.

    If the variable (through `on` or `col`) is binary, this compares
    odds-ratios and performs a Fisher's exact test. If the variable is
    numeric, this compares the distributions through a Mann-Whitney test and
    plots them with a box-strip plot. (A usage sketch follows this function.)

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    plot_col : str, optional
        If `on` has many columns, this is the one whose values we are plotting.
        If `on` has a single column, this is unnecessary. We might want many
        columns if, e.g., we're generating `boolean_col` from a function as well.
    boolean_col : str
        Column name of the boolean column to plot or compare against.
    boolean_label : str, optional
        Label to give the boolean column in the plot.
    boolean_value_map : dict, optional
        Map of conversions for values in the boolean column,
        i.e. {True: 'High', False: 'Low'}
    order : list, optional
        Order of the labels on the x-axis.
    ax : Axes, optional
        Axes to plot on.
    alternative : str, optional
        Sidedness of the Mann-Whitney or Fisher's exact test.

    Returns
    -------
    (test statistic, p-value) : (float, float)
    """
    cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
    plot_col = self.plot_col_from_cols(cols=cols, plot_col=plot_col)
    df = filter_not_null(df, boolean_col)
    df = filter_not_null(df, plot_col)
    if boolean_label:
        df[boolean_label] = df[boolean_col]
        boolean_col = boolean_label
    condition_value = None
    if boolean_value_map:
        assert set(boolean_value_map.keys()) == set([True, False]), \
            "Improper mapping of boolean column provided"
        df[boolean_col] = df[boolean_col].map(lambda v: boolean_value_map[v])
        condition_value = boolean_value_map[True]
    if df[plot_col].dtype == "bool":
        results = fishers_exact_plot(
            data=df,
            condition1=boolean_col,
            condition2=plot_col,
            condition1_value=condition_value,
            alternative=alternative,
            order=order,
            ax=ax)
    else:
        results = mann_whitney_plot(
            data=df,
            condition=boolean_col,
            distribution=plot_col,
            condition_value=condition_value,
            alternative=alternative,
            order=order,
            ax=ax)
    return results
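For illustration, a hedged sketch of calling plot_boolean directly; `smoker` (boolean) and `mutation_count` (numeric) are hypothetical columns in an assumed Cohort instance `cohort`. A numeric `on` column routes to the Mann-Whitney branch; a boolean one routes to the Fisher's exact branch.

results = cohort.plot_boolean(
    on="mutation_count",           # numeric -> Mann-Whitney + box-strip plot
    boolean_col="smoker",          # boolean column being compared against
    boolean_label="Smoking status",
    boolean_value_map={True: "Smoker", False: "Non-smoker"},
    order=["Non-smoker", "Smoker"])
print(results.p_value)             # both result namedtuples carry p_value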
def plot_survival(self,
                  on,
                  how="os",
                  survival_units="Days",
                  strata=None,
                  ax=None,
                  ci_show=False,
                  with_condition_color="#B38600",
                  no_condition_color="#A941AC",
                  with_condition_label=None,
                  no_condition_label=None,
                  color_map=None,
                  label_map=None,
                  color_palette="Set2",
                  threshold=None,
                  **kwargs):
    """Plot a Kaplan Meier survival curve by splitting the cohort into two groups

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    how : {"os", "pfs"}, optional
        Whether to plot OS (overall survival) or PFS (progression-free survival)
    survival_units : str
        Unit of time for the survival measure, i.e. Days or Months
    strata : str, optional
        Column name of stratifying variable
    ci_show : bool
        Display the confidence interval around the survival curve
    threshold : int, "median", "median-per-strata" or None, optional
        Threshold of `col` on which to split the cohort
    """
    assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how
    cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
    plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True)
    df = filter_not_null(df, plot_col)
    results = plot_kmf(
        df=df,
        condition_col=plot_col,
        xlabel=survival_units,
        ylabel="Overall Survival (%)" if how == "os" else "Progression-Free Survival (%)",
        censor_col="deceased" if how == "os" else "progressed_or_deceased",
        survival_col=how,
        strata_col=strata,
        threshold=threshold,
        ax=ax,
        ci_show=ci_show,
        with_condition_color=with_condition_color,
        no_condition_color=no_condition_color,
        with_condition_label=with_condition_label,
        no_condition_label=no_condition_label,
        color_palette=color_palette,
        label_map=label_map,
        color_map=color_map,
    )
    return results
def plot_correlation(self, on, x_col=None, plot_type="jointplot",
                     stat_func=pearsonr, show_stat_func=True,
                     plot_kwargs={}, **kwargs):
    """Plot the correlation between two variables.

    Parameters
    ----------
    on : list or dict of functions or strings
        See `cohort.load.as_dataframe`
    x_col : str, optional
        If `on` is a dict, this guarantees we have the expected ordering.
    plot_type : str, optional
        Specify "jointplot", "regplot", "boxplot", or "barplot".
    stat_func : function, optional
        Specify which function to use for the statistical test.
    show_stat_func : bool, optional
        Whether or not to show the stat_func result in the plot itself.
    plot_kwargs : dict, optional
        kwargs to pass through to plotting functions.
    """
    if plot_type not in ["boxplot", "barplot", "jointplot", "regplot"]:
        raise ValueError("Invalid plot_type %s" % plot_type)
    plot_cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
    if len(plot_cols) != 2:
        raise ValueError("Must be comparing two columns, but there are %d columns" % len(plot_cols))
    for plot_col in plot_cols:
        df = filter_not_null(df, plot_col)
    if x_col is None:
        x_col = plot_cols[0]
        y_col = plot_cols[1]
    else:
        if x_col == plot_cols[0]:
            y_col = plot_cols[1]
        else:
            y_col = plot_cols[0]
    series_x = df[x_col]
    series_y = df[y_col]
    coeff, p_value = stat_func(series_x, series_y)
    if plot_type == "jointplot":
        plot = sb.jointplot(data=df, x=x_col, y=y_col,
                            stat_func=stat_func if show_stat_func else None,
                            **plot_kwargs)
    elif plot_type == "regplot":
        plot = sb.regplot(data=df, x=x_col, y=y_col, **plot_kwargs)
    elif plot_type == "boxplot":
        plot = stripboxplot(data=df, x=x_col, y=y_col, **plot_kwargs)
    else:
        plot = sb.barplot(data=df, x=x_col, y=y_col, **plot_kwargs)
    return CorrelationResults(coeff=coeff, p_value=p_value, stat_func=stat_func,
                              series_x=series_x, series_y=series_y, plot=plot)
def _list_patient_ids(self):
    """ Utility function to return a list of patient ids in the Cohort
    """
    return [patient.id for patient in self]
def summarize_provenance_per_cache(self):
    """Utility function to summarize provenance files for cached items used by
    a Cohort, for each cache_dir that exists. Only existing cache_dirs are
    summarized.

    This is a summary of provenance files because the function checks to see
    whether all patients' data have the same provenance within the cache dir.
    The function assumes that it will be desirable to have all patients' data
    generated using the same environment, for each cache type.

    At the moment, most PROVENANCE files contain details about packages used
    to generate the cached data file. However, this function is generic & so
    it summarizes the contents of those files irrespective of their contents.

    Returns
    ----------
    Dict containing summarized provenance for each existing cache_dir, after
    checking to see that provenance files are identical among all patients in
    the data frame for that cache_dir.

    If conflicting PROVENANCE files are discovered within a cache-dir:
     - a warning is generated, describing the conflict
     - and, a value of `None` is returned in the dictionary for that cache-dir

    See also
    -----------
    * `?cohorts.Cohort.summarize_provenance` which summarizes provenance files
      among cache_dirs.
    * `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents
      of the data frame for this cohort.
    """
    provenance_summary = {}
    df = self.as_dataframe()
    for cache in self.cache_names:
        cache_name = self.cache_names[cache]
        cache_provenance = None
        num_discrepant = 0
        this_cache_dir = path.join(self.cache_dir, cache_name)
        if path.exists(this_cache_dir):
            for patient_id in self._list_patient_ids():
                patient_cache_dir = path.join(this_cache_dir, patient_id)
                try:
                    this_provenance = self.load_provenance(patient_cache_dir=patient_cache_dir)
                except Exception:
                    # missing/unreadable provenance for a patient is expected;
                    # treat it as absent rather than letting any error escape
                    this_provenance = None
                if this_provenance:
                    if not cache_provenance:
                        cache_provenance = this_provenance
                    else:
                        num_discrepant += compare_provenance(this_provenance, cache_provenance)
            if num_discrepant == 0:
                provenance_summary[cache_name] = cache_provenance
            else:
                provenance_summary[cache_name] = None
    return provenance_summary
def summarize_dataframe(self):
    """Summarize default dataframe for this cohort using a hash function.
    Useful for confirming the version of data used in various reports,
    e.g. ipynbs
    """
    if self.dataframe_hash:
        return self.dataframe_hash
    else:
        # _as_dataframe_unmodified is presumed to compute and cache
        # self.dataframe_hash as a side effect
        df = self._as_dataframe_unmodified()
        return self.dataframe_hash
def summarize_provenance(self):
    """Utility function to summarize provenance files for cached items used
    by a Cohort.

    At the moment, most PROVENANCE files contain details about packages used
    to generate files. However, this function is generic & so it summarizes
    the contents of those files irrespective of their contents.

    Returns
    ----------
    Dict containing summary of provenance items, among all cache dirs used by
    the Cohort. I.e. if all provenances are identical across all cache dirs,
    then a single set of provenances is returned. Otherwise, if all
    provenances are not identical, the provenance items per cache_dir are
    returned.

    See also
    ----------
    `?cohorts.Cohort.summarize_provenance_per_cache` which is used to
    summarize provenance for each existing cache_dir.
    """
    provenance_per_cache = self.summarize_provenance_per_cache()
    summary_provenance = None
    num_discrepant = 0
    for cache in provenance_per_cache:
        if not summary_provenance:
            ## pick arbitrary provenance & call this the "summary" (for now)
            summary_provenance = provenance_per_cache[cache]
            summary_provenance_name = cache
        ## for each cache, check equivalence with summary_provenance
        num_discrepant += compare_provenance(
            provenance_per_cache[cache],
            summary_provenance,
            left_outer_diff="In %s but not in %s" % (cache, summary_provenance_name),
            right_outer_diff="In %s but not in %s" % (summary_provenance_name, cache)
        )
    ## compare provenance across cached items
    if num_discrepant == 0:
        prov = summary_provenance  ## report summary provenance if exists
    else:
        prov = provenance_per_cache  ## otherwise, return provenance per cache
    return prov
def summarize_data_sources(self):
    """Utility function to summarize data source status for this Cohort,
    useful for confirming the state of data used for an analysis.

    Returns
    ----------
    Dictionary with summary of data sources

    Currently contains
    - dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`)
    - provenance_file_summary: summary of provenance file contents
      (see `?cohorts.Cohort.summarize_provenance`)
    """
    provenance_file_summary = self.summarize_provenance()
    dataframe_hash = self.summarize_dataframe()
    results = {
        "provenance_file_summary": provenance_file_summary,
        "dataframe_hash": dataframe_hash
    }
    return results
def strelka_somatic_variant_stats(variant, variant_metadata):
    """Parse out the variant calling statistics for a given variant from a Strelka VCF

    Parameters
    ----------
    variant : varcode.Variant
    variant_metadata : dict
        Metadata for this variant; its "sample_info" entry maps each sample to
        its variant calling statistics, corresponding to the sample columns in
        a Strelka VCF

    Returns
    -------
    SomaticVariantStats
    """
    sample_info = variant_metadata["sample_info"]
    # Ensure there are exactly two samples in the VCF, a tumor and normal
    assert len(sample_info) == 2, \
        "Expected exactly two samples in the somatic VCF, found %d" % len(sample_info)
    tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"])
    normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"])
    return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
def _strelka_variant_stats(variant, sample_info):
    """Parse a single sample's variant calling statistics based on Strelka VCF output

    Parameters
    ----------
    variant : varcode.Variant
    sample_info : dict
        Dictionary of Strelka-specific variant calling fields

    Returns
    -------
    VariantStats
    """
    if variant.is_deletion or variant.is_insertion:
        # ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output
        ref_depth = int(sample_info['TAR'][0])  # number of reads supporting ref allele (non-deletion)
        alt_depth = int(sample_info['TIR'][0])  # number of reads supporting alt allele (deletion)
        depth = ref_depth + alt_depth
    else:
        # Retrieve the Tier 1 counts from Strelka
        ref_depth = int(sample_info[variant.ref + "U"][0])
        alt_depth = int(sample_info[variant.alt + "U"][0])
        depth = alt_depth + ref_depth
    if depth > 0:
        vaf = float(alt_depth) / depth
    else:
        # unclear how to define vaf if no reads support variant
        # up to user to interpret this (hopefully filtered out in QC settings)
        vaf = None
    return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
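To make the tier-1 field lookups concrete, here is a small worked example that mirrors the SNV branch above with a fabricated sample_info dict (values are illustrative, not from a real VCF):

# Strelka reports per-allele counts as e.g. AU = [tier1_count, tier2_count].
sample_info = {"AU": [10, 12], "GU": [5, 6]}  # fabricated values
ref, alt = "A", "G"
ref_depth = int(sample_info[ref + "U"][0])    # tier-1 reads supporting ref -> 10
alt_depth = int(sample_info[alt + "U"][0])    # tier-1 reads supporting alt -> 5
depth = ref_depth + alt_depth                 # 15
vaf = float(alt_depth) / depth                # 0.333...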
def mutect_somatic_variant_stats(variant, variant_metadata):
    """Parse out the variant calling statistics for a given variant from a Mutect VCF

    Parameters
    ----------
    variant : varcode.Variant
    variant_metadata : dict
        Metadata for this variant; its "sample_info" entry maps each sample to
        its variant calling statistics, corresponding to the sample columns in
        a Mutect VCF

    Returns
    -------
    SomaticVariantStats
    """
    sample_info = variant_metadata["sample_info"]
    # Ensure there are exactly two samples in the VCF, a tumor and normal
    assert len(sample_info) == 2, \
        "Expected exactly two samples in the somatic VCF, found %d" % len(sample_info)
    # Find the sample with the genotype field set to variant in the VCF
    tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"]
    # Ensure there is only one such sample
    assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file"
    tumor_sample_info = tumor_sample_infos[0]
    normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0]
    tumor_stats = _mutect_variant_stats(variant, tumor_sample_info)
    normal_stats = _mutect_variant_stats(variant, normal_sample_info)
    return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
def _mutect_variant_stats(variant, sample_info):
    """Parse a single sample's variant calling statistics based on Mutect's (v1) VCF output

    Parameters
    ----------
    variant : varcode.Variant
    sample_info : dict
        Dictionary of Mutect-specific variant calling fields

    Returns
    -------
    VariantStats
    """
    # Parse out the AD (allele depth) field, which is an array of [REF_DEPTH, ALT_DEPTH]
    ref_depth, alt_depth = sample_info["AD"]
    depth = int(ref_depth) + int(alt_depth)
    # Guard against zero-depth records, mirroring the Strelka parser's handling
    vaf = float(alt_depth) / depth if depth > 0 else None
    return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
def maf_somatic_variant_stats(variant, variant_metadata):
    """
    Parse out the variant calling statistics for a given variant from a MAF file

    Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777

    Parameters
    ----------
    variant : varcode.Variant
    variant_metadata : dict
        Dictionary of metadata for this variant

    Returns
    -------
    SomaticVariantStats
    """
    tumor_stats = None
    normal_stats = None
    if "t_ref_count" in variant_metadata:
        tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t")
    if "n_ref_count" in variant_metadata:
        normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n")
    return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
def _vcf_is_strelka(variant_file, variant_metadata):
    """Return True if variant_file given is in strelka format
    """
    if "strelka" in variant_file.lower():
        return True
    elif "NORMAL" in variant_metadata["sample_info"].keys():
        return True
    else:
        vcf_reader = vcf.Reader(open(variant_file, "r"))
        try:
            vcf_type = vcf_reader.metadata["content"]
        except KeyError:
            vcf_type = ""
        if "strelka" in vcf_type.lower():
            return True
    return False
def variant_stats_from_variant(variant,
                               metadata,
                               merge_fn=(lambda all_stats:
                                         max(all_stats,
                                             key=(lambda stats: stats.tumor_stats.depth)))):
    """Parse the variant calling stats from a variant called from multiple
    variant files. The stats are merged based on `merge_fn`

    Parameters
    ----------
    variant : varcode.Variant
    metadata : dict
        Dictionary of variant file to variant calling metadata from that file
    merge_fn : function
        Function from list of SomaticVariantStats to single SomaticVariantStats.
        This is used if a variant is called by multiple callers or appears in
        multiple VCFs. By default, this uses the data from the caller that had
        a higher tumor depth.

    Returns
    -------
    SomaticVariantStats
    """
    all_stats = []
    for (variant_file, variant_metadata) in metadata.items():
        if _vcf_is_maf(variant_file=variant_file):
            stats = maf_somatic_variant_stats(variant, variant_metadata)
        elif _vcf_is_strelka(variant_file=variant_file,
                             variant_metadata=variant_metadata):
            stats = strelka_somatic_variant_stats(variant, variant_metadata)
        elif _vcf_is_mutect(variant_file=variant_file,
                            variant_metadata=variant_metadata):
            stats = mutect_somatic_variant_stats(variant, variant_metadata)
        else:
            raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file))
        all_stats.append(stats)
    return merge_fn(all_stats)
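The default merge_fn keeps the stats from the caller reporting the greatest tumor depth. A sketch of swapping in a different policy (preferring the highest tumor VAF instead; `variant` and `metadata` are assumed to be in scope):

def merge_by_max_vaf(all_stats):
    # Prefer the caller reporting the highest tumor variant allele frequency;
    # treat a None VAF (zero-depth records) as 0 for comparison purposes.
    return max(all_stats,
               key=lambda stats: stats.tumor_stats.variant_allele_frequency or 0)

stats = variant_stats_from_variant(variant, metadata, merge_fn=merge_by_max_vaf)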
def _get_and_execute(self):
    """
    :return: True if it should continue running, False if it should end its execution.
    """
    try:
        work = self.queue.get(timeout=self.max_seconds_idle)
    except queue.Empty:
        # max_seconds_idle has been exhausted, exiting
        self.end_notify()
        return False
    else:
        self._work(work)
        self.queue.task_done()
        return True
def format(self, full_info: bool = False):
    """
    :param full_info: If True, adds more info about the chat. Please note that this
        additional info requires making up to three synchronous API calls.
    """
    chat = self.api_object
    if full_info:
        self.__format_full(chat)
    else:
        self.__format_simple(chat)
def list(self):
    """
    :rtype: list of (setting_name, value, default_value, is_set, is_supported) tuples
    """
    settings = []
    for setting in _SETTINGS:
        value = self.get(setting)
        is_set = self.is_set(setting)
        default_value = self.get_default_value(setting)
        is_supported = True
        settings.append((setting, value, default_value, is_set, is_supported))
    for setting in sorted(self.settings_state.list_keys()):
        if not self.is_supported(setting):
            value = self.get(setting)
            default_value = None
            is_set = True
            is_supported = False
            settings.append((setting, value, default_value, is_set, is_supported))
    return settings
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0,
                          pageant_dir_fn=None):
    """
    Load in Pageant CoverageDepth results with Ensembl loci.

    coverage_path is a path to the Pageant CoverageDepth output directory, with
    one subdirectory per patient and a `cdf.csv` file inside each patient subdir.

    If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate
    joint tumor/normal coverage.

    pageant_dir_fn is a function that takes in a Patient and produces a Pageant
    dir name.

    Last tested with Pageant CoverageDepth version 1ca9ed2.
    """
    # Function to grab the pageant file name using the Patient
    if pageant_dir_fn is None:
        pageant_dir_fn = lambda patient: patient.id

    columns_both = [
        "depth1",  # Normal
        "depth2",  # Tumor
        "onBP1",
        "onBP2",
        "numOnLoci",
        "fracBPOn1",
        "fracBPOn2",
        "fracLociOn",
        "offBP1",
        "offBP2",
        "numOffLoci",
        "fracBPOff1",
        "fracBPOff2",
        "fracLociOff",
    ]
    columns_single = [
        "depth",
        "onBP",
        "numOnLoci",
        "fracBPOn",
        "fracLociOn",
        "offBP",
        "numOffLoci",
        "fracBPOff",
        "fracLociOff",
    ]
    if min_normal_depth < 0:
        raise ValueError("min_normal_depth must be >= 0")
    use_tumor_only = (min_normal_depth == 0)
    columns = columns_single if use_tumor_only else columns_both
    ensembl_loci_dfs = []
    for patient in cohort:
        patient_ensembl_loci_df = pd.read_csv(
            path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"),
            names=columns,
            header=1)
        # pylint: disable=no-member
        # pylint gets confused by read_csv
        if use_tumor_only:
            depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth)
        else:
            depth_mask = (
                (patient_ensembl_loci_df.depth1 == min_normal_depth) &
                (patient_ensembl_loci_df.depth2 == min_tumor_depth))
        patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask]
        assert len(patient_ensembl_loci_df) == 1, (
            "Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format(
                min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient))
        patient_ensembl_loci_df["patient_id"] = patient.id
        ensembl_loci_dfs.append(patient_ensembl_loci_df)
    ensembl_loci_df = pd.concat(ensembl_loci_dfs)
    ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0
    return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]]
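A usage sketch under assumed inputs: a constructed Cohort named `cohort` and a hypothetical output directory laid out as <coverage_path>/<patient.id>/cdf.csv.

coverage_df = load_ensembl_coverage(
    cohort,
    coverage_path="/path/to/pageant/output",  # hypothetical path
    min_tumor_depth=30,
    min_normal_depth=7)  # 0 would switch to tumor-only coverage
print(coverage_df.head())  # columns: patient_id, numOnLoci, MB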
def vertical_percent(plot, percent=0.1):
    """
    Using the size of the y axis, return a fraction of that size.
    """
    plot_bottom, plot_top = plot.get_ylim()
    return percent * (plot_top - plot_bottom)
def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
    """Hide tick values that are outside of [min_tick_value, max_tick_value]"""
    for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
        tick_label = as_numeric(tick_value)
        # Compare against None rather than truthiness, so a tick at 0 can still be hidden
        if tick_label is not None:
            if ((min_tick_value is not None and tick_label < min_tick_value) or
                    (max_tick_value is not None and tick_label > max_tick_value)):
                tick.set_visible(False)
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False):
    """
    Add a p-value significance indicator.
    """
    plot_bottom, plot_top = plot.get_ylim()
    # Give the plot a little room for the significance indicator
    line_height = vertical_percent(plot, 0.1)
    # Add some extra spacing below the indicator
    plot_top = plot_top + line_height
    # Add some extra spacing above the indicator
    plot.set_ylim(top=plot_top + line_height * 2)
    color = "black"
    line_top = plot_top + line_height
    plot.plot([col_a, col_a, col_b, col_b],
              [plot_top, line_top, line_top, plot_top],
              lw=1.5, color=color)
    indicator = "*" if significant else "ns"
    plot.text((col_a + col_b) * 0.5, line_top, indicator,
              ha="center", va="bottom", color=color)
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs):
    """
    Overlay a stripplot on top of a boxplot.
    """
    ax = sb.boxplot(
        x=x,
        y=y,
        data=data,
        ax=ax,
        fliersize=0,
        **kwargs
    )
    plot = sb.stripplot(
        x=x,
        y=y,
        data=data,
        ax=ax,
        jitter=kwargs.pop("jitter", 0.05),
        color=kwargs.pop("color", "0.3"),
        **kwargs
    )
    if data[y].min() >= 0:
        hide_negative_y_ticks(plot)
    if significant is not None:
        add_significance_indicator(plot=plot, significant=significant)
    return plot
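A self-contained sketch of calling stripboxplot on fabricated data (assumes this module's stripboxplot and its seaborn/matplotlib dependencies are importable):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    "group": ["A"] * 5 + ["B"] * 5,  # fabricated example data
    "value": [1.0, 2.0, 2.5, 3.0, 2.2, 4.0, 4.5, 5.0, 3.8, 4.2]})
fig, ax = plt.subplots()
plot = stripboxplot(x="group", y="value", data=df, ax=ax, significant=True)
plt.show()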
def fishers_exact_plot(data, condition1, condition2, ax=None,
                       condition1_value=None,
                       alternative="two-sided", **kwargs):
    """
    Perform a Fisher's exact test to compare two binary columns

    Parameters
    ----------
    data: Pandas dataframe
        Dataframe to retrieve information from
    condition1: str
        First binary column to compare (and used for test sidedness)
    condition2: str
        Second binary column to compare
    ax : Axes, default None
        Axes to plot on
    condition1_value:
        If `condition1` is not a binary column, split on =/!= to condition1_value
    alternative:
        Specify the sidedness of the test: "two-sided", "less" or "greater"
    """
    # Validate up front, before any plotting happens
    if alternative != "two-sided":
        raise ValueError("We need to better understand the one-sided Fisher's Exact test")
    sided_str = "two-sided"

    plot = sb.barplot(
        x=condition1,
        y=condition2,
        ax=ax,
        data=data,
        **kwargs
    )
    plot.set_ylabel("Percent %s" % condition2)
    condition1_mask = get_condition_mask(data, condition1, condition1_value)
    count_table = pd.crosstab(data[condition1], data[condition2])
    print(count_table)
    oddsratio, p_value = fisher_exact(count_table, alternative=alternative)
    add_significance_indicator(plot=plot, significant=p_value <= 0.05)
    only_percentage_ticks(plot)

    print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str))
    return FishersExactResults(oddsratio=oddsratio,
                               p_value=p_value,
                               sided_str=sided_str,
                               with_condition1_series=data[condition1_mask][condition2],
                               without_condition1_series=data[~condition1_mask][condition2],
                               plot=plot)
def mann_whitney_plot(data,
                      condition,
                      distribution,
                      ax=None,
                      condition_value=None,
                      alternative="two-sided",
                      skip_plot=False,
                      **kwargs):
    """
    Create a box plot comparing a condition and perform a Mann-Whitney test to
    compare the distribution in condition A v B

    Parameters
    ----------
    data: Pandas dataframe
        Dataframe to retrieve information from
    condition: str
        Column to use as the splitting criteria
    distribution: str
        Column to use as the Y-axis or distribution in the test
    ax : Axes, default None
        Axes to plot on
    condition_value:
        If `condition` is not a binary column, split on =/!= to condition_value
    alternative:
        Specify the sidedness of the Mann-Whitney test: "two-sided", "less" or "greater"
    skip_plot:
        Calculate the test statistic and p-value, but don't plot.
    """
    condition_mask = get_condition_mask(data, condition, condition_value)
    U, p_value = mannwhitneyu(
        data[condition_mask][distribution],
        data[~condition_mask][distribution],
        alternative=alternative
    )

    plot = None
    if not skip_plot:
        plot = stripboxplot(
            x=condition,
            y=distribution,
            data=data,
            ax=ax,
            significant=p_value <= 0.05,
            **kwargs
        )

    sided_str = sided_str_from_alternative(alternative, condition)
    print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str))
    return MannWhitneyResults(U=U,
                              p_value=p_value,
                              sided_str=sided_str,
                              with_condition_series=data[condition_mask][distribution],
                              without_condition_series=data[~condition_mask][distribution],
                              plot=plot)
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None):
    """Create a ROC curve and compute the bootstrap AUC for the given variable and outcome

    Parameters
    ----------
    data : Pandas dataframe
        Dataframe to retrieve information from
    value_column : str
        Column to retrieve the values from
    outcome_column : str
        Column to use as the outcome variable
    bootstrap_samples : int, optional
        Number of bootstrap samples to use to compute the AUC
    ax : Axes, default None
        Axes to plot on

    Returns
    -------
    (mean_bootstrap_auc, roc_plot) : (float, matplotlib plot)
        Mean AUC for the given number of bootstrap samples and the plot
    """
    scores = bootstrap_auc(df=data,
                           col=value_column,
                           pred_col=outcome_column,
                           n_bootstrap=bootstrap_samples)
    mean_bootstrap_auc = scores.mean()
    print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format(
        value_column, bootstrap_samples, mean_bootstrap_auc, scores.std()))

    outcome = data[outcome_column].astype(int)
    values = data[value_column]
    fpr, tpr, thresholds = roc_curve(outcome, values)
    if ax is None:
        ax = plt.gca()
    roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column)
    ax.set_xlim([-0.05, 1.05])
    ax.set_ylim([-0.05, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.legend(loc=2, borderaxespad=0.)
    ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values)))
    return (mean_bootstrap_auc, roc_plot)
def get_cache_dir(cache_dir, cache_root_dir=None, *args, **kwargs):
    """
    Return full cache_dir, according to the following logic:
    - if cache_dir is a full path (per path.isabs), return that value
    - if not, and if cache_root_dir is not None, join the two paths
    - otherwise, log warnings and return None

    Separately, if args or kwargs are given, format cache_dir using args and kwargs
    """
    cache_dir = cache_dir.format(*args, **kwargs)
    if path.isabs(cache_dir):
        if cache_root_dir is not None:
            logger.warning('cache_dir ({}) is a full path; ignoring cache_root_dir'.format(cache_dir))
        return cache_dir
    if cache_root_dir is not None:
        return path.join(cache_root_dir, cache_dir)
    else:
        logger.warning("cache dir is not full path & cache_root_dir not given. Caching may not work as expected!")
        return None
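The three branches, traced with hypothetical paths:

get_cache_dir("/data/cache")
# -> "/data/cache" (absolute path returned as-is; if cache_root_dir were
#    also given, it would be ignored with a warning)
get_cache_dir("variants", cache_root_dir="/data/cache")
# -> "/data/cache/variants" (relative path joined onto the root)
get_cache_dir("variants")
# -> None (relative path with no root: warns and returns None)
get_cache_dir("variants-{version}", cache_root_dir="/data/cache", version="v2")
# -> "/data/cache/variants-v2" (kwargs applied via str.format first)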
def _strip_column_name(col_name, keep_paren_contents=True):
    """
    Utility script applying several regexes to a string.
    Intended to be used by `strip_column_names`.

    This function will:
    1. replace informative punctuation components with text
    2. (optionally) remove text within parentheses
    3. replace remaining punctuation/whitespace with _
    4. strip leading/trailing punctuation/whitespace

    Parameters
    ----------
    col_name (str): input character string
    keep_paren_contents (logical):
        controls behavior of within-paren elements of text
        - if True, (the default) all text within parens retained
        - if False, text within parens will be removed from the field name

    Returns
    --------
    modified string for new field name

    Examples
    --------
    > print([_strip_column_name(col) for col in ['PD-L1', 'PD L1', 'PD L1_']])
    """
    # start with input
    new_col_name = col_name
    # replace meaningful punctuation with text equivalents
    punctuation_to_text = {
        '<=': 'le',
        '>=': 'ge',
        '=<': 'le',
        '=>': 'ge',
        '<': 'lt',
        '>': 'gt',
        '#': 'num'
    }
    for punctuation, punctuation_text in punctuation_to_text.items():
        new_col_name = new_col_name.replace(punctuation, punctuation_text)

    # remove contents within ()
    if not keep_paren_contents:
        new_col_name = re.sub(r'\([^)]*\)', '', new_col_name)

    # replace remaining punctuation/whitespace with _
    punct_pattern = r'[\W_]+'
    punct_replacement = '_'
    new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name)

    # remove leading/trailing _ if it exists (if last char was punctuation)
    new_col_name = new_col_name.strip("_")

    # TODO: check for empty string

    # return lower-case version of column name
    return new_col_name.lower()
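Tracing the rules above on a few inputs (expected outputs derived by hand from the code, not from the library's test suite):

_strip_column_name("PD-L1")         # -> "pd_l1"
_strip_column_name("PD L1 (>1)")    # -> "pd_l1_gt1"  ('>' becomes 'gt')
_strip_column_name("PD L1 (>1)", keep_paren_contents=False)  # -> "pd_l1"
_strip_column_name("# mutations")   # -> "num_mutations"  ('#' becomes 'num')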
def strip_column_names(cols, keep_paren_contents=True):
    """
    Utility script for renaming pandas columns to patsy-friendly names.

    Revised names are:
        - stripped of all punctuation and whitespace (converted to text or `_`)
        - converted to lower case

    Takes a list of column names; returns a dict mapping names to revised names.

    If there are any concerns with the conversion, this will print a warning &
    return the original column names.

    Parameters
    ----------
    cols (list): list of strings containing column names
    keep_paren_contents (logical):
        controls behavior of within-paren elements of text
        - if True, (the default) all text within parens retained
        - if False, text within parens will be removed from the field name

    Returns
    -------
    dict mapping col_names -> new_col_names

    Example
    -------
    > df = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
            'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
            'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']),
            'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']),
            }
    > df = pd.DataFrame(df)
    > df = df.rename(columns=strip_column_names(df.columns))

    ## observe, by comparison
    > df2 = df.rename(columns=strip_column_names(df.columns, keep_paren_contents=False))
    """
    # strip/replace punctuation
    new_cols = [
        _strip_column_name(col, keep_paren_contents=keep_paren_contents)
        for col in cols]
    if len(new_cols) != len(set(new_cols)):
        warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.'
        warn_str += ' Reverting column names to the original.'
        warnings.warn(warn_str, Warning)
        print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.')
        return dict(zip(cols, cols))
    return dict(zip(cols, new_cols))
def set_attributes(obj, additional_data):
    """
    Given an object and a dictionary, give the object new attributes from that dictionary.

    Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
    """
    for key, value in additional_data.items():
        stripped_key = _strip_column_name(key)
        # Check collisions against the stripped name, since that is what gets set
        if hasattr(obj, stripped_key):
            raise ValueError("Key %s in additional_data already exists in this object" % key)
        setattr(obj, stripped_key, value)
def return_obj(cols, df, return_cols=False):
    """Construct a DataFrameHolder and then return either that or the DataFrame."""
    df_holder = DataFrameHolder(cols=cols, df=df)
    return df_holder.return_self(return_cols=return_cols)
def compare_provenance(
        this_provenance, other_provenance,
        left_outer_diff="In current but not comparison",
        right_outer_diff="In comparison but not current"):
    """Utility function to compare two arbitrary provenance dicts;
    returns the number of discrepancies.

    Parameters
    ----------
    this_provenance: provenance dict (to be compared to "other_provenance")
    other_provenance: comparison provenance dict (optional)
    left_outer_diff: description/prefix used when printing items in
        this_provenance but not in other_provenance
    right_outer_diff: description/prefix used when printing items in
        other_provenance but not in this_provenance

    Returns
    -----------
    Number of discrepancies (0: None)
    """
    ## if either this or other items is null, return 0
    if (not this_provenance or not other_provenance):
        return 0

    this_items = set(this_provenance.items())
    other_items = set(other_provenance.items())

    # Two-way diff: are any modules introduced, and are any modules lost?
    new_diff = this_items.difference(other_items)
    old_diff = other_items.difference(this_items)
    warn_str = ""
    if len(new_diff) > 0:
        warn_str += "%s: %s" % (
            left_outer_diff,
            _provenance_str(new_diff))
    if len(old_diff) > 0:
        warn_str += "%s: %s" % (
            right_outer_diff,
            _provenance_str(old_diff))

    if len(warn_str) > 0:
        warnings.warn(warn_str, Warning)

    return len(new_diff) + len(old_diff)
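A small worked example with fabricated provenance dicts:

prov_a = {"varcode": "0.5.9", "pyensembl": "1.0.3"}  # fabricated versions
prov_b = {"varcode": "0.5.9", "pyensembl": "1.1.0"}
n_discrepant = compare_provenance(prov_a, prov_b)
# Warns with both one-sided diffs and returns 2:
# ("pyensembl", "1.0.3") is only in prov_a, ("pyensembl", "1.1.0") only in prov_b.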
def _plot_kmf_single(df,
                     condition_col,
                     survival_col,
                     censor_col,
                     threshold,
                     title,
                     xlabel,
                     ylabel,
                     ax,
                     with_condition_color,
                     no_condition_color,
                     with_condition_label,
                     no_condition_label,
                     color_map,
                     label_map,
                     color_palette,
                     ci_show,
                     print_as_title):
    """
    Helper function to produce a single KM survival plot, among observations
    in df, by groups defined by condition_col.

    All inputs are required - this function is intended to be called by
    `plot_kmf`.
    """
    # make color inputs consistent hex format
    if colors.is_color_like(with_condition_color):
        with_condition_color = colors.to_hex(with_condition_color)
    if colors.is_color_like(no_condition_color):
        no_condition_color = colors.to_hex(no_condition_color)

    ## prepare data to be plotted; producing 3 outputs:
    # - `condition`, series containing category labels to be plotted
    # - `label_map` (mapping condition values to plot labels)
    # - `color_map` (mapping condition values to plotted colors)
    if threshold is not None:
        is_median = threshold == "median"
        if is_median:
            threshold = df[condition_col].median()
        label_suffix = float_str(threshold)
        condition = df[condition_col] > threshold
        default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix)
        if is_median:
            label_suffix += " (median)"
        default_label_with_condition = "%s > %s" % (condition_col, label_suffix)
        with_condition_label = with_condition_label or default_label_with_condition
        no_condition_label = no_condition_label or default_label_no_condition
        if not label_map:
            label_map = {False: no_condition_label,
                         True: with_condition_label}
        if not color_map:
            color_map = {False: no_condition_color,
                         True: with_condition_color}
    elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category":
        condition = df[condition_col].astype("category")
        if not label_map:
            label_map = dict()
            [label_map.update({condition_value: '{} = {}'.format(condition_col,
                                                                 condition_value)})
             for condition_value in condition.unique()]
        if not color_map:
            rgb_values = sb.color_palette(color_palette, len(label_map.keys()))
            hex_values = [colors.to_hex(col) for col in rgb_values]
            color_map = dict(zip(label_map.keys(), hex_values))
    elif df[condition_col].dtype == 'bool':
        condition = df[condition_col]
        default_label_with_condition = "= {}".format(condition_col)
        default_label_no_condition = "¬ {}".format(condition_col)
        with_condition_label = with_condition_label or default_label_with_condition
        no_condition_label = no_condition_label or default_label_no_condition
        if not label_map:
            label_map = {False: no_condition_label,
                         True: with_condition_label}
        if not color_map:
            color_map = {False: no_condition_color,
                         True: with_condition_color}
    else:
        raise ValueError("Don't know how to plot data of type {}".format(
            df[condition_col].dtype))

    # produce kmf plot for each category (group) identified above
    kmf = KaplanMeierFitter()
    grp_desc = list()
    grp_survival_data = dict()
    grp_event_data = dict()
    grp_names = list(condition.unique())
    for grp_name, grp_df in df.groupby(condition):
        grp_survival = grp_df[survival_col]
        grp_event = (grp_df[censor_col].astype(bool))
        grp_label = label_map[grp_name]
        grp_color = color_map[grp_name]
        kmf.fit(grp_survival, grp_event, label=grp_label)
        desc_str = "# {}: {}".format(grp_label, len(grp_survival))
        grp_desc.append(desc_str)
        grp_survival_data[grp_name] = grp_survival
        grp_event_data[grp_name] = grp_event
        if ax:
            ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color)
        else:
            ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color)

    ## format the plot
    # Set the y-axis to range 0 to 1
    ax.set_ylim(0, 1)
    y_tick_vals = ax.get_yticks()
    ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals])
    # plot title
    if title:
        ax.set_title(title)
    elif print_as_title:
        ax.set_title(' | '.join(grp_desc))
    else:
        [print(desc) for desc in grp_desc]
    # axis labels
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)

    ## summarize analytical version of results
    ## again using same groups as are plotted
    if len(grp_names) == 2:
        # use log-rank test for 2 groups
        results = logrank_test(grp_survival_data[grp_names[0]],
                               grp_survival_data[grp_names[1]],
                               event_observed_A=grp_event_data[grp_names[0]],
                               event_observed_B=grp_event_data[grp_names[1]])
    elif len(grp_names) == 1:
        # no analytical result for 1 or 0 groups
        results = NullSurvivalResults()
    else:
        # cox PH fitter for >2 groups
        cf = CoxPHFitter()
        cox_df = patsy.dmatrix('+'.join([condition_col, survival_col, censor_col]),
                               df, return_type='dataframe')
        del cox_df['Intercept']
        results = cf.fit(cox_df, survival_col, event_col=censor_col)
        results.print_summary()
    # add metadata to results object so caller can print them
    results.survival_data_series = grp_survival_data
    results.event_data_series = grp_event_data
    results.desc = grp_desc
    return results
Plot survival curves by splitting the dataset into two groups based on
condition_col.

Report results for a log-rank test (if two groups are plotted) or CoxPH
survival analysis (if >2 groups) for association with survival.

Regarding definition of groups:
If condition_col is numeric, values are split into 2 groups.
 - if threshold is defined, the groups are split on condition_col being
   above or below the threshold
 - if threshold == 'median', the threshold is set to the median of condition_col
If condition_col is categorical or string, results are plotted for each
unique value in the dataset.
If condition_col is None, results are plotted for all observations.

Currently, if `strata_col` is given, the results are repeated among each
stratum of the df. A truly "stratified" analysis is not yet supported, but
may be soon.

Parameters
----------
df: dataframe
condition_col: string, column which contains the condition to split on
survival_col: string, column which contains the survival time
censor_col: string, column which contains the censor/event status
strata_col: optional string, denoting column containing data to
    stratify by (default: None)
threshold: int or string, if int, condition_col is thresholded at int,
    if 'median', condition_col thresholded at its median
    if 'median-per-strata' (stratified analysis only), condition_col is
    thresholded at its median within each stratum
title: Title for the plot, default None
ax: an existing matplotlib ax, optional, default None
    note: not currently supported when `strata_col` is not None
with_condition_color: str, hex code color for the with-condition curve
no_condition_color: str, hex code color for the no-condition curve
with_condition_label: str, optional, label for True condition case
no_condition_label: str, optional, label for False condition case
color_map: dict, optional, mapping of condition values to colors, in the
    form of {value_name: color_hex_code}. defaults to `sb.color_palette`
    using `default_color_palette` name, or *_condition_color options in
    case of boolean operators.
label_map: dict, optional, mapping of condition values to labels. defaults
    to "condition_name = condition_value", or *_condition_label options in
    case of boolean operators.
color_palette: str, optional, name of sb.color_palette to use if
    color_map not provided.
ci_show: bool, optional, whether to plot confidence intervals around the
    survival curves, default False
print_as_title: bool, optional, whether to print the group descriptions in
    the plot's title rather than to stdout, default False

def plot_kmf(df,
             condition_col,
             censor_col,
             survival_col,
             strata_col=None,
             threshold=None,
             title=None,
             xlabel=None,
             ylabel=None,
             ax=None,
             with_condition_color="#B38600",
             no_condition_color="#A941AC",
             with_condition_label=None,
             no_condition_label=None,
             color_map=None,
             label_map=None,
             color_palette="Set1",
             ci_show=False,
             print_as_title=False):
    """
    Plot survival curves by splitting the dataset into two groups based on
    condition_col.

    Report results for a log-rank test (if two groups are plotted) or CoxPH
    survival analysis (if >2 groups) for association with survival.

    Regarding definition of groups:
    If condition_col is numeric, values are split into 2 groups.
     - if threshold is defined, the groups are split on condition_col being
       above or below the threshold
     - if threshold == 'median', the threshold is set to the median of condition_col
    If condition_col is categorical or string, results are plotted for each
    unique value in the dataset.
    If condition_col is None, results are plotted for all observations.

    Currently, if `strata_col` is given, the results are repeated among each
    stratum of the df. A truly "stratified" analysis is not yet supported, but
    may be soon.

    Parameters
    ----------
    df: dataframe
    condition_col: string, column which contains the condition to split on
    survival_col: string, column which contains the survival time
    censor_col: string, column which contains the censor/event status
    strata_col: optional string, denoting column containing data to
        stratify by (default: None)
    threshold: int or string, if int, condition_col is thresholded at int,
        if 'median', condition_col thresholded at its median
        if 'median-per-strata' (stratified analysis only), condition_col is
        thresholded at its median within each stratum
    title: Title for the plot, default None
    ax: an existing matplotlib ax, optional, default None
        note: not currently supported when `strata_col` is not None
    with_condition_color: str, hex code color for the with-condition curve
    no_condition_color: str, hex code color for the no-condition curve
    with_condition_label: str, optional, label for True condition case
    no_condition_label: str, optional, label for False condition case
    color_map: dict, optional, mapping of condition values to colors, in the
        form of {value_name: color_hex_code}. defaults to `sb.color_palette`
        using `default_color_palette` name, or *_condition_color options in
        case of boolean operators.
    label_map: dict, optional, mapping of condition values to labels. defaults
        to "condition_name = condition_value", or *_condition_label options in
        case of boolean operators.
    color_palette: str, optional, name of sb.color_palette to use if
        color_map not provided.
    ci_show: bool, optional, whether to plot confidence intervals around the
        survival curves, default False
    print_as_title: bool, optional, whether to print the group descriptions in
        the plot's title rather than to stdout, default False
    """
    # set reasonable default threshold value depending on type of condition_col
    if threshold is None:
        if df[condition_col].dtype != "bool" and \
                np.issubdtype(df[condition_col].dtype, np.number):
            threshold = "median"
    # check inputs for threshold for validity
    elif isinstance(threshold, numbers.Number):
        logger.debug("threshold value is numeric")
    elif threshold not in ("median", "median-per-strata"):
        raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.")
    elif threshold == "median-per-strata" and strata_col is None:
        raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?")
    # construct kwarg dict to pass to _plot_kmf_single.
    # start with args that do not vary according to strata_col
    arglist = dict(
        condition_col=condition_col,
        survival_col=survival_col,
        censor_col=censor_col,
        threshold=threshold,
        with_condition_color=with_condition_color,
        no_condition_color=no_condition_color,
        with_condition_label=with_condition_label,
        no_condition_label=no_condition_label,
        color_map=color_map,
        label_map=label_map,
        xlabel=xlabel,
        ylabel=ylabel,
        ci_show=ci_show,
        color_palette=color_palette,
        print_as_title=print_as_title)
    # if strata_col is None, pass all parameters to _plot_kmf_single
    if strata_col is None:
        arglist.update(dict(
            df=df,
            title=title,
            ax=ax))
        return _plot_kmf_single(**arglist)
    else:
        # prepare for stratified analysis
        if threshold == "median":
            # resolve "median" to the overall (cross-strata) median now, so the
            # same numeric threshold is used in every stratum; pass
            # "median-per-strata" to threshold at each stratum's own median
            arglist["threshold"] = df[condition_col].dropna().median()
        elif threshold == "median-per-strata":
            arglist["threshold"] = "median"
        # create axis / subplots for stratified results
        if ax is not None:
            raise ValueError("ax not supported with stratified analysis.")
        n_strata = len(df[strata_col].unique())
        f, ax = plt.subplots(n_strata, sharex=True)
        # create results dict to hold per-strata results
        results = dict()
        # call _plot_kmf_single for each of the strata
        for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)):
            if n_strata == 1:
                arglist["ax"] = ax
            else:
                arglist["ax"] = ax[i]
            subtitle = "{}: {}".format(strata_col, strat_name)
            arglist["title"] = subtitle
            arglist["df"] = strat_df
            results[subtitle] = plot_kmf(**arglist)
            for desc in results[subtitle].desc:
                print(desc)
        if title:
            f.suptitle(title)
        return results
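A minimal usage sketch (column names here are hypothetical, borrowed from the random cohort generator later in this module; assumes pandas and the lifelines/matplotlib imports this module already relies on):

df = pd.DataFrame({
    "OS": [200, 350, 120, 800, 500, 90],                 # survival time
    "deceased": [True, False, True, True, False, True],  # censor/event status
    "smoker": [True, True, False, False, True, False],   # condition to split on
})
# Two groups (boolean condition), so a log-rank test is reported:
results = plot_kmf(df, condition_col="smoker",
                   censor_col="deceased", survival_col="OS")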
:type formatted_text: FormattedText def concat(self, formatted_text): """:type formatted_text: FormattedText""" assert self._is_compatible(formatted_text), "Cannot concat text with different modes" self.text += formatted_text.text return self
:type formatted_texts: list[FormattedText] def join(self, formatted_texts): """:type formatted_texts: list[FormattedText]""" formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator for formatted_text in formatted_texts: assert self._is_compatible(formatted_text), "Cannot join text with different modes" self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts)) return self
:type args: FormattedText :type kwargs: FormattedText def concat(self, *args, **kwargs): """ :type args: FormattedText :type kwargs: FormattedText """ for arg in args: assert self.formatted_text._is_compatible(arg), "Cannot concat text with different modes" self.format_args.append(arg.text) for kwarg in kwargs: value = kwargs[kwarg] assert self.formatted_text._is_compatible(value), "Cannot concat text with different modes" self.format_kwargs[kwarg] = value.text return self
Generate a random cohort of patients, for testing.

Parameters
----------
size: int
    Number of patients in the generated cohort.
cache_dir: str
    Cache directory for the resulting Cohort.
data_dir: optional, str
    Directory where generated VCFs are written. Required if random
    variants are being generated.
min_random_variants: optional, int
    Minimum number of random variants to be generated per patient.
max_random_variants: optional, int
    Maximum number of random variants to be generated per patient.
seed_val: optional, int
    Seed for the random number generator, for reproducible cohorts.

def random_cohort(size, cache_dir, data_dir=None,
                  min_random_variants=None, max_random_variants=None,
                  seed_val=1234):
    """
    Generate a random cohort of patients, for testing.

    Parameters
    ----------
    size: int
        Number of patients in the generated cohort.
    cache_dir: str
        Cache directory for the resulting Cohort.
    data_dir: optional, str
        Directory where generated VCFs are written. Required if random
        variants are being generated.
    min_random_variants: optional, int
        Minimum number of random variants to be generated per patient.
    max_random_variants: optional, int
        Maximum number of random variants to be generated per patient.
    seed_val: optional, int
        Seed for the random number generator, for reproducible cohorts.
    """
    seed(seed_val)
    d = {}
    d["id"] = [str(id) for id in range(size)]
    d["age"] = choice([10, 15, 28, 32, 59, 62, 64, 66, 68], size)
    d["smoker"] = choice([False, True], size)
    d["OS"] = [randint(10, 1000) for i in range(size)]
    d["PFS"] = [int(os * 0.6) for os in d["OS"]]
    d["benefit"] = [pfs < 50 for pfs in d["PFS"]]
    d["random"] = [randint(100) for i in range(size)]
    d["random_boolean"] = choice([False, True], size)
    d["benefit_correlate"] = [randint(50) if benefit else randint(20) for benefit in d["benefit"]]
    d["benefit_correlate_boolean"] = [True if corr > 10 else False for corr in d["benefit_correlate"]]
    d["deceased"] = choice([False, True], size)
    d["progressed_or_deceased"] = [deceased or choice([False, True]) for deceased in d["deceased"]]
    df = pd.DataFrame(d)
    patients = []
    for i, row in df.iterrows():
        snv_vcf_paths = None
        if max_random_variants is not None and min_random_variants is not None:
            if data_dir is None:
                raise ValueError("data_dir must be provided if random variants are being generated.")
            vcf_path = path.join(data_dir, "patient_%s_mutect.vcf" % row["id"])
            generate_simple_vcf(
                vcf_path,
                generate_random_missense_variants(
                    num_variants=randint(min_random_variants, max_random_variants)))
            snv_vcf_paths = [vcf_path]
        patient = Patient(id=row["id"],
                          os=row["OS"],
                          pfs=row["PFS"],
                          benefit=row["benefit"],
                          deceased=row["deceased"],
                          progressed_or_deceased=row["progressed_or_deceased"],
                          hla_alleles=["HLA-A02:01"],
                          variants={"snv": snv_vcf_paths},
                          additional_data=row)
        patients.append(patient)
    return Cohort(
        patients=patients,
        cache_dir=cache_dir,
        mhc_class=RandomBindingPredictor)
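A usage sketch (the paths here are hypothetical):

# a 20-patient cohort with clinical data only:
cohort = random_cohort(size=20, cache_dir="/tmp/cohort-cache")

# or with 1-5 random missense variants per patient, written out as VCFs:
cohort = random_cohort(size=20, cache_dir="/tmp/cohort-cache",
                       data_dir="/tmp/cohort-data",
                       min_random_variants=1, max_random_variants=5)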
Generate a random collection of missense variants by trying random variants repeatedly.

def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"):
    """
    Generate a random collection of missense variants by trying random variants repeatedly.
    """
    variants = []
    for i in range(max_search):
        bases = ["A", "C", "T", "G"]
        random_ref = choice(bases)
        bases.remove(random_ref)
        random_alt = choice(bases)
        random_contig = choice(["1", "2", "3", "4", "5"])
        random_variant = Variant(contig=random_contig, start=randint(1, 1000000),
                                 ref=random_ref, alt=random_alt, ensembl=reference)
        try:
            effects = random_variant.effects()
            for effect in effects:
                if isinstance(effect, Substitution):
                    variants.append(random_variant)
                    break
        except Exception:
            # skip variants whose effect annotation fails
            continue
        if len(variants) == num_variants:
            break
    return VariantCollection(variants)
Output a very simple metadata-free VCF for each variant in a variant_collection. def generate_simple_vcf(filename, variant_collection): """ Output a very simple metadata-free VCF for each variant in a variant_collection. """ contigs = [] positions = [] refs = [] alts = [] for variant in variant_collection: contigs.append("chr" + variant.contig) positions.append(variant.start) refs.append(variant.ref) alts.append(variant.alt) df = pd.DataFrame() df["contig"] = contigs df["position"] = positions df["id"] = ["."] * len(variant_collection) df["ref"] = refs df["alt"] = alts df["qual"] = ["."] * len(variant_collection) df["filter"] = ["."] * len(variant_collection) df["info"] = ["."] * len(variant_collection) df["format"] = ["GT:AD:DP"] * len(variant_collection) normal_ref_depths = [randint(1, 10) for v in variant_collection] normal_alt_depths = [randint(1, 10) for v in variant_collection] df["n1"] = ["0:%d,%d:%d" % (normal_ref_depths[i], normal_alt_depths[i], normal_ref_depths[i] + normal_alt_depths[i]) for i in range(len(variant_collection))] tumor_ref_depths = [randint(1, 10) for v in variant_collection] tumor_alt_depths = [randint(1, 10) for v in variant_collection] df["t1"] = ["0/1:%d,%d:%d" % (tumor_ref_depths[i], tumor_alt_depths[i], tumor_ref_depths[i] + tumor_alt_depths[i]) for i in range(len(variant_collection))] with open(filename, "w") as f: f.write("##fileformat=VCFv4.1\n") f.write("##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\n") with open(filename, "a") as f: df.to_csv(f, sep="\t", index=None, header=None)
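The two helpers above compose naturally; a sketch (hypothetical output path; effect prediction requires pyensembl's GRCh37 annotation data to be installed):

variants = generate_random_missense_variants(num_variants=5)
generate_simple_vcf("/tmp/random_missense.vcf", variants)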
Looks up folder contents of `path`.

def list_folder(self, path):
    """Looks up folder contents of `path`."""
    # Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70
    try:
        folder_contents = []
        for f in os.listdir(path):
            attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, f)))
            attr.filename = f
            folder_contents.append(attr)
        return folder_contents
    except OSError as e:
        return SFTPServer.convert_errno(e.errno)
Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter def filter_variants(variant_collection, patient, filter_fn, **kwargs): """Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter """ if filter_fn: return variant_collection.clone_with_new_elements([ variant for variant in variant_collection if filter_fn(FilterableVariant( variant=variant, variant_collection=variant_collection, patient=patient, ), **kwargs) ]) else: return variant_collection
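A sketch of a filter_fn, assuming (as its constructor above suggests) that FilterableVariant exposes the wrapped varcode Variant as a `variant` attribute:

def chromosome_1_filter(filterable_variant, **kwargs):
    # keep only variants on contig "1"
    return filterable_variant.variant.contig == "1"

filtered = filter_variants(variant_collection, patient,
                           filter_fn=chromosome_1_filter)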
Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs): """Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter """ def top_priority_maybe(effect_collection): """ Always (unless all_effects=True) take the top priority effect per variant so we end up with a single effect per variant. """ if all_effects: return effect_collection return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values())) def apply_filter_fn(filter_fn, effect): """ Return True if filter_fn is true for the effect or its alternate_effect. If no alternate_effect, then just return True if filter_fn is True. """ applied = filter_fn(FilterableEffect( effect=effect, variant_collection=variant_collection, patient=patient), **kwargs) if hasattr(effect, "alternate_effect"): applied_alternate = filter_fn(FilterableEffect( effect=effect.alternate_effect, variant_collection=variant_collection, patient=patient), **kwargs) return applied or applied_alternate return applied if filter_fn: return top_priority_maybe(EffectCollection([ effect for effect in effect_collection if apply_filter_fn(filter_fn, effect)])) else: return top_priority_maybe(effect_collection)
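Analogously for effects, a hedged sketch assuming FilterableEffect exposes the wrapped varcode effect as an `effect` attribute; Substitution is the varcode effect class already used elsewhere in this module:

def missense_filter(filterable_effect, **kwargs):
    # keep only substitution (missense) effects
    return isinstance(filterable_effect.effect, Substitution)

filtered = filter_effects(effect_collection, variant_collection, patient,
                          filter_fn=missense_filter, all_effects=False)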
Count lines in a file

def count_lines_in(filename):
    "Count lines in a file"
    lines = 0
    buf_size = 1024 * 1024
    with open(filename) as f:
        read_f = f.read  # loop optimization
        buf = read_f(buf_size)
        while buf:
            lines += buf.count('\n')
            buf = read_f(buf_size)
    return lines
Resolve a path to the full python module name of the related view function

def view_name_from(path):
    "Resolve a path to the full python module name of the related view function"
    try:
        return CACHED_VIEWS[path]
    except KeyError:
        view = resolve(path)
        module = path
        name = ''
        if hasattr(view.func, '__module__'):
            module = view.func.__module__
        if hasattr(view.func, '__name__'):
            name = view.func.__name__
        view_name = "%s.%s" % (module, name)
        CACHED_VIEWS[path] = view_name
        return view_name
Output a nicely formatted ascii table def generate_table_from(data): "Output a nicely formatted ascii table" table = Texttable(max_width=120) table.add_row(["view", "method", "status", "count", "minimum", "maximum", "mean", "stdev", "queries", "querytime"]) table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"]) for item in sorted(data): mean = round(sum(data[item]['times'])/data[item]['count'], 3) mean_sql = round(sum(data[item]['sql'])/data[item]['count'], 3) mean_sqltime = round(sum(data[item]['sqltime'])/data[item]['count'], 3) sdsq = sum([(i - mean) ** 2 for i in data[item]['times']]) try: stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5) except ZeroDivisionError: stdev = '0.00' minimum = "%.2f" % min(data[item]['times']) maximum = "%.2f" % max(data[item]['times']) table.add_row([data[item]['view'], data[item]['method'], data[item]['status'], data[item]['count'], minimum, maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime]) return table.draw()
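The expected shape of `data` is exactly what analyze_log_file below builds; a minimal hand-rolled example:

data = {
    'app.views.home-200-GET': {
        'view': 'app.views.home', 'method': 'GET', 'status': '200',
        'count': 2, 'times': [0.12, 0.34], 'sql': [3, 5], 'sqltime': [0.01, 0.02],
    },
}
print(generate_table_from(data))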
Given a log file and a regex pattern with capture groups, extract the performance data

def analyze_log_file(logfile, pattern, reverse_paths=True, progress=True):
    "Given a log file and a regex pattern with capture groups, extract the performance data"
    if progress:
        lines = count_lines_in(logfile)
        pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=lines + 1).start()
        counter = 0
    data = {}
    compiled_pattern = compile(pattern)
    for line in fileinput.input([logfile]):
        if progress:
            counter = counter + 1
        parsed = compiled_pattern.findall(line)[0]
        date = parsed[0]
        method = parsed[1]
        path = parsed[2]
        status = parsed[3]
        time = parsed[4]
        sql = parsed[5]
        sqltime = parsed[6]
        try:
            ignore = False
            for ignored_path in IGNORE_PATHS:
                compiled_path = compile(ignored_path)
                if compiled_path.match(path):
                    ignore = True
            if not ignore:
                if reverse_paths:
                    view = view_name_from(path)
                else:
                    view = path
                key = "%s-%s-%s" % (view, status, method)
                try:
                    data[key]['count'] = data[key]['count'] + 1
                    data[key]['times'].append(float(time))
                    data[key]['sql'].append(int(sql))
                    data[key]['sqltime'].append(float(sqltime))
                except KeyError:
                    data[key] = {
                        'count': 1,
                        'status': status,
                        'view': view,
                        'method': method,
                        'times': [float(time)],
                        'sql': [int(sql)],
                        'sqltime': [float(sqltime)],
                    }
        except Resolver404:
            pass
        if progress:
            pbar.update(counter)
    if progress:
        pbar.finish()
    return data
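The pattern must capture, in order: date, method, path, status, response time, query count and query time. A hypothetical pattern for log lines like `[2014-01-01 12:00:00] GET /home/ 200 0.123 5 0.045`:

pattern = r'\[(.*?)\] (\w+) (\S+) (\d+) ([\d.]+) (\d+) ([\d.]+)'
data = analyze_log_file("perf.log", pattern, reverse_paths=False, progress=False)
print(generate_table_from(data))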
Create a string representation of this collection, showing up to `limit` items. def to_string(self, limit=None): """ Create a string representation of this collection, showing up to `limit` items. """ header = self.short_string() if len(self) == 0: return header contents = "" element_lines = [ " -- %s" % (element,) for element in self.elements[:limit] ] contents = "\n".join(element_lines) if limit is not None and len(self.elements) > limit: contents += "\n ... and %d more" % (len(self) - limit) return "%s\n%s" % (header, contents)
:rtype: UserStorageHandler def get_instance(cls, state): """:rtype: UserStorageHandler""" if cls.instance is None: cls.instance = UserStorageHandler(state) return cls.instance
May contain sensitive info (like user ids). Use with care. def _get_active_threads_names(): """May contain sensitive info (like user ids). Use with care.""" active_threads = threading.enumerate() return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format() for thread in active_threads ] )
May contain sensitive info (like user ids). Use with care. def _get_running_workers_names(running_workers: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in running_workers ] )
May contain sensitive info (like user ids). Use with care. def _get_worker_pools_names(worker_pools: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in worker_pools ] )
:param member_info: If True, adds also chat member info.
    Please note that this additional info requires making ONE API call.

def format(self, member_info: bool = False):
    """
    :param member_info: If True, adds also chat member info.
        Please note that this additional info requires making ONE API call.
    """
    user = self.api_object
    self.__format_user(user)
    if member_info and self.chat.type != CHAT_TYPE_PRIVATE:
        self._add_empty()
        self.__format_member(user)
Log error failing silently on error def safe_log_error(self, error: Exception, *info: str): """Log error failing silently on error""" self.__do_safe(lambda: self.logger.error(error, *info))
Log info failing silently on error def safe_log_info(self, *info: str): """Log info failing silently on error""" self.__do_safe(lambda: self.logger.info(*info))
implements the wald-wolfowitz runs test:
http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
http://support.sas.com/kb/33/092.html

:param sequence: any iterable with at most 2 distinct values. e.g.
          '1001001'
          [1, 0, 1, 0, 1]
          'abaaabbba'
:rtype: a dict with keys of
   `n_runs`: the number of runs in the sequence
   `p`: the p-value for the null hypothesis that the sequence is random
        (small values are evidence against randomness)
   `z`: the z-score, used to calculate the p-value
   `sd`, `mean`: the expected standard deviation and mean of the number of
        runs, given the ratio of numbers of 1's/0's in the sequence

>>> r = wald_wolfowitz('1000001')
>>> r['n_runs'] # should be 3, because 1, 0, 1
3

>>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
False

# this should show significance for non-randomness
>>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
>>> wald_wolfowitz(li)['p'] < 0.05
True

def wald_wolfowitz(sequence):
    """
    implements the wald-wolfowitz runs test:
    http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
    http://support.sas.com/kb/33/092.html

    :param sequence: any iterable with at most 2 distinct values. e.g.
              '1001001'
              [1, 0, 1, 0, 1]
              'abaaabbba'
    :rtype: a dict with keys of
       `n_runs`: the number of runs in the sequence
       `p`: the p-value for the null hypothesis that the sequence is random
            (small values are evidence against randomness)
       `z`: the z-score, used to calculate the p-value
       `sd`, `mean`: the expected standard deviation and mean of the number of
            runs, given the ratio of numbers of 1's/0's in the sequence

    >>> r = wald_wolfowitz('1000001')
    >>> r['n_runs'] # should be 3, because 1, 0, 1
    3

    >>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
    False

    # this should show significance for non-randomness
    >>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    >>> wald_wolfowitz(li)['p'] < 0.05
    True
    """
    R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))

    n = float(sum(1 for s in sequence if s == sequence[0]))
    m = float(sum(1 for s in sequence if s != sequence[0]))

    # expected mean runs
    ER = ((2 * n * m) / (n + m)) + 1
    # expected variance runs
    VR = (2 * n * m * (2 * n * m - n - m)) / ((n + m)**2 * (n + m - 1))
    # sanity check: an algebraically equivalent, factored form of the variance
    O = (ER - 1) * (ER - 2) / (n + m - 1.)
    assert abs(VR - O) < 0.001, (VR, O)

    SD = math.sqrt(VR)
    # Z-score
    Z = (R - ER) / SD

    return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
test for the autocorrelation of a sequence between t and t - 1;
the closer the 'auto_correlation' is to 1, the less likely it is
that the sequence was generated randomly.

:param sequence: any iterable whose values can be turned into floats. e.g.
          '1001001'
          [1, 0, 1, 0, 1]
          [1.2, .1, .5, 1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs. sequence[:-1]

>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True

>>> result['auto_correlation']
0.83766233766233755

def auto_correlation(sequence):
    """
    test for the autocorrelation of a sequence between t and t - 1;
    the closer the 'auto_correlation' is to 1, the less likely it is
    that the sequence was generated randomly.

    :param sequence: any iterable whose values can be turned into floats. e.g.
              '1001001'
              [1, 0, 1, 0, 1]
              [1.2, .1, .5, 1]
    :rtype: returns a dict of the linear regression stats of sequence[1:] vs. sequence[:-1]

    >>> result = auto_correlation('00000001111111111100000000')
    >>> result['p'] < 0.05
    True

    >>> result['auto_correlation']
    0.83766233766233755
    """
    if isinstance(sequence, str):
        sequence = [int(c) for c in sequence]

    seq = np.array(list(sequence), dtype=float)
    dseq = np.column_stack((seq[1:], seq[:-1]))

    slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
    cc = np.corrcoef(dseq, rowvar=0)[0][1]

    return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
            'p': ttp, 'see': see, 'auto_correlation': cc}
Parse the links from a Link: header field.

.. todo:: Links with the same relation collide at the moment.

:param response: The HTTP response to parse the ``Link:`` headers from.

:rtype: `dict`
:return: A dictionary of parsed links, keyed by ``rel`` or ``url``.

def _parse_header_links(response):
    """
    Parse the links from a Link: header field.

    .. todo:: Links with the same relation collide at the moment.

    :param response: The HTTP response to parse the ``Link:`` headers from.

    :rtype: `dict`
    :return: A dictionary of parsed links, keyed by ``rel`` or ``url``.
    """
    values = response.headers.getRawHeaders(b'link', [b''])
    value = b','.join(values).decode('ascii')
    with LOG_HTTP_PARSE_LINKS(raw_link=value) as action:
        links = {}
        replace_chars = u' \'"'
        for val in re.split(u', *<', value):
            try:
                url, params = val.split(u';', 1)
            except ValueError:
                url, params = val, u''
            link = {}
            link[u'url'] = url.strip(u'<> \'"')
            for param in params.split(u';'):
                try:
                    key, value = param.split(u'=')
                except ValueError:
                    break
                link[key.strip(replace_chars)] = value.strip(replace_chars)
            links[link.get(u'rel') or link.get(u'url')] = link
        action.add_success_fields(parsed_links=links)
        return links
Make a client if we didn't get one. def _default_client(jws_client, reactor, key, alg): """ Make a client if we didn't get one. """ if jws_client is None: pool = HTTPConnectionPool(reactor) agent = Agent(reactor, pool=pool) jws_client = JWSClient(HTTPClient(agent=agent), key, alg) return jws_client
Find a challenge combination that consists of a single challenge that the
responder can satisfy.

:param ~acme.messages.AuthorizationResource authzr: The authorization to
    examine.

:type responders: List[`~txacme.interfaces.IResponder`]
:param responders: The possible responders to use.

:raises NoSupportedChallenges: When a suitable challenge combination is not
    found.

:rtype: Tuple[`~txacme.interfaces.IResponder`, `~acme.messages.ChallengeBody`]
:return: The responder and challenge that were found.

def _find_supported_challenge(authzr, responders):
    """
    Find a challenge combination that consists of a single challenge that the
    responder can satisfy.

    :param ~acme.messages.AuthorizationResource authzr: The authorization to
        examine.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: The possible responders to use.

    :raises NoSupportedChallenges: When a suitable challenge combination is not
        found.

    :rtype: Tuple[`~txacme.interfaces.IResponder`, `~acme.messages.ChallengeBody`]
    :return: The responder and challenge that were found.
    """
    matches = [
        (responder, challbs[0])
        for challbs in authzr.body.resolved_combinations
        for responder in responders
        if [challb.typ for challb in challbs] == [responder.challenge_type]]
    if len(matches) == 0:
        raise NoSupportedChallenges(authzr)
    else:
        return matches[0]
Complete an authorization using a responder.

:param ~acme.messages.AuthorizationResource authzr: The authorization to
    complete.
:param .Client client: The ACME client.

:type responders: List[`~txacme.interfaces.IResponder`]
:param responders: A list of responders that can be used to complete the
    challenge with.

:return: A deferred firing when the authorization is verified.

def answer_challenge(authzr, client, responders):
    """
    Complete an authorization using a responder.

    :param ~acme.messages.AuthorizationResource authzr: The authorization to
        complete.
    :param .Client client: The ACME client.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: A list of responders that can be used to complete the
        challenge with.

    :return: A deferred firing when the authorization is verified.
    """
    responder, challb = _find_supported_challenge(authzr, responders)
    response = challb.response(client.key)

    def _stop_responding():
        return maybeDeferred(
            responder.stop_responding,
            authzr.body.identifier.value,
            challb.chall,
            response)
    return (
        maybeDeferred(
            responder.start_responding,
            authzr.body.identifier.value,
            challb.chall,
            response)
        .addCallback(lambda _: client.answer_challenge(challb, response))
        .addCallback(lambda _: _stop_responding)
        )
Poll an authorization until it is in a state other than pending or
processing.

:param ~acme.messages.AuthorizationResource authzr: The authorization to
    complete.
:param clock: The ``IReactorTime`` implementation to use; usually the
    reactor, when not testing.
:param .Client client: The ACME client.
:param float timeout: Maximum time to poll in seconds, before giving up.

:raises txacme.client.AuthorizationFailed: if the authorization is no
    longer in the pending, processing, or valid states.
:raises: ``twisted.internet.defer.CancelledError`` if the authorization was
    still in pending or processing state when the timeout was reached.

:rtype: Deferred[`~acme.messages.AuthorizationResource`]
:return: A deferred firing when the authorization has completed/failed; if
    the authorization is valid, the authorization resource will be returned.

def poll_until_valid(authzr, clock, client, timeout=300.0):
    """
    Poll an authorization until it is in a state other than pending or
    processing.

    :param ~acme.messages.AuthorizationResource authzr: The authorization to
        complete.
    :param clock: The ``IReactorTime`` implementation to use; usually the
        reactor, when not testing.
    :param .Client client: The ACME client.
    :param float timeout: Maximum time to poll in seconds, before giving up.

    :raises txacme.client.AuthorizationFailed: if the authorization is no
        longer in the pending, processing, or valid states.
    :raises: ``twisted.internet.defer.CancelledError`` if the authorization
        was still in pending or processing state when the timeout was reached.

    :rtype: Deferred[`~acme.messages.AuthorizationResource`]
    :return: A deferred firing when the authorization has completed/failed;
        if the authorization is valid, the authorization resource will be
        returned.
    """
    def repoll(result):
        authzr, retry_after = result
        if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}:
            return (
                deferLater(clock, retry_after, lambda: None)
                .addCallback(lambda _: client.poll(authzr))
                .addCallback(repoll)
                )
        if authzr.body.status != STATUS_VALID:
            raise AuthorizationFailed(authzr)
        return authzr

    def cancel_timeout(result):
        if timeout_call.active():
            timeout_call.cancel()
        return result
    d = client.poll(authzr).addCallback(repoll)
    timeout_call = clock.callLater(timeout, d.cancel)
    d.addBoth(cancel_timeout)
    return d
Construct a client from an ACME directory at a given URL. :param url: The ``twisted.python.url.URL`` to fetch the directory from. See `txacme.urls` for constants for various well-known public directories. :param reactor: The Twisted reactor to use. :param ~josepy.jwk.JWK key: The client key to use. :param alg: The signing algorithm to use. Needs to be compatible with the type of key used. :param JWSClient jws_client: The underlying client to use, or ``None`` to construct one. :return: The constructed client. :rtype: Deferred[`Client`] def from_url(cls, reactor, url, key, alg=RS256, jws_client=None): """ Construct a client from an ACME directory at a given URL. :param url: The ``twisted.python.url.URL`` to fetch the directory from. See `txacme.urls` for constants for various well-known public directories. :param reactor: The Twisted reactor to use. :param ~josepy.jwk.JWK key: The client key to use. :param alg: The signing algorithm to use. Needs to be compatible with the type of key used. :param JWSClient jws_client: The underlying client to use, or ``None`` to construct one. :return: The constructed client. :rtype: Deferred[`Client`] """ action = LOG_ACME_CONSUME_DIRECTORY( url=url, key_type=key.typ, alg=alg.name) with action.context(): check_directory_url_type(url) jws_client = _default_client(jws_client, reactor, key, alg) return ( DeferredContext(jws_client.get(url.asText())) .addCallback(json_content) .addCallback(messages.Directory.from_json) .addCallback( tap(lambda d: action.add_success_fields(directory=d))) .addCallback(cls, reactor, key, jws_client) .addActionFinish())
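These pieces chain together; a rough end-to-end sketch of authorizing a single name (hypothetical helper; assumes `LETSENCRYPT_STAGING_DIRECTORY` from txacme.urls, the acme `messages` module, `defer` from twisted.internet, and responders implementing IResponder — certificate issuance and error handling are omitted):

@defer.inlineCallbacks
def authorize_name(reactor, key, responders, name):
    client = yield Client.from_url(reactor, LETSENCRYPT_STAGING_DIRECTORY, key=key)
    regr = yield client.register()
    yield client.agree_to_tos(regr)
    authzr = yield client.request_challenges(
        messages.Identifier(typ=messages.IDENTIFIER_FQDN, value=name))
    # answer_challenge fires with a callable that stops the responder
    stop_responding = yield answer_challenge(authzr, client, responders)
    authzr = yield poll_until_valid(authzr, reactor, client)
    yield stop_responding()
    defer.returnValue(authzr)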
Create a new registration with the ACME server. :param ~acme.messages.NewRegistration new_reg: The registration message to use, or ``None`` to construct one. :return: The registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] def register(self, new_reg=None): """ Create a new registration with the ACME server. :param ~acme.messages.NewRegistration new_reg: The registration message to use, or ``None`` to construct one. :return: The registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] """ if new_reg is None: new_reg = messages.NewRegistration() action = LOG_ACME_REGISTER(registration=new_reg) with action.context(): return ( DeferredContext( self.update_registration( new_reg, uri=self.directory[new_reg])) .addErrback(self._maybe_registered, new_reg) .addCallback( tap(lambda r: action.add_success_fields(registration=r))) .addActionFinish())
Get the Location: if there is one. def _maybe_location(cls, response, uri=None): """ Get the Location: if there is one. """ location = response.headers.getRawHeaders(b'location', [None])[0] if location is not None: return location.decode('ascii') return uri
If the registration already exists, we should just load it. def _maybe_registered(self, failure, new_reg): """ If the registration already exists, we should just load it. """ failure.trap(ServerError) response = failure.value.response if response.code == http.CONFLICT: reg = new_reg.update( resource=messages.UpdateRegistration.resource_type) uri = self._maybe_location(response) return self.update_registration(reg, uri=uri) return failure
Accept the terms-of-service for a registration. :param ~acme.messages.RegistrationResource regr: The registration to update. :return: The updated registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] def agree_to_tos(self, regr): """ Accept the terms-of-service for a registration. :param ~acme.messages.RegistrationResource regr: The registration to update. :return: The updated registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] """ return self.update_registration( regr.update( body=regr.body.update( agreement=regr.terms_of_service)))
Submit a registration to the server to update it. :param ~acme.messages.RegistrationResource regr: The registration to update. Can be a :class:`~acme.messages.NewRegistration` instead, in order to create a new registration. :param str uri: The url to submit to. Must be specified if a :class:`~acme.messages.NewRegistration` is provided. :return: The updated registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] def update_registration(self, regr, uri=None): """ Submit a registration to the server to update it. :param ~acme.messages.RegistrationResource regr: The registration to update. Can be a :class:`~acme.messages.NewRegistration` instead, in order to create a new registration. :param str uri: The url to submit to. Must be specified if a :class:`~acme.messages.NewRegistration` is provided. :return: The updated registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] """ if uri is None: uri = regr.uri if isinstance(regr, messages.RegistrationResource): message = messages.UpdateRegistration(**dict(regr.body)) else: message = regr action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message) with action.context(): return ( DeferredContext(self._client.post(uri, message)) .addCallback(self._parse_regr_response, uri=uri) .addCallback(self._check_regr, regr) .addCallback( tap(lambda r: action.add_success_fields(registration=r))) .addActionFinish())
Parse a registration response from the server. def _parse_regr_response(self, response, uri=None, new_authzr_uri=None, terms_of_service=None): """ Parse a registration response from the server. """ links = _parse_header_links(response) if u'terms-of-service' in links: terms_of_service = links[u'terms-of-service'][u'url'] if u'next' in links: new_authzr_uri = links[u'next'][u'url'] if new_authzr_uri is None: raise errors.ClientError('"next" link missing') return ( response.json() .addCallback( lambda body: messages.RegistrationResource( body=messages.Registration.from_json(body), uri=self._maybe_location(response, uri=uri), new_authzr_uri=new_authzr_uri, terms_of_service=terms_of_service)) )
Check that a registration response contains the registration we were expecting. def _check_regr(self, regr, new_reg): """ Check that a registration response contains the registration we were expecting. """ body = getattr(new_reg, 'body', new_reg) for k, v in body.items(): if k == 'resource' or not v: continue if regr.body[k] != v: raise errors.UnexpectedUpdate(regr) if regr.body.key != self.key.public_key(): raise errors.UnexpectedUpdate(regr) return regr
Create a new authorization. :param ~acme.messages.Identifier identifier: The identifier to authorize. :return: The new authorization resource. :rtype: Deferred[`~acme.messages.AuthorizationResource`] def request_challenges(self, identifier): """ Create a new authorization. :param ~acme.messages.Identifier identifier: The identifier to authorize. :return: The new authorization resource. :rtype: Deferred[`~acme.messages.AuthorizationResource`] """ action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier) with action.context(): message = messages.NewAuthorization(identifier=identifier) return ( DeferredContext( self._client.post(self.directory[message], message)) .addCallback(self._expect_response, http.CREATED) .addCallback(self._parse_authorization) .addCallback(self._check_authorization, identifier) .addCallback( tap(lambda a: action.add_success_fields(authorization=a))) .addActionFinish())
Ensure we got the expected response code. def _expect_response(cls, response, code): """ Ensure we got the expected response code. """ if response.code != code: raise errors.ClientError( 'Expected {!r} response but got {!r}'.format( code, response.code)) return response
Parse an authorization resource. def _parse_authorization(cls, response, uri=None): """ Parse an authorization resource. """ links = _parse_header_links(response) try: new_cert_uri = links[u'next'][u'url'] except KeyError: raise errors.ClientError('"next" link missing') return ( response.json() .addCallback( lambda body: messages.AuthorizationResource( body=messages.Authorization.from_json(body), uri=cls._maybe_location(response, uri=uri), new_cert_uri=new_cert_uri)) )
Check that the authorization we got is the one we expected. def _check_authorization(cls, authzr, identifier): """ Check that the authorization we got is the one we expected. """ if authzr.body.identifier != identifier: raise errors.UnexpectedUpdate(authzr) return authzr
Respond to an authorization challenge. :param ~acme.messages.ChallengeBody challenge_body: The challenge being responded to. :param ~acme.challenges.ChallengeResponse response: The response to the challenge. :return: The updated challenge resource. :rtype: Deferred[`~acme.messages.ChallengeResource`] def answer_challenge(self, challenge_body, response): """ Respond to an authorization challenge. :param ~acme.messages.ChallengeBody challenge_body: The challenge being responded to. :param ~acme.challenges.ChallengeResponse response: The response to the challenge. :return: The updated challenge resource. :rtype: Deferred[`~acme.messages.ChallengeResource`] """ action = LOG_ACME_ANSWER_CHALLENGE( challenge_body=challenge_body, response=response) with action.context(): return ( DeferredContext( self._client.post(challenge_body.uri, response)) .addCallback(self._parse_challenge) .addCallback(self._check_challenge, challenge_body) .addCallback( tap(lambda c: action.add_success_fields(challenge_resource=c))) .addActionFinish())
Parse a challenge resource. def _parse_challenge(cls, response): """ Parse a challenge resource. """ links = _parse_header_links(response) try: authzr_uri = links['up']['url'] except KeyError: raise errors.ClientError('"up" link missing') return ( response.json() .addCallback( lambda body: messages.ChallengeResource( authzr_uri=authzr_uri, body=messages.ChallengeBody.from_json(body))) )
Check that the challenge resource we got is the one we expected. def _check_challenge(cls, challenge, challenge_body): """ Check that the challenge resource we got is the one we expected. """ if challenge.uri != challenge_body.uri: raise errors.UnexpectedUpdate(challenge.uri) return challenge
Update an authorization from the server (usually to check its status). def poll(self, authzr): """ Update an authorization from the server (usually to check its status). """ action = LOG_ACME_POLL_AUTHORIZATION(authorization=authzr) with action.context(): return ( DeferredContext(self._client.get(authzr.uri)) # Spec says we should get 202 while pending, Boulder actually # sends us 200 always, so just don't check. # .addCallback(self._expect_response, http.ACCEPTED) .addCallback( lambda res: self._parse_authorization(res, uri=authzr.uri) .addCallback( self._check_authorization, authzr.body.identifier) .addCallback( lambda authzr: (authzr, self.retry_after(res, _now=self._clock.seconds))) ) .addCallback(tap( lambda a_r: action.add_success_fields( authorization=a_r[0], retry_after=a_r[1]))) .addActionFinish())
Parse the Retry-After value from a response. def retry_after(cls, response, default=5, _now=time.time): """ Parse the Retry-After value from a response. """ val = response.headers.getRawHeaders(b'retry-after', [default])[0] try: return int(val) except ValueError: return http.stringToDatetime(val) - _now()
Request a certificate. Authorizations should have already been completed for all of the names requested in the CSR. Note that unlike `acme.client.Client.request_issuance`, the certificate resource will have the body data as raw bytes. .. seealso:: `txacme.util.csr_for_names` .. todo:: Delayed issuance is not currently supported, the server must issue the requested certificate immediately. :param csr: A certificate request message: normally `txacme.messages.CertificateRequest` or `acme.messages.CertificateRequest`. :rtype: Deferred[`acme.messages.CertificateResource`] :return: The issued certificate. def request_issuance(self, csr): """ Request a certificate. Authorizations should have already been completed for all of the names requested in the CSR. Note that unlike `acme.client.Client.request_issuance`, the certificate resource will have the body data as raw bytes. .. seealso:: `txacme.util.csr_for_names` .. todo:: Delayed issuance is not currently supported, the server must issue the requested certificate immediately. :param csr: A certificate request message: normally `txacme.messages.CertificateRequest` or `acme.messages.CertificateRequest`. :rtype: Deferred[`acme.messages.CertificateResource`] :return: The issued certificate. """ action = LOG_ACME_REQUEST_CERTIFICATE() with action.context(): return ( DeferredContext( self._client.post( self.directory[csr], csr, content_type=DER_CONTENT_TYPE, headers=Headers({b'Accept': [DER_CONTENT_TYPE]}))) .addCallback(self._expect_response, http.CREATED) .addCallback(self._parse_certificate) .addActionFinish())
Parse a response containing a certificate resource. def _parse_certificate(cls, response): """ Parse a response containing a certificate resource. """ links = _parse_header_links(response) try: cert_chain_uri = links[u'up'][u'url'] except KeyError: cert_chain_uri = None return ( response.content() .addCallback( lambda body: messages.CertificateResource( uri=cls._maybe_location(response), cert_chain_uri=cert_chain_uri, body=body)) )
Fetch the intermediary chain for a certificate. :param acme.messages.CertificateResource certr: The certificate to fetch the chain for. :param int max_length: The maximum length of the chain that will be fetched. :rtype: Deferred[List[`acme.messages.CertificateResource`]] :return: The issuer certificate chain, ordered with the trust anchor last. def fetch_chain(self, certr, max_length=10): """ Fetch the intermediary chain for a certificate. :param acme.messages.CertificateResource certr: The certificate to fetch the chain for. :param int max_length: The maximum length of the chain that will be fetched. :rtype: Deferred[List[`acme.messages.CertificateResource`]] :return: The issuer certificate chain, ordered with the trust anchor last. """ action = LOG_ACME_FETCH_CHAIN() with action.context(): if certr.cert_chain_uri is None: return succeed([]) elif max_length < 1: raise errors.ClientError('chain too long') return ( DeferredContext( self._client.get( certr.cert_chain_uri, content_type=DER_CONTENT_TYPE, headers=Headers({b'Accept': [DER_CONTENT_TYPE]}))) .addCallback(self._parse_certificate) .addCallback( lambda issuer: self.fetch_chain(issuer, max_length=max_length - 1) .addCallback(lambda chain: [issuer] + chain)) .addActionFinish())
Wrap ``JSONDeSerializable`` object in JWS. .. todo:: Implement ``acmePath``. :param ~josepy.interfaces.JSONDeSerializable obj: :param bytes nonce: :rtype: `bytes` :return: JSON-encoded data def _wrap_in_jws(self, nonce, obj): """ Wrap ``JSONDeSerializable`` object in JWS. .. todo:: Implement ``acmePath``. :param ~josepy.interfaces.JSONDeSerializable obj: :param bytes nonce: :rtype: `bytes` :return: JSON-encoded data """ with LOG_JWS_SIGN(key_type=self._key.typ, alg=self._alg.name, nonce=nonce): jobj = obj.json_dumps().encode() return ( JWS.sign( payload=jobj, key=self._key, alg=self._alg, nonce=nonce) .json_dumps() .encode())
Check response content and its type. .. note:: Unlike :mod:`acme.client`, checking is strict. :param bytes content_type: Expected Content-Type response header. If the response Content-Type does not match, :exc:`ClientError` is raised. :raises .ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises ~acme.errors.ClientError: In case of other networking errors. def _check_response(cls, response, content_type=JSON_CONTENT_TYPE): """ Check response content and its type. .. note:: Unlike :mod:`acme.client`, checking is strict. :param bytes content_type: Expected Content-Type response header. If the response Content-Type does not match, :exc:`ClientError` is raised. :raises .ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises ~acme.errors.ClientError: In case of other networking errors. """ def _got_failure(f): f.trap(ValueError) return None def _got_json(jobj): if 400 <= response.code < 600: if response_ct == JSON_ERROR_CONTENT_TYPE and jobj is not None: raise ServerError( messages.Error.from_json(jobj), response) else: # response is not JSON object raise errors.ClientError(response) elif response_ct != content_type: raise errors.ClientError( 'Unexpected response Content-Type: {0!r}'.format( response_ct)) elif content_type == JSON_CONTENT_TYPE and jobj is None: raise errors.ClientError(response) return response response_ct = response.headers.getRawHeaders( b'Content-Type', [None])[0] action = LOG_JWS_CHECK_RESPONSE( expected_content_type=content_type, response_content_type=response_ct) with action.context(): # TODO: response.json() is called twice, once here, and # once in _get and _post clients return ( DeferredContext(response.json()) .addErrback(_got_failure) .addCallback(_got_json) .addActionFinish())
Send HTTP request. :param str method: The HTTP method to use. :param str url: The URL to make the request to. :return: Deferred firing with the HTTP response. def _send_request(self, method, url, *args, **kwargs): """ Send HTTP request. :param str method: The HTTP method to use. :param str url: The URL to make the request to. :return: Deferred firing with the HTTP response. """ action = LOG_JWS_REQUEST(url=url) with action.context(): headers = kwargs.setdefault('headers', Headers()) headers.setRawHeaders(b'user-agent', [self._user_agent]) kwargs.setdefault('timeout', self.timeout) return ( DeferredContext( self._treq.request(method, url, *args, **kwargs)) .addCallback( tap(lambda r: action.add_success_fields( code=r.code, content_type=r.headers.getRawHeaders( b'content-type', [None])[0]))) .addActionFinish())
Send HEAD request without checking the response. Note that ``_check_response`` is not called, as there will be no response body to check. :param str url: The URL to make the request to. def head(self, url, *args, **kwargs): """ Send HEAD request without checking the response. Note that ``_check_response`` is not called, as there will be no response body to check. :param str url: The URL to make the request to. """ with LOG_JWS_HEAD().context(): return DeferredContext( self._send_request(u'HEAD', url, *args, **kwargs) ).addActionFinish()
Send GET request and check response.

:param str url: The URL to make the request to.

:raises txacme.client.ServerError: If server response body carries HTTP
    Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.

:return: Deferred firing with the checked HTTP response.

def get(self, url, content_type=JSON_CONTENT_TYPE, **kwargs):
    """
    Send GET request and check response.

    :param str url: The URL to make the request to.

    :raises txacme.client.ServerError: If server response body carries HTTP
        Problem (draft-ietf-appsawg-http-problem-00).
    :raises acme.errors.ClientError: In case of other protocol errors.

    :return: Deferred firing with the checked HTTP response.
    """
    with LOG_JWS_GET().context():
        return (
            DeferredContext(self._send_request(u'GET', url, **kwargs))
            .addCallback(self._check_response, content_type=content_type)
            .addActionFinish())
Store a nonce from a response we received. :param twisted.web.iweb.IResponse response: The HTTP response. :return: The response, unmodified. def _add_nonce(self, response): """ Store a nonce from a response we received. :param twisted.web.iweb.IResponse response: The HTTP response. :return: The response, unmodified. """ nonce = response.headers.getRawHeaders( REPLAY_NONCE_HEADER, [None])[0] with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action: if nonce is None: raise errors.MissingNonce(response) else: try: decoded_nonce = Header._fields['nonce'].decode( nonce.decode('ascii') ) action.add_success_fields(nonce=decoded_nonce) except DeserializationError as error: raise errors.BadNonce(nonce, error) self._nonces.add(decoded_nonce) return response
Get a nonce to use in a request, removing it from the nonces on hand. def _get_nonce(self, url): """ Get a nonce to use in a request, removing it from the nonces on hand. """ action = LOG_JWS_GET_NONCE() if len(self._nonces) > 0: with action: nonce = self._nonces.pop() action.add_success_fields(nonce=nonce) return succeed(nonce) else: with action.context(): return ( DeferredContext(self.head(url)) .addCallback(self._add_nonce) .addCallback(lambda _: self._nonces.pop()) .addCallback(tap( lambda nonce: action.add_success_fields(nonce=nonce))) .addActionFinish())
POST an object and check the response. :param str url: The URL to request. :param ~josepy.interfaces.JSONDeSerializable obj: The serializable payload of the request. :param bytes content_type: The expected content type of the response. :raises txacme.client.ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises acme.errors.ClientError: In case of other protocol errors. def _post(self, url, obj, content_type, **kwargs): """ POST an object and check the response. :param str url: The URL to request. :param ~josepy.interfaces.JSONDeSerializable obj: The serializable payload of the request. :param bytes content_type: The expected content type of the response. :raises txacme.client.ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises acme.errors.ClientError: In case of other protocol errors. """ with LOG_JWS_POST().context(): headers = kwargs.setdefault('headers', Headers()) headers.setRawHeaders(b'content-type', [JSON_CONTENT_TYPE]) return ( DeferredContext(self._get_nonce(url)) .addCallback(self._wrap_in_jws, obj) .addCallback( lambda data: self._send_request( u'POST', url, data=data, **kwargs)) .addCallback(self._add_nonce) .addCallback(self._check_response, content_type=content_type) .addActionFinish())
POST an object and check the response. Retry once if a badNonce error is received. :param str url: The URL to request. :param ~josepy.interfaces.JSONDeSerializable obj: The serializable payload of the request. :param bytes content_type: The expected content type of the response. By default, JSON. :raises txacme.client.ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises acme.errors.ClientError: In case of other protocol errors. def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs): """ POST an object and check the response. Retry once if a badNonce error is received. :param str url: The URL to request. :param ~josepy.interfaces.JSONDeSerializable obj: The serializable payload of the request. :param bytes content_type: The expected content type of the response. By default, JSON. :raises txacme.client.ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises acme.errors.ClientError: In case of other protocol errors. """ def retry_bad_nonce(f): f.trap(ServerError) # The current RFC draft defines the namespace as # urn:ietf:params:acme:error:<code>, but earlier drafts (and some # current implementations) use urn:acme:error:<code> instead. We # don't really care about the namespace here, just the error code. if f.value.message.typ.split(':')[-1] == 'badNonce': # If one nonce is bad, others likely are too. Let's clear them # and re-add the one we just got. self._nonces.clear() self._add_nonce(f.value.response) return self._post(url, obj, content_type, **kwargs) return f return ( self._post(url, obj, content_type, **kwargs) .addErrback(retry_bad_nonce))
Create a `threading.Thread`, but always set ``daemon``. def _daemon_thread(*a, **kw): """ Create a `threading.Thread`, but always set ``daemon``. """ thread = Thread(*a, **kw) thread.daemon = True return thread
Run a task in a worker, delivering the result as a ``Deferred`` in the reactor thread. def _defer_to_worker(deliver, worker, work, *args, **kwargs): """ Run a task in a worker, delivering the result as a ``Deferred`` in the reactor thread. """ deferred = Deferred() def wrapped_work(): try: result = work(*args, **kwargs) except BaseException: f = Failure() deliver(lambda: deferred.errback(f)) else: deliver(lambda: deferred.callback(result)) worker.do(wrapped_work) return deferred
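A synchronous usage sketch; in real use `deliver` would typically be `reactor.callFromThread` and `worker` a thread-backed IWorker, but a hypothetical inline stand-in shows the contract:

class ImmediateWorker(object):
    """Hypothetical stand-in for an IWorker: runs each job synchronously."""
    def do(self, work):
        work()

d = _defer_to_worker(lambda f: f(), ImmediateWorker(), pow, 2, 10)
d.addCallback(print)  # fires with 1024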
Split the zone portion off from a DNS label. :param str server_name: The full DNS label. :param str zone_name: The zone name suffix. def _split_zone(server_name, zone_name): """ Split the zone portion off from a DNS label. :param str server_name: The full DNS label. :param str zone_name: The zone name suffix. """ server_name = server_name.rstrip(u'.') zone_name = zone_name.rstrip(u'.') if not (server_name == zone_name or server_name.endswith(u'.' + zone_name)): raise NotInZone(server_name=server_name, zone_name=zone_name) return server_name[:-len(zone_name)].rstrip(u'.')
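For example, the first two calls below return the label prefix, while the third raises:

print(_split_zone(u'foo.bar.example.com', u'example.com'))  # -> u'foo.bar'
print(_split_zone(u'example.com.', u'example.com'))         # -> u''
# _split_zone(u'example.org', u'example.com') raises NotInZone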