Dataset schema (per-column type and observed value/length range):

    column     type    range
    ---------  ------  --------------------
    index      int64   0 .. 731k
    package    string  lengths 2 .. 98
    name       string  lengths 1 .. 76
    docstring  string  lengths 0 .. 281k
    code       string  lengths 4 .. 1.07M
    signature  string  lengths 2 .. 42.8k
708,818
sweetviz.dataframe_report
get_target_type
null
def get_target_type(self) -> FeatureType:
    if self._target is None:
        return None
    return self._target["type"]
(self) -> sweetviz.sv_types.FeatureType
708,819
sweetviz.dataframe_report
get_type
null
def get_type(self, feature_name: str) -> FeatureType:
    if self._features.get(feature_name) is None:
        if self._target["name"] == feature_name:
            return self._target["type"]
        else:
            return None
    return self._features[feature_name].get("type")
(self, feature_name: str) -> sweetviz.sv_types.FeatureType
708,820
sweetviz.dataframe_report
get_what_influences_me
null
def get_what_influences_me(self, feature_name: str) -> dict:
    influenced = dict()
    for cur_name, cur_associations in self._associations.items():
        if cur_name == feature_name:
            continue
        influence = cur_associations.get(feature_name)
        if influence is not None:
            influenced[cur_name] = influence
    return influenced
(self, feature_name: str) -> dict
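The lookup methods above are easiest to see on a concrete report. A minimal sketch, assuming the usual `import sweetviz as sv` convention; the DataFrame and column names are hypothetical, and associations are only populated once the report has computed them (`pairwise_analysis="on"` forces this):

import pandas as pd
import sweetviz as sv

# Hypothetical toy data; any DataFrame with a few mixed-type columns works.
df = pd.DataFrame({
    "age": [22, 35, 58, 41, 30],
    "income": [28000, 52000, 61000, 45000, 39000],
    "segment": ["a", "b", "b", "a", "b"],
})
report = sv.analyze(df, pairwise_analysis="on")  # force association computation

# {feature_name: association_value} for every feature that influences "age"
print(report.get_what_influences_me("age"))
print(report.get_type("age"))  # FeatureType of a single feature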
708,821
sweetviz.dataframe_report
log_comet
null
def log_comet(self, experiment: 'comet_ml_logger.Experiment'):
    self.generate_comet_friendly_html()
    try:
        experiment.log_html(self._page_html)
    except Exception:  # catch only ordinary errors so KeyboardInterrupt/SystemExit propagate
        print("log_comet(): error logging HTML report.")
(self, experiment: 'comet_ml_logger.Experiment')
708,822
sweetviz.dataframe_report
process_associations
null
def process_associations(self, features_to_process: List[FeatureToProcess],
                         source_target_series, compare_target_series):
    def mirror_association(association_dict, feature_name, other_name, value):
        if other_name not in association_dict.keys():
            association_dict[other_name] = dict()
        other_dict = association_dict[other_name]
        if feature_name not in other_dict.keys():
            other_dict[feature_name] = value

    for feature in features_to_process:
        feature_name = feature.source.name
        if feature_name not in self._associations.keys():
            self._associations[feature_name] = dict()
        cur_associations = self._associations[feature_name]
        if feature.compare is not None:
            if feature_name not in self._associations_compare.keys():
                self._associations_compare[feature_name] = dict()
            cur_associations_compare = self._associations_compare[feature_name]
        else:
            cur_associations_compare = None

        for other in features_to_process:
            # for other in [of for of in features_to_process if of.source.name != feature_name]:
            process_compare = cur_associations_compare is not None and other.compare is not None
            # if other.source.name in cur_associations.keys():
            #     print(f"Skipping {feature_name} {other.source.name}")
            #     continue
            if other.source.name == feature_name:
                cur_associations[other.source.name] = 0.0
                mirror_association(self._associations, feature_name, other.source.name, 0.0)
                if process_compare:
                    cur_associations_compare[other.source.name] = 0.0
                    mirror_association(self._associations_compare, feature_name,
                                       other.source.name, 0.0)
                continue
            if self[feature_name]["type"] == FeatureType.TYPE_CAT or \
                    self[feature_name]["type"] == FeatureType.TYPE_BOOL:
                # CAT/BOOL source
                # ------------------------------------
                if self[other.source.name]["type"] == FeatureType.TYPE_CAT or \
                        self[other.source.name]["type"] == FeatureType.TYPE_BOOL:
                    # CAT-CAT
                    cur_associations[other.source.name] = \
                        associations.theils_u(feature.source, other.source)
                    if process_compare:
                        cur_associations_compare[other.source.name] = \
                            associations.theils_u(feature.compare, other.compare)
                elif self[other.source.name]["type"] == FeatureType.TYPE_NUM:
                    # CAT-NUM
                    # This handles cat-num, then mirrors so no need to process num-cat
                    # separately (symmetrical relationship)
                    cur_associations[other.source.name] = \
                        associations.correlation_ratio(feature.source, other.source)
                    mirror_association(self._associations, feature_name, other.source.name,
                                       cur_associations[other.source.name])
                    if process_compare:
                        cur_associations_compare[other.source.name] = \
                            associations.correlation_ratio(feature.compare, other.compare)
                        mirror_association(self._associations_compare, feature_name,
                                           other.source.name,
                                           cur_associations_compare[other.source.name])
            elif self[feature_name]["type"] == FeatureType.TYPE_NUM:
                # NUM source
                # ------------------------------------
                if self[other.source.name]["type"] == FeatureType.TYPE_NUM:
                    # NUM-NUM
                    try:
                        cur_associations[other.source.name] = \
                            feature.source.corr(other.source, method='pearson')
                    except FloatingPointError:
                        # This usually happens when there is only 1 non-NaN value
                        # in each data series. Assigning the value 1.0 as per
                        # https://stats.stackexchange.com/questions/94150/why-is-the-pearson-correlation-1-when-only-two-data-values-are-available
                        # -> Also showing a warning
                        cur_associations[other.source.name] = 1.0
                        self.corr_warning.append(feature_name + "/" + other.source.name)
                    # TODO: display correlation error better in graph!
                    if isnan(cur_associations[other.source.name]):
                        if feature.source.equals(other.source):
                            cur_associations[other.source.name] = CORRELATION_IDENTICAL
                        else:
                            # ERROR may occur if NaN's in one match values in other,
                            # and vice-versa
                            cur_associations[other.source.name] = CORRELATION_ERROR
                    mirror_association(self._associations, feature_name, other.source.name,
                                       cur_associations[other.source.name])
                    if process_compare:
                        cur_associations_compare[other.source.name] = \
                            feature.compare.corr(other.compare, method='pearson')
                        # TODO: display correlation error better in graph!
                        if isnan(cur_associations_compare[other.source.name]):
                            if feature.compare.equals(other.compare):
                                cur_associations_compare[other.source.name] = CORRELATION_IDENTICAL
                            else:
                                # ERROR may occur if NaN's in one match values in other,
                                # and vice-versa
                                cur_associations_compare[other.source.name] = CORRELATION_ERROR
                        mirror_association(self._associations_compare, feature_name,
                                           other.source.name,
                                           cur_associations_compare[other.source.name])
        self.progress_bar.update(1)
(self, features_to_process: List[sweetviz.sv_types.FeatureToProcess], source_target_series, compare_target_series)
708,823
sweetviz.dataframe_report
sanitize_bool
null
@staticmethod
def sanitize_bool(value) -> bool:
    if isinstance(value, bool):  # a literal bool passes straight through ("value is bool" compared against the type itself and never matched)
        return value
    elif isinstance(value, str):
        return value.lower() in ['true', '1', 't', 'y', 'yes', '1.0']
    elif isinstance(value, float) or isinstance(value, int):
        return bool(value)
    return False
(value) -> bool
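A few doctest-style checks of the coercion rules above, assuming `DataframeReport` is importable from `sweetviz.dataframe_report`; the specific inputs are illustrative:

from sweetviz.dataframe_report import DataframeReport

assert DataframeReport.sanitize_bool(True) is True   # real booleans pass through
assert DataframeReport.sanitize_bool("Yes") is True  # case-insensitive truthy strings
assert DataframeReport.sanitize_bool("0") is False   # "0" is not in the accepted list
assert DataframeReport.sanitize_bool(2) is True      # any non-zero number
assert DataframeReport.sanitize_bool(None) is False  # unhandled types fall through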
708,824
sweetviz.dataframe_report
show_html
null
def show_html(self, filepath='SWEETVIZ_REPORT.html', open_browser=True,
              layout='widescreen', scale=None):
    scale = float(self.use_config_if_none(scale, "html_scale"))
    layout = self.use_config_if_none(layout, "html_layout")
    if layout not in ['widescreen', 'vertical']:
        raise ValueError(f"'layout' parameter must be either 'widescreen' or 'vertical'")
    sv_html.load_layout_globals_from_config()
    self.page_layout = layout
    self.scale = scale
    sv_html.set_summary_positions(self)
    sv_html.generate_html_detail(self)
    if self.associations_html_source:
        self.associations_html_source = sv_html.generate_html_associations(self, "source")
    if self.associations_html_compare:
        self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
    self._page_html = sv_html.generate_html_dataframe_page(self)
    f = open(filepath, 'w', encoding="utf-8")
    f.write(self._page_html)
    f.close()
    if open_browser:
        self.verbose_print(f"Report {filepath} was generated! NOTEBOOK/COLAB USERS: the web browser MAY not pop up, regardless, the report IS saved in your notebook/colab files.")
        # Not sure how to work around this: not fatal but annoying... Notebook/colab
        # https://bugs.python.org/issue5993
        webbrowser.open('file://' + os.path.realpath(filepath))
    else:
        self.verbose_print(f"Report {filepath} was generated.")
    if len(self.corr_warning):
        print("---\nWARNING: one or more correlations had an edge-case/error and a 1.0 correlation was assigned\n"
              "(likely due to only having a single row, containing non-NaN values for both correlated features)\n"
              "Affected correlations:" + str(self.corr_warning))
    # Auto-log to comet_ml if desired & present
    self._comet_ml_logger = comet_ml_logger.CometLogger()
    if self._comet_ml_logger._logging:
        self.generate_comet_friendly_html()
        self._comet_ml_logger.log_html(self._page_html)
        self._comet_ml_logger.end()
(self, filepath='SWEETVIZ_REPORT.html', open_browser=True, layout='widescreen', scale=None)
708,825
sweetviz.dataframe_report
show_notebook
null
def show_notebook(self, w=None, h=None, scale=None, layout=None,
                  filepath=None, file_layout=None, file_scale=None):
    w = self.use_config_if_none(w, "notebook_width")
    h = self.use_config_if_none(h, "notebook_height")
    scale = float(self.use_config_if_none(scale, "notebook_scale"))
    layout = self.use_config_if_none(layout, "notebook_layout")
    if layout not in ['widescreen', 'vertical']:
        raise ValueError(f"'layout' parameter must be either 'widescreen' or 'vertical'")
    sv_html.load_layout_globals_from_config()
    self.page_layout = layout
    self.scale = scale
    sv_html.set_summary_positions(self)
    sv_html.generate_html_detail(self)
    if self.associations_html_source:
        self.associations_html_source = sv_html.generate_html_associations(self, "source")
    if self.associations_html_compare:
        self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
    self._page_html = sv_html.generate_html_dataframe_page(self)
    width = w
    height = h
    if str(height).lower() == "full":
        height = self.page_height
    # Output to iFrame
    import html
    self._page_html = html.escape(self._page_html)
    iframe = f' <iframe width="{width}" height="{height}" srcdoc="{self._page_html}" frameborder="0" allowfullscreen></iframe>'
    from IPython.display import display
    from IPython.display import HTML
    display(HTML(iframe))
    if filepath is not None:
        # We cannot just write out the same HTML as the notebook, as that one has been
        # processed so as to remove extraneous headings so it is nicely inserted into
        # the notebook. Instead, just do something similar to the "show_html()" code,
        # but without its less-relevant printouts etc.
        # f = open(filepath, 'w', encoding="utf-8")
        # f.write(self._page_html)
        # f.close()
        scale = float(self.use_config_if_none(file_scale, "html_scale"))
        layout = self.use_config_if_none(file_layout, "html_layout")
        if layout not in ['widescreen', 'vertical']:
            raise ValueError(f"'layout' parameter for file output must be either 'widescreen' or 'vertical'")
        sv_html.load_layout_globals_from_config()
        self.page_layout = layout
        self.scale = scale
        sv_html.set_summary_positions(self)
        sv_html.generate_html_detail(self)
        if self.associations_html_source:
            self.associations_html_source = sv_html.generate_html_associations(self, "source")
        if self.associations_html_compare:
            self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
        self._page_html = sv_html.generate_html_dataframe_page(self)
        f = open(filepath, 'w', encoding="utf-8")
        f.write(self._page_html)
        f.close()
        self.verbose_print(f"Report '{filepath}' was saved to storage.")
    if len(self.corr_warning):
        print("WARNING: one or more correlations had an edge-case/error and a 1.0 correlation was assigned\n"
              "(likely due to only a single row containing non-NaN values for both correlated features)\n"
              "Affected correlations:" + str(self.corr_warning))
    # Auto-log to comet_ml if desired & present
    self._comet_ml_logger = comet_ml_logger.CometLogger()
    if self._comet_ml_logger._logging:
        self.generate_comet_friendly_html()
        self._comet_ml_logger.log_html(self._page_html)
        self._comet_ml_logger.end()
(self, w=None, h=None, scale=None, layout=None, filepath=None, file_layout=None, file_scale=None)
708,826
sweetviz.dataframe_report
summarize_category_types
null
def summarize_category_types(self, this_df: pd.DataFrame, dest_dict: dict,
                             skip: List[str], source_target_dict):
    dest_dict["num_cat"] = len([x for x in self._features.values()
                                if (x["type"] == FeatureType.TYPE_CAT or
                                    x["type"] == FeatureType.TYPE_BOOL)
                                and x["name"] not in skip and x["name"] in this_df])
    dest_dict["num_numerical"] = len([x for x in self._features.values()
                                      if x["type"] == FeatureType.TYPE_NUM
                                      and x["name"] not in skip
                                      and x["name"] in this_df])
    dest_dict["num_text"] = len([x for x in self._features.values()
                                 if x["type"] == FeatureType.TYPE_TEXT
                                 and x["name"] not in skip
                                 and x["name"] in this_df])
    if source_target_dict is not None and source_target_dict["name"] in this_df:
        if source_target_dict["type"] == FeatureType.TYPE_NUM:
            dest_dict["num_numerical"] = dest_dict["num_numerical"] + 1
        elif source_target_dict["type"] == FeatureType.TYPE_CAT or \
                source_target_dict["type"] == FeatureType.TYPE_BOOL:
            dest_dict["num_cat"] = dest_dict["num_cat"] + 1
    return
(self, this_df: pandas.core.frame.DataFrame, dest_dict: dict, skip: List[str], source_target_dict)
708,827
sweetviz.dataframe_report
summarize_dataframe
null
def summarize_dataframe(self, source: pd.DataFrame, name: str, target_dict: dict,
                        skip: List[str]):
    target_dict["name"] = name
    target_dict["num_rows"] = len(source)
    target_dict["num_columns"] = len(source.columns)
    target_dict["num_skipped_columns"] = len(source.columns) - \
        len([x for x in source.columns if x not in skip])
    target_dict["memory_total"] = source.memory_usage(index=True, deep=True).sum()
    if target_dict["num_rows"] > 0:
        target_dict["memory_single_row"] = \
            float(target_dict["memory_total"]) / target_dict["num_rows"]
    else:
        target_dict["memory_single_row"] = 0
    target_dict["duplicates"] = NumWithPercent(sum(source.duplicated()), len(source))
    target_dict["num_cmp_not_in_source"] = 0  # set later, as needed
(self, source: pandas.core.frame.DataFrame, name: str, target_dict: dict, skip: List[str])
708,828
sweetviz.dataframe_report
use_config_if_none
null
def use_config_if_none(self, passed_value, config_name):
    if passed_value is None:
        return config["Output_Defaults"][config_name]
    return passed_value
(self, passed_value, config_name)
708,829
sweetviz.dataframe_report
verbose_print
null
def verbose_print(self, *args, **kwargs):
    if self.verbosity_level == "full":
        print(*args, **kwargs)
(self, *args, **kwargs)
708,830
sweetviz.feature_config
FeatureConfig
null
class FeatureConfig:
    def __init__(self, skip: Tuple = None, force_cat: Tuple = None,
                 force_text: Tuple = None, force_num: Tuple = None):
        def make_list(param):
            if type(param) == list or type(param) == tuple:
                return param
            elif type(param) == str:
                return [param]
            elif param is None:
                return list()
            raise ValueError("Invalid value passed in for FeatureConfig")

        # NEW (12-14-2020): rename "index" features
        def rename_index(list_of_feature_names):
            return [x if x != "index" else "df_index" for x in list_of_feature_names]

        self.skip = rename_index(make_list(skip))
        self.force_cat = rename_index(make_list(force_cat))
        self.force_text = rename_index(make_list(force_text))
        self.force_num = rename_index(make_list(force_num))

    def get_predetermined_type(self, feature_name: str):
        if feature_name in self.skip:
            return FeatureType.TYPE_SKIPPED
        elif feature_name in self.force_cat:
            return FeatureType.TYPE_CAT
        elif feature_name in self.force_text:
            return FeatureType.TYPE_TEXT
        elif feature_name in self.force_num:
            return FeatureType.TYPE_NUM
        else:
            return FeatureType.TYPE_UNKNOWN

    def get_all_mentioned_features(self):
        returned = list()
        returned.extend(self.skip)
        returned.extend(self.force_cat)
        returned.extend(self.force_text)
        returned.extend(self.force_num)
        return returned
(skip: Tuple = None, force_cat: Tuple = None, force_text: Tuple = None, force_num: Tuple = None)
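A usage sketch for `FeatureConfig`, assuming the standard `import sweetviz as sv` convention; the column names are hypothetical:

import sweetviz as sv
from sweetviz.sv_types import FeatureType

# Tuples, lists, or a single string are all accepted for each parameter.
cfg = sv.FeatureConfig(
    skip=["row_id"],            # excluded from the report entirely
    force_cat=("zip_code",),    # treat as categorical despite numeric dtype
    force_num="satisfaction",   # a lone string is wrapped into a list
)
assert cfg.get_predetermined_type("zip_code") == FeatureType.TYPE_CAT
assert cfg.get_predetermined_type("age") == FeatureType.TYPE_UNKNOWN
# Note the constructor's rename step: a column literally named "index"
# must be referred to as "df_index" here.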
708,831
sweetviz.feature_config
__init__
null
def __init__(self, skip: Tuple = None, force_cat: Tuple = None,
             force_text: Tuple = None, force_num: Tuple = None):
    def make_list(param):
        if type(param) == list or type(param) == tuple:
            return param
        elif type(param) == str:
            return [param]
        elif param is None:
            return list()
        raise ValueError("Invalid value passed in for FeatureConfig")

    # NEW (12-14-2020): rename "index" features
    def rename_index(list_of_feature_names):
        return [x if x != "index" else "df_index" for x in list_of_feature_names]

    self.skip = rename_index(make_list(skip))
    self.force_cat = rename_index(make_list(force_cat))
    self.force_text = rename_index(make_list(force_text))
    self.force_num = rename_index(make_list(force_num))
(self, skip: Optional[Tuple] = None, force_cat: Optional[Tuple] = None, force_text: Optional[Tuple] = None, force_num: Optional[Tuple] = None)
708,832
sweetviz.feature_config
get_all_mentioned_features
null
def get_all_mentioned_features(self):
    returned = list()
    returned.extend(self.skip)
    returned.extend(self.force_cat)
    returned.extend(self.force_text)
    returned.extend(self.force_num)
    return returned
(self)
708,833
sweetviz.feature_config
get_predetermined_type
null
def get_predetermined_type(self, feature_name: str):
    if feature_name in self.skip:
        return FeatureType.TYPE_SKIPPED
    elif feature_name in self.force_cat:
        return FeatureType.TYPE_CAT
    elif feature_name in self.force_text:
        return FeatureType.TYPE_TEXT
    elif feature_name in self.force_num:
        return FeatureType.TYPE_NUM
    else:
        return FeatureType.TYPE_UNKNOWN
(self, feature_name: str)
708,834
sweetviz.sv_public
analyze
null
def analyze(source: Union[pd.DataFrame, Tuple[pd.DataFrame, str]],
            target_feat: str = None,
            feat_cfg: FeatureConfig = None,
            pairwise_analysis: str = 'auto'):
    report = sweetviz.DataframeReport(source, target_feat, None,
                                      pairwise_analysis, feat_cfg)
    return report
(source: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str]], target_feat: Optional[str] = None, feat_cfg: Optional[sweetviz.feature_config.FeatureConfig] = None, pairwise_analysis: str = 'auto')
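End-to-end usage of `analyze()` per the signature above; the CSV path and target column are hypothetical:

import pandas as pd
import sweetviz as sv

df = pd.read_csv("train.csv")                  # hypothetical input file
report = sv.analyze(df, target_feat="target")  # or sv.analyze((df, "Train")) to name the dataset
report.show_html("analysis.html", open_browser=False)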
708,836
sweetviz.sv_public
compare
null
def compare(source: Union[pd.DataFrame, Tuple[pd.DataFrame, str]],
            compare: Union[pd.DataFrame, Tuple[pd.DataFrame, str]],
            target_feat: str = None,
            feat_cfg: FeatureConfig = None,
            pairwise_analysis: str = 'auto'):
    report = sweetviz.DataframeReport(source, target_feat, compare,
                                      pairwise_analysis, feat_cfg)
    return report
(source: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str]], compare: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str]], target_feat: Optional[str] = None, feat_cfg: Optional[sweetviz.feature_config.FeatureConfig] = None, pairwise_analysis: str = 'auto')
708,837
sweetviz.sv_public
compare_intra
null
def compare_intra(source_df: pd.DataFrame, condition_series: pd.Series,
                  names: Tuple[str, str], target_feat: str = None,
                  feat_cfg: FeatureConfig = None, pairwise_analysis: str = 'auto'):
    if len(source_df) != len(condition_series):
        raise ValueError('compare_intra() expects source_df and '
                         'condition_series to be the same length')
    if condition_series.dtypes != bool:
        raise ValueError('compare_intra() requires condition_series '
                         'to be boolean')
    data_true = source_df[condition_series]
    data_false = source_df[condition_series == False]
    if len(data_false) == 0:
        raise ValueError('compare_intra(): FALSE dataset is empty, nothing to compare!')
    if len(data_true) == 0:
        raise ValueError('compare_intra(): TRUE dataset is empty, nothing to compare!')
    report = sweetviz.DataframeReport([data_true, names[0]], target_feat,
                                      [data_false, names[1]],
                                      pairwise_analysis, feat_cfg)
    return report
(source_df: pandas.core.frame.DataFrame, condition_series: pandas.core.series.Series, names: Tuple[str, str], target_feat: Optional[str] = None, feat_cfg: Optional[sweetviz.feature_config.FeatureConfig] = None, pairwise_analysis: str = 'auto')
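A sketch of `compare_intra()`, which splits one DataFrame on a boolean condition; the file and column names are hypothetical. Per the validation above, the condition must be boolean, the same length as the frame, and both partitions must be non-empty:

import pandas as pd
import sweetviz as sv

df = pd.read_csv("train.csv")  # hypothetical input
report = sv.compare_intra(df, df["sex"] == "male", names=("Male", "Female"))
report.show_html("intra_report.html", open_browser=False)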
708,860
pypi_simple.errors
DigestMismatchError
Raised by `PyPISimple.download_package()` and `PyPISimple.get_package_metadata()` with ``verify=True`` when the digest of the downloaded data does not match the expected value
class DigestMismatchError(ValueError):
    """
    Raised by `PyPISimple.download_package()` and
    `PyPISimple.get_package_metadata()` with ``verify=True`` when the digest
    of the downloaded data does not match the expected value
    """

    def __init__(
        self, algorithm: str, expected_digest: str, actual_digest: str
    ) -> None:
        #: The name of the digest algorithm used
        self.algorithm = algorithm
        #: The expected digest
        self.expected_digest = expected_digest
        #: The digest of the data that was actually received
        self.actual_digest = actual_digest

    def __str__(self) -> str:
        return (
            f"{self.algorithm} digest of downloaded data is"
            f" {self.actual_digest!r} instead of expected {self.expected_digest!r}"
        )
(algorithm: str, expected_digest: str, actual_digest: str) -> None
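A sketch of where this exception surfaces, assuming the top-level `pypi_simple` re-exports of the client and error classes shown elsewhere in this section; the project name is arbitrary:

from pypi_simple import DigestMismatchError, NoDigestsError, PyPISimple

with PyPISimple() as client:
    pkg = client.get_project_page("requests").packages[-1]
    try:
        client.download_package(pkg, path=pkg.filename, verify=True)
    except NoDigestsError:
        print("no digest with a known algorithm to verify against")
    except DigestMismatchError as e:
        print(f"{e.algorithm} mismatch: got {e.actual_digest!r}, "
              f"expected {e.expected_digest!r}")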
708,861
pypi_simple.errors
__init__
null
def __init__(
    self, algorithm: str, expected_digest: str, actual_digest: str
) -> None:
    #: The name of the digest algorithm used
    self.algorithm = algorithm
    #: The expected digest
    self.expected_digest = expected_digest
    #: The digest of the data that was actually received
    self.actual_digest = actual_digest
(self, algorithm: str, expected_digest: str, actual_digest: str) -> NoneType
708,862
pypi_simple.errors
__str__
null
def __str__(self) -> str:
    return (
        f"{self.algorithm} digest of downloaded data is"
        f" {self.actual_digest!r} instead of expected {self.expected_digest!r}"
    )
(self) -> str
708,863
pypi_simple.classes
DistributionPackage
Information about a versioned archive file from which a Python project release can be installed

.. versionchanged:: 1.0.0
    ``yanked`` field replaced with `is_yanked` and `yanked_reason`
class DistributionPackage:
    """
    Information about a versioned archive file from which a Python project
    release can be installed

    .. versionchanged:: 1.0.0
        ``yanked`` field replaced with `is_yanked` and `yanked_reason`
    """

    #: The basename of the package file
    filename: str

    #: The URL from which the package file can be downloaded, with any hash
    #: digest fragment removed
    url: str

    #: The name of the project (as extracted from the filename), or `None` if
    #: the filename cannot be parsed
    project: Optional[str]

    #: The project version (as extracted from the filename), or `None` if the
    #: filename cannot be parsed
    version: Optional[str]

    #: The type of the package, or `None` if the filename cannot be parsed.
    #: The recognized package types are:
    #:
    #: - ``'dumb'``
    #: - ``'egg'``
    #: - ``'msi'``
    #: - ``'rpm'``
    #: - ``'sdist'``
    #: - ``'wheel'``
    #: - ``'wininst'``
    package_type: Optional[str]

    #: A collection of hash digests for the file as a `dict` mapping hash
    #: algorithm names to hex-encoded digest strings
    digests: dict[str, str]

    #: An optional version specifier string declaring the Python version(s) in
    #: which the package can be installed
    requires_python: Optional[str]

    #: Whether the package file is accompanied by a PGP signature file.  This
    #: is `None` if the package repository does not report such information.
    has_sig: Optional[bool]

    #: Whether the package file has been "yanked" from the package repository
    #: (meaning that it should only be installed when that specific version is
    #: requested)
    is_yanked: bool = False

    #: If the package file has been "yanked" and a reason is given, this
    #: attribute will contain that (possibly empty) reason
    yanked_reason: Optional[str] = None

    #: Whether the package file is accompanied by a Core Metadata file.  This
    #: is `None` if the package repository does not report such information.
    has_metadata: Optional[bool] = None

    #: If the package repository provides a Core Metadata file for the
    #: package, this is a (possibly empty) `dict` of digests of the file,
    #: given as a mapping from hash algorithm names to hex-encoded digest
    #: strings; otherwise, it is `None`
    metadata_digests: Optional[dict[str, str]] = None

    #: .. versionadded:: 1.1.0
    #:
    #: The size of the package file in bytes, or `None` if not specified
    #: [#pep700]_.
    size: Optional[int] = None

    #: .. versionadded:: 1.1.0
    #:
    #: The time at which the package file was uploaded to the server, or
    #: `None` if not specified [#pep700]_.
    upload_time: Optional[datetime] = None

    @property
    def sig_url(self) -> str:
        """
        The URL of the package file's PGP signature file, if it exists; cf.
        `has_sig`
        """
        u = urlparse(self.url)
        return urlunparse((u[0], u[1], u[2] + ".asc", "", "", ""))

    @property
    def metadata_url(self) -> str:
        """
        The URL of the package file's Core Metadata file, if it exists; cf.
        `has_metadata`
        """
        u = urlparse(self.url)
        return urlunparse((u[0], u[1], u[2] + ".metadata", "", "", ""))

    @classmethod
    def from_link(
        cls, link: Link, project_hint: Optional[str] = None
    ) -> DistributionPackage:
        """
        Construct a `DistributionPackage` from a `Link` on a project page.

        :param Link link: a link parsed from a project page
        :param Optional[str] project_hint: Optionally, the expected value for
            the project name (usually the name of the project page on which
            the link was found).  The name does not need to be normalized.
        :rtype: DistributionPackage
        """
        try:
            project, version, pkg_type = parse_filename(link.text, project_hint)
        except UnparsableFilenameError:
            project = None
            version = None
            pkg_type = None
        urlbits = urlparse(link.url)
        dgst_name, _, dgst_value = urlbits.fragment.partition("=")
        digests = {dgst_name: dgst_value} if dgst_value else {}
        url = urlunparse(urlbits._replace(fragment=""))
        has_sig: Optional[bool]
        gpg_sig = link.get_str_attrib("data-gpg-sig")
        if gpg_sig is not None:
            has_sig = gpg_sig.lower() == "true"
        else:
            has_sig = None
        mddigest = link.get_str_attrib("data-core-metadata")
        metadata_digests: Optional[dict[str, str]]
        has_metadata = None
        if mddigest is not None:
            metadata_digests = {}
            m = re.fullmatch(r"(\w+)=([0-9A-Fa-f]+)", mddigest)
            if m:
                metadata_digests[m[1]] = m[2]
            has_metadata = bool(m) or mddigest.lower() == "true"
        else:
            metadata_digests = None
        yanked_reason = link.get_str_attrib("data-yanked")
        return cls(
            filename=link.text,
            url=url,
            has_sig=has_sig,
            requires_python=link.get_str_attrib("data-requires-python"),
            project=project,
            version=version,
            package_type=pkg_type,
            is_yanked=yanked_reason is not None,
            yanked_reason=yanked_reason,
            digests=digests,
            metadata_digests=metadata_digests,
            has_metadata=has_metadata,
        )

    @classmethod
    def from_json_data(
        cls,
        data: Any,
        project_hint: Optional[str] = None,
        base_url: Optional[str] = None,
    ) -> DistributionPackage:
        """
        Construct a `DistributionPackage` from an object taken from the
        ``"files"`` field of a :pep:`691` project detail JSON response.

        :param data: a file dictionary
        :param Optional[str] project_hint: Optionally, the expected value for
            the project name (usually the name of the project page on which
            the link was found).  The name does not need to be normalized.
        :param Optional[str] base_url: an optional URL to join to the front of
            a relative file URL (usually the URL of the page being parsed)
        :rtype: DistributionPackage
        :raises ValueError: if ``data`` is not a `dict`
        """
        return cls.from_file(File.model_validate(data), project_hint, base_url)

    @classmethod
    def from_file(
        cls,
        file: File,
        project_hint: Optional[str] = None,
        base_url: Optional[str] = None,
    ) -> DistributionPackage:
        """:meta private:"""
        try:
            project, version, pkg_type = parse_filename(file.filename, project_hint)
        except UnparsableFilenameError:
            project = None
            version = None
            pkg_type = None
        return cls(
            filename=file.filename,
            url=basejoin(base_url, file.url),
            has_sig=file.gpg_sig,
            requires_python=file.requires_python,
            project=project,
            version=version,
            package_type=pkg_type,
            is_yanked=file.is_yanked,
            yanked_reason=file.yanked_reason,
            digests=file.hashes,
            metadata_digests=file.metadata_digests,
            has_metadata=file.has_metadata,
            size=file.size,
            upload_time=file.upload_time,
        )
(filename: str, url: str, project: Optional[str], version: Optional[str], package_type: Optional[str], digests: dict[str, str], requires_python: Optional[str], has_sig: Optional[bool], is_yanked: bool = False, yanked_reason: Optional[str] = None, has_metadata: Optional[bool] = None, metadata_digests: Optional[dict[str, str]] = None, size: Optional[int] = None, upload_time: Optional[datetime.datetime] = None) -> None
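Reading the fields documented above from a live project page; a sketch using the `PyPISimple` client defined later in this section. Any of the `Optional` fields may be `None` on repositories that do not report them:

from pypi_simple import PyPISimple

with PyPISimple() as client:
    page = client.get_project_page("pypi-simple")
    for pkg in page.packages:
        print(pkg.filename, pkg.project, pkg.version, pkg.package_type)
        print("  requires-python:", pkg.requires_python)
        print("  digests:", sorted(pkg.digests))
        if pkg.has_metadata:
            print("  core metadata at:", pkg.metadata_url)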
708,864
pypi_simple.classes
__eq__
null
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime
import re
from typing import Any, Optional
from urllib.parse import urlparse, urlunparse
from mailbits import ContentType
import requests
from .errors import UnparsableFilenameError, UnsupportedContentTypeError
from .filenames import parse_filename
from .html import Link, RepositoryPage
from .pep691 import File, Project, ProjectList
from .util import basejoin, check_repo_version

# [code field collapsed: `__eq__` is generated by @dataclass and has no
# dedicated source, so the extractor captured the enclosing module; the
# remainder duplicated the DistributionPackage listing above verbatim.]
(self, other)
708,866
pypi_simple.classes
__repr__
null
# [code field collapsed: `__repr__` is generated by @dataclass and has no
# dedicated source, so the extractor captured the enclosing @dataclass
# ProjectPage class, duplicated verbatim by the ProjectPage record
# (pypi_simple.classes, index 708,893) later in this section.]
(self)
708,867
pypi_simple.classes
IndexPage
A parsed index/root page from a simple repository
class IndexPage:
    """A parsed index/root page from a simple repository"""

    #: The project names listed in the index.  The names are not normalized.
    projects: list[str]

    #: The repository version reported by the page, or `None` if not specified
    repository_version: Optional[str]

    #: The value of the :mailheader:`X-PyPI-Last-Serial` response header
    #: returned when fetching the page, or `None` if not specified
    last_serial: Optional[str]

    @classmethod
    def from_html(
        cls, html: str | bytes, from_encoding: Optional[str] = None
    ) -> IndexPage:
        """
        .. versionadded:: 1.0.0

        Parse an HTML index/root page from a simple repository into an
        `IndexPage`.  Note that the `last_serial` attribute will be `None`.

        :param html: the HTML to parse
        :type html: str or bytes
        :param Optional[str] from_encoding: an optional hint to Beautiful Soup
            as to the encoding of ``html`` when it is `bytes` (usually the
            ``charset`` parameter of the response's
            :mailheader:`Content-Type` header)
        :rtype: IndexPage
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        """
        page = RepositoryPage.from_html(html, from_encoding=from_encoding)
        return cls(
            projects=[link.text for link in page.links],
            repository_version=page.repository_version,
            last_serial=None,
        )

    @classmethod
    def from_json_data(cls, data: Any) -> IndexPage:
        """
        .. versionadded:: 1.0.0

        Parse an object decoded from an
        :mimetype:`application/vnd.pypi.simple.v1+json` response (See
        :pep:`691`) into an `IndexPage`.  The `last_serial` attribute will be
        set to the value of the ``.meta._last-serial`` field, if any.

        :param data: The decoded body of the JSON response
        :rtype: IndexPage
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        :raises ValueError: if ``data`` is not a `dict`
        """
        plist = ProjectList.model_validate(data)
        check_repo_version(plist.meta.api_version)
        return IndexPage(
            projects=[p.name for p in plist.projects],
            repository_version=plist.meta.api_version,
            last_serial=plist.meta.last_serial,
        )

    @classmethod
    def from_response(cls, r: requests.Response) -> IndexPage:
        """
        .. versionadded:: 1.0.0

        Parse an index page from a `requests.Response` returned from a
        (non-streaming) request to a simple repository, and return an
        `IndexPage`.

        :param requests.Response r: the response object to parse
        :rtype: IndexPage
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        :raises UnsupportedContentTypeError: if the response has an
            unsupported :mailheader:`Content-Type`
        """
        ct = ContentType.parse(r.headers.get("content-type", "text/html"))
        if ct.content_type == "application/vnd.pypi.simple.v1+json":
            page = cls.from_json_data(r.json())
        elif (
            ct.content_type == "application/vnd.pypi.simple.v1+html"
            or ct.content_type == "text/html"
        ):
            page = cls.from_html(html=r.content, from_encoding=ct.params.get("charset"))
        else:
            raise UnsupportedContentTypeError(r.url, str(ct))
        if page.last_serial is None:
            page.last_serial = r.headers.get("X-PyPI-Last-Serial")
        return page
(projects: list[str], repository_version: Optional[str], last_serial: Optional[str]) -> None
708,871
pypi_simple.html
Link
A hyperlink extracted from an HTML page
class Link:
    """A hyperlink extracted from an HTML page"""

    #: The text inside the link tag, with leading & trailing whitespace
    #: removed and with any tags nested inside the link tags ignored
    text: str

    #: The URL that the link points to, resolved relative to the URL of the
    #: source HTML page and relative to the page's ``<base>`` href value, if
    #: any
    url: str

    #: A dictionary of attributes set on the link tag (including the
    #: unmodified ``href`` attribute).  Keys are converted to lowercase.
    #: Most attributes have `str` values, but some (referred to as "CDATA
    #: list attributes" by the HTML spec; e.g., ``"class"``) have values of
    #: type ``list[str]`` instead.
    attrs: dict[str, str | list[str]]

    def get_str_attrib(self, attrib: str) -> Optional[str]:
        """:meta private:"""
        value = self.attrs.get(attrib)
        if value is not None:
            assert isinstance(value, str)
        return value
(text: str, url: str, attrs: dict[str, str | list[str]]) -> None
708,872
pypi_simple.html
__eq__
null
from __future__ import annotations
from dataclasses import dataclass
import re
from typing import Optional
from urllib.parse import urljoin
from bs4 import BeautifulSoup, Tag
from .util import basejoin, check_repo_version


@dataclass
class RepositoryPage:
    """
    .. versionadded:: 1.0.0

    A parsed HTML page from a :pep:`503` simple repository
    """

    #: The repository version, if any, reported by the page in accordance
    #: with :pep:`629`
    repository_version: Optional[str]

    #: A list of hyperlinks found on the page
    links: list[Link]

    #: .. versionadded:: 1.4.0
    #:
    #: ``<meta/>`` tags found on the page whose ``name`` attributes start
    #: with ``pypi:``.  This is a dict in which the keys are ``name``
    #: attributes with leading ``"pypi:"`` removed and in which the values
    #: are the corresponding ``content`` attributes.
    pypi_meta: dict[str, list[str]]

    @property
    def tracks(self) -> list[str]:
        """
        .. versionadded:: 1.4.0

        Repository "tracks" metadata.  See `PEP 708`__.

        __ https://peps.python.org/pep-0708/#repository-tracks-metadata
        """
        return self.pypi_meta.get("tracks", [])

    @property
    def alternate_locations(self) -> list[str]:
        """
        .. versionadded:: 1.4.0

        Repository "alternate locations" metadata.  See `PEP 708`__.

        __ https://peps.python.org/pep-0708/#alternate-locations-metadata
        """
        return self.pypi_meta.get("alternate-locations", [])

    @classmethod
    def from_html(
        cls,
        html: str | bytes,
        base_url: Optional[str] = None,
        from_encoding: Optional[str] = None,
    ) -> RepositoryPage:
        """
        Parse an HTML page from a simple repository into a `RepositoryPage`.

        :param html: the HTML to parse
        :type html: str or bytes
        :param Optional[str] base_url: an optional URL to join to the front
            of the links' URLs (usually the URL of the page being parsed)
        :param Optional[str] from_encoding: an optional hint to Beautiful
            Soup as to the encoding of ``html`` when it is `bytes` (usually
            the ``charset`` parameter of the response's
            :mailheader:`Content-Type` header)
        :rtype: RepositoryPage
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        """
        soup = BeautifulSoup(html, "html.parser", from_encoding=from_encoding)
        base_tag = soup.find("base", href=True)
        if base_tag is not None:
            assert isinstance(base_tag, Tag)
            href = base_tag["href"]
            assert isinstance(href, str)
            if base_url is None:
                base_url = href
            else:
                base_url = urljoin(base_url, href)
        meta: dict[str, list[str]] = {}
        for tag in soup.find_all(
            "meta", attrs={"name": re.compile(r"^pypi:"), "content": True}
        ):
            assert isinstance(tag, Tag)
            name = tag["name"]
            assert isinstance(name, str)
            assert name.startswith("pypi:")
            content = tag["content"]
            assert isinstance(content, str)
            meta.setdefault(name[5:], []).append(content)
        try:
            repository_version = meta["repository-version"][0]
        except LookupError:
            repository_version = None
        if repository_version is not None:
            check_repo_version(repository_version)
        links = []
        for link in soup.find_all("a", href=True):
            links.append(
                Link(
                    text="".join(link.strings).strip(),
                    url=basejoin(base_url, link["href"]),
                    attrs=link.attrs,
                )
            )
        return cls(repository_version=repository_version, links=links, pypi_meta=meta)
(self, other)
708,875
pypi_simple.html
get_str_attrib
:meta private:
def get_str_attrib(self, attrib: str) -> Optional[str]:
    """:meta private:"""
    value = self.attrs.get(attrib)
    if value is not None:
        assert isinstance(value, str)
    return value
(self, attrib: str) -> Optional[str]
708,876
pypi_simple.errors
NoDigestsError
Raised by `PyPISimple.download_package()` and `PyPISimple.get_package_metadata()` with ``verify=True`` when the given package or package metadata does not have any digests with known algorithms
class NoDigestsError(ValueError):
    """
    Raised by `PyPISimple.download_package()` and
    `PyPISimple.get_package_metadata()` with ``verify=True`` when the given
    package or package metadata does not have any digests with known
    algorithms
    """

    pass
null
708,877
pypi_simple.client
NoMetadataError
.. versionadded:: 1.3.0

Raised by `PyPISimple.get_package_metadata()` when a request for distribution metadata fails with a 404 error code
class NoMetadataError(Exception):
    """
    .. versionadded:: 1.3.0

    Raised by `PyPISimple.get_package_metadata()` when a request for
    distribution metadata fails with a 404 error code
    """

    def __init__(self, filename: str) -> None:
        #: The filename of the package whose metadata was requested
        self.filename = filename

    def __str__(self) -> str:
        return f"No distribution metadata found for {self.filename}"
(filename: 'str') -> 'None'
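A sketch of handling this error around `PyPISimple.get_package_metadata()`; the project name is arbitrary:

from pypi_simple import NoMetadataError, PyPISimple

with PyPISimple() as client:
    pkg = client.get_project_page("requests").packages[-1]
    try:
        metadata = client.get_package_metadata(pkg)  # Core Metadata file as text
    except NoMetadataError as e:
        print(e)  # "No distribution metadata found for <filename>"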
708,878
pypi_simple.client
__init__
null
def __init__(self, filename: str) -> None:
    #: The filename of the package whose metadata was requested
    self.filename = filename
(self, filename: str) -> NoneType
708,879
pypi_simple.client
__str__
null
def __str__(self) -> str:
    return f"No distribution metadata found for {self.filename}"
(self) -> str
708,880
pypi_simple.client
NoSuchProjectError
Raised by `PyPISimple.get_project_page()` when a request for a project fails with a 404 error code
class NoSuchProjectError(Exception):
    """
    Raised by `PyPISimple.get_project_page()` when a request for a project
    fails with a 404 error code
    """

    def __init__(self, project: str, url: str) -> None:
        #: The name of the project requested
        self.project = project
        #: The URL to which the failed request was made
        self.url = url

    def __str__(self) -> str:
        return f"No details about project {self.project!r} available at {self.url}"
(project: 'str', url: 'str') -> 'None'
708,881
pypi_simple.client
__init__
null
def __init__(self, project: str, url: str) -> None:
    #: The name of the project requested
    self.project = project
    #: The URL to which the failed request was made
    self.url = url
(self, project: str, url: str) -> NoneType
708,882
pypi_simple.client
__str__
null
def __str__(self) -> str:
    return f"No details about project {self.project!r} available at {self.url}"
(self) -> str
708,883
pypi_simple.progress
ProgressTracker
A `typing.Protocol` for progress trackers. A progress tracker must be usable as a context manager whose ``__enter__`` method performs startup & returns itself and whose ``__exit__`` method performs shutdown/cleanup. In addition, a progress tracker must have an ``update(increment: int)`` method that will be called with the size of each downloaded file chunk.
class ProgressTracker(Protocol):
    """
    A `typing.Protocol` for progress trackers.

    A progress tracker must be usable as a context manager whose
    ``__enter__`` method performs startup & returns itself and whose
    ``__exit__`` method performs shutdown/cleanup.  In addition, a progress
    tracker must have an ``update(increment: int)`` method that will be
    called with the size of each downloaded file chunk.
    """

    def __enter__(self) -> Self: ...

    def __exit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> Optional[bool]: ...

    def update(self, increment: int) -> None: ...
(*args, **kwargs)
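A minimal class satisfying the protocol above; `PrintProgress` and its byte counter are illustrative, not part of the library (which ships `tqdm_progress_factory()` for the same purpose):

from types import TracebackType
from typing import Optional


class PrintProgress:
    """Illustrative tracker: counts bytes and prints a single updating line."""

    def __init__(self, total: Optional[int] = None) -> None:
        self.total = total  # expected size in bytes, if known
        self.seen = 0

    def __enter__(self) -> "PrintProgress":
        return self  # startup: nothing to do

    def __exit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> Optional[bool]:
        print()  # cleanup: terminate the progress line
        return None

    def update(self, increment: int) -> None:
        self.seen += increment
        print(f"\r{self.seen} bytes", end="", flush=True)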
708,884
pypi_simple.progress
__enter__
null
def __enter__(self) -> Self: ...
(self) -> 'Self'
708,885
pypi_simple.progress
__exit__
null
def __exit__(
    self,
    exc_type: Optional[type[BaseException]],
    exc_val: Optional[BaseException],
    exc_tb: Optional[TracebackType],
) -> Optional[bool]: ...
(self, exc_type: Optional[type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[traceback]) -> Optional[bool]
708,888
pypi_simple.progress
update
null
def update(self, increment: int) -> None: ...
(self, increment: int) -> NoneType
708,889
pypi_simple.classes
ProjectPage
A parsed project page from a simple repository
class ProjectPage:
    """A parsed project page from a simple repository"""

    #: The name of the project the page is for
    project: str

    #: A list of packages (as `DistributionPackage` objects) listed on the
    #: project page
    packages: list[DistributionPackage]

    #: The repository version reported by the page, or `None` if not specified
    repository_version: Optional[str]

    #: The value of the :mailheader:`X-PyPI-Last-Serial` response header
    #: returned when fetching the page, or `None` if not specified
    last_serial: Optional[str]

    #: .. versionadded:: 1.1.0
    #:
    #: A list of the project's versions, or `None` if not specified [#pep700]_.
    versions: Optional[list[str]] = None

    #: .. versionadded:: 1.4.0
    #:
    #: Repository "tracks" metadata.  See `PEP 708`__.
    #:
    #: __ https://peps.python.org/pep-0708/#repository-tracks-metadata
    tracks: list[str] = field(default_factory=list)

    #: .. versionadded:: 1.4.0
    #:
    #: Repository "alternate locations" metadata.  See `PEP 708`__.
    #:
    #: __ https://peps.python.org/pep-0708/#alternate-locations-metadata
    alternate_locations: list[str] = field(default_factory=list)

    @classmethod
    def from_html(
        cls,
        project: str,
        html: str | bytes,
        base_url: Optional[str] = None,
        from_encoding: Optional[str] = None,
    ) -> ProjectPage:
        """
        .. versionadded:: 1.0.0

        Parse an HTML project page from a simple repository into a
        `ProjectPage`.  Note that the `last_serial` attribute will be `None`.

        :param str project: The name of the project whose page is being parsed
        :param html: the HTML to parse
        :type html: str or bytes
        :param Optional[str] base_url: an optional URL to join to the front of
            the packages' URLs (usually the URL of the page being parsed)
        :param Optional[str] from_encoding: an optional hint to Beautiful Soup
            as to the encoding of ``html`` when it is `bytes` (usually the
            ``charset`` parameter of the response's
            :mailheader:`Content-Type` header)
        :rtype: ProjectPage
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        """
        page = RepositoryPage.from_html(html, base_url, from_encoding)
        return cls(
            project=project,
            packages=[
                DistributionPackage.from_link(link, project) for link in page.links
            ],
            repository_version=page.repository_version,
            last_serial=None,
            versions=None,
            tracks=page.tracks,
            alternate_locations=page.alternate_locations,
        )

    @classmethod
    def from_json_data(cls, data: Any, base_url: Optional[str] = None) -> ProjectPage:
        """
        .. versionadded:: 1.0.0

        Parse an object decoded from an
        :mimetype:`application/vnd.pypi.simple.v1+json` response (See
        :pep:`691`) into a `ProjectPage`.  The `last_serial` attribute will be
        set to the value of the ``.meta._last-serial`` field, if any.

        :param data: The decoded body of the JSON response
        :param Optional[str] base_url: an optional URL to join to the front of
            any relative file URLs (usually the URL of the page being parsed)
        :rtype: ProjectPage
        :raises ValueError: if ``data`` is not a `dict`
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        """
        project = Project.model_validate(data)
        check_repo_version(project.meta.api_version)
        return ProjectPage(
            project=project.name,
            packages=[
                DistributionPackage.from_file(f, project.name, base_url)
                for f in project.files
            ],
            repository_version=project.meta.api_version,
            last_serial=project.meta.last_serial,
            versions=project.versions,
            tracks=project.meta.tracks,
            alternate_locations=project.meta.alternate_locations,
        )

    @classmethod
    def from_response(cls, r: requests.Response, project: str) -> ProjectPage:
        """
        .. versionadded:: 1.0.0

        Parse a project page from a `requests.Response` returned from a
        (non-streaming) request to a simple repository, and return a
        `ProjectPage`.

        :param requests.Response r: the response object to parse
        :param str project: the name of the project whose page is being parsed
        :rtype: ProjectPage
        :raises UnsupportedRepoVersionError: if the repository version has a
            greater major component than the supported repository version
        :raises UnsupportedContentTypeError: if the response has an
            unsupported :mailheader:`Content-Type`
        """
        ct = ContentType.parse(r.headers.get("content-type", "text/html"))
        if ct.content_type == "application/vnd.pypi.simple.v1+json":
            page = cls.from_json_data(r.json(), r.url)
        elif (
            ct.content_type == "application/vnd.pypi.simple.v1+html"
            or ct.content_type == "text/html"
        ):
            page = cls.from_html(
                project=project,
                html=r.content,
                base_url=r.url,
                from_encoding=ct.params.get("charset"),
            )
        else:
            raise UnsupportedContentTypeError(r.url, str(ct))
        if page.last_serial is None:
            page.last_serial = r.headers.get("X-PyPI-Last-Serial")
        return page
(project: str, packages: list[pypi_simple.classes.DistributionPackage], repository_version: Optional[str], last_serial: Optional[str], versions: Optional[list[str]] = None, tracks: list[str] = <factory>, alternate_locations: list[str] = <factory>) -> None
708,893
pypi_simple.client
PyPISimple
A client for fetching package information from a Python simple package repository. If necessary, login/authentication details for the repository can be specified at initialization by setting the ``auth`` parameter to either a ``(username, password)`` pair or `another authentication object accepted by requests <https://requests.readthedocs.io/en/master/user/authentication/>`_. If more complicated session configuration is desired (e.g., setting up caching), the user must create & configure a `requests.Session` object appropriately and pass it to the constructor as the ``session`` parameter. A `PyPISimple` instance can be used as a context manager that will automatically close its session on exit, regardless of where the session object came from. .. versionchanged:: 1.0.0 ``accept`` parameter added :param str endpoint: The base URL of the simple API instance to query; defaults to the base URL for PyPI's simple API :param auth: Optional login/authentication details for the repository; either a ``(username, password)`` pair or `another authentication object accepted by requests <https://requests.readthedocs.io/en/master/user/authentication/>`_ :param session: Optional `requests.Session` object to use instead of creating a fresh one :param str accept: The :mailheader:`Accept` header to send in requests in order to specify what serialization format the server should return; defaults to `ACCEPT_ANY`
class PyPISimple: """ A client for fetching package information from a Python simple package repository. If necessary, login/authentication details for the repository can be specified at initialization by setting the ``auth`` parameter to either a ``(username, password)`` pair or `another authentication object accepted by requests <https://requests.readthedocs.io/en/master/user/authentication/>`_. If more complicated session configuration is desired (e.g., setting up caching), the user must create & configure a `requests.Session` object appropriately and pass it to the constructor as the ``session`` parameter. A `PyPISimple` instance can be used as a context manager that will automatically close its session on exit, regardless of where the session object came from. .. versionchanged:: 1.0.0 ``accept`` parameter added :param str endpoint: The base URL of the simple API instance to query; defaults to the base URL for PyPI's simple API :param auth: Optional login/authentication details for the repository; either a ``(username, password)`` pair or `another authentication object accepted by requests <https://requests.readthedocs.io/en/master/user/authentication/>`_ :param session: Optional `requests.Session` object to use instead of creating a fresh one :param str accept: The :mailheader:`Accept` header to send in requests in order to specify what serialization format the server should return; defaults to `ACCEPT_ANY` """ def __init__( self, endpoint: str = PYPI_SIMPLE_ENDPOINT, auth: Any = None, session: Optional[requests.Session] = None, accept: str = ACCEPT_ANY, ) -> None: self.endpoint: str = endpoint.rstrip("/") + "/" self.s: requests.Session if session is not None: self.s = session else: self.s = requests.Session() self.s.headers["User-Agent"] = USER_AGENT if auth is not None: self.s.auth = auth self.accept = accept def __enter__(self) -> PyPISimple: return self def __exit__( self, _exc_type: Optional[type[BaseException]], _exc_val: Optional[BaseException], _exc_tb: Optional[TracebackType], ) -> None: self.s.close() def get_index_page( self, timeout: float | tuple[float, float] | None = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None, ) -> IndexPage: """ Fetches the index/root page from the simple repository and returns an `IndexPage` instance. .. warning:: PyPI's project index file is very large and takes several seconds to parse. Use this method sparingly. .. versionchanged:: 1.0.0 ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. 
:rtype: IndexPage :raises requests.HTTPError: if the repository responds with an HTTP error code :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ request_headers = {"Accept": accept or self.accept} if headers: request_headers.update(headers) r = self.s.get( self.endpoint, timeout=timeout, headers=request_headers, ) r.raise_for_status() return IndexPage.from_response(r) def stream_project_names( self, chunk_size: int = 65535, timeout: float | tuple[float, float] | None = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None, ) -> Iterator[str]: """ Returns a generator of names of projects available in the repository. The names are not normalized. Unlike `get_index_page()`, this function makes a streaming request to the server and parses the document in chunks. It is intended to be faster than the other methods, especially when the complete document is very large. .. warning:: This function is rather experimental. It does not have full support for web encodings, encoding detection, or handling invalid HTML. .. note:: If the server responds with a JSON representation of the Simple API rather than an HTML representation, the response body will be loaded & parsed in its entirety before yielding anything. .. versionchanged:: 1.0.0 ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param int chunk_size: how many bytes to read from the response at a time :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: Iterator[str] :raises requests.HTTPError: if the repository responds with an HTTP error code :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ request_headers = {"Accept": accept or self.accept} if headers: request_headers.update(headers) with self.s.get( self.endpoint, stream=True, timeout=timeout, headers=request_headers, ) as r: r.raise_for_status() ct = ContentType.parse(r.headers.get("content-type", "text/html")) if ct.content_type == "application/vnd.pypi.simple.v1+json": page = IndexPage.from_json_data(r.json()) yield from page.projects elif ( ct.content_type == "application/vnd.pypi.simple.v1+html" or ct.content_type == "text/html" ): for link in parse_links_stream_response(r, chunk_size): yield link.text else: raise UnsupportedContentTypeError(r.url, str(ct)) def get_project_page( self, project: str, timeout: float | tuple[float, float] | None = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None, ) -> ProjectPage: """ Fetches the page for the given project from the simple repository and returns a `ProjectPage` instance. Raises `NoSuchProjectError` if the repository responds with a 404. All other HTTP errors cause a `requests.HTTPError` to be raised. .. 
versionchanged:: 1.0.0 - A 404 now causes `NoSuchProjectError` to be raised instead of returning `None` - ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param str project: The name of the project to fetch information on. The name does not need to be normalized. :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: ProjectPage :raises NoSuchProjectError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ request_headers = {"Accept": accept or self.accept} if headers: request_headers.update(headers) url = self.get_project_url(project) r = self.s.get(url, timeout=timeout, headers=request_headers) if r.status_code == 404: raise NoSuchProjectError(project, url) r.raise_for_status() return ProjectPage.from_response(r, project) def get_project_url(self, project: str) -> str: """ Returns the URL for the given project's page in the repository. :param str project: The name of the project to build a URL for. The name does not need to be normalized. :rtype: str """ return self.endpoint + normalize(project) + "/" def download_package( self, pkg: DistributionPackage, path: AnyStr | os.PathLike[AnyStr], verify: bool = True, keep_on_error: bool = False, progress: Optional[Callable[[Optional[int]], ProgressTracker]] = None, timeout: float | tuple[float, float] | None = None, headers: Optional[dict[str, str]] = None, ) -> None: """ Download the given `DistributionPackage` to the given path. If an error occurs while downloading or verifying digests, and ``keep_on_error`` is not true, the downloaded file is not saved. Download progress can be tracked (e.g., for display by a progress bar) by passing an appropriate callable as the ``progress`` argument. This callable will be passed the length of the downloaded file, if known, and it must return a `ProgressTracker` — a context manager with an ``update(increment: int)`` method that will be passed the size of each downloaded chunk as each chunk is received. .. versionchanged:: 1.5.0 ``headers`` parameter added :param DistributionPackage pkg: the distribution package to download :param path: the path at which to save the downloaded file; any parent directories of this path will be created as needed :param bool verify: whether to verify the package's digests against the downloaded file :param bool keep_on_error: whether to keep (true) or delete (false) the downloaded file if an error occurs :param progress: a callable for constructing a progress tracker :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. 
:raises requests.HTTPError: if the repository responds with an HTTP error code :raises NoDigestsError: if ``verify`` is true and the given package does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded file does not match the expected value """ target = Path(os.fsdecode(path)) target.parent.mkdir(parents=True, exist_ok=True) digester: AbstractDigestChecker if verify: digester = DigestChecker(pkg.digests) else: digester = NullDigestChecker() with self.s.get(pkg.url, stream=True, timeout=timeout, headers=headers) as r: r.raise_for_status() try: content_length = int(r.headers["Content-Length"]) except (ValueError, KeyError): content_length = None if progress is None: progress = null_progress_tracker() try: with progress(content_length) as p: with target.open("wb") as fp: for chunk in r.iter_content(65535): fp.write(chunk) digester.update(chunk) p.update(len(chunk)) digester.finalize() except Exception: if not keep_on_error: try: target.unlink() except FileNotFoundError: pass raise def get_package_metadata_bytes( self, pkg: DistributionPackage, verify: bool = True, timeout: float | tuple[float, float] | None = None, headers: Optional[dict[str, str]] = None, ) -> bytes: """ .. versionadded:: 1.5.0 Retrieve the `distribution metadata`_ for the given `DistributionPackage` as raw bytes. This method is lower-level than `PyPISimple.get_package_metadata()` and is most appropriate if you want to defer interpretation of the data (e.g., if you're just writing to a file) or want to customize the handling of non-UTF-8 data. Not all packages have distribution metadata available for download; the `DistributionPackage.has_metadata` attribute can be used to check whether the repository reported the availability of the metadata. This method will always attempt to download metadata regardless of the value of `~DistributionPackage.has_metadata`; if the server replies with a 404, a `NoMetadataError` is raised. :param DistributionPackage pkg: the distribution package to retrieve the metadata of :param bool verify: whether to verify the metadata's digests against the retrieved data :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: bytes :raises NoMetadataError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises NoDigestsError: if ``verify`` is true and the given package's metadata does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded data does not match the expected value """ digester: AbstractDigestChecker if verify: digester = DigestChecker(pkg.metadata_digests or {}) else: digester = NullDigestChecker() r = self.s.get(pkg.metadata_url, timeout=timeout, headers=headers) if r.status_code == 404: raise NoMetadataError(pkg.filename) r.raise_for_status() digester.update(r.content) digester.finalize() return r.content def get_package_metadata( self, pkg: DistributionPackage, verify: bool = True, timeout: float | tuple[float, float] | None = None, headers: Optional[dict[str, str]] = None, ) -> str: """ .. versionadded:: 1.3.0 Retrieve the `distribution metadata`_ for the given `DistributionPackage` and decode it as UTF-8. The metadata can then be parsed with, for example, |the packaging package|_. 
Not all packages have distribution metadata available for download; the `DistributionPackage.has_metadata` attribute can be used to check whether the repository reported the availability of the metadata. This method will always attempt to download metadata regardless of the value of `~DistributionPackage.has_metadata`; if the server replies with a 404, a `NoMetadataError` is raised. .. _distribution metadata: https://packaging.python.org/en/latest/specifications/core-metadata/ .. |the packaging package| replace:: the ``packaging`` package .. _the packaging package: https://packaging.pypa.io/en/stable/metadata.html .. versionchanged:: 1.5.0 ``headers`` parameter added :param DistributionPackage pkg: the distribution package to retrieve the metadata of :param bool verify: whether to verify the metadata's digests against the retrieved data :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: str :raises NoMetadataError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises NoDigestsError: if ``verify`` is true and the given package's metadata does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded data does not match the expected value """ return self.get_package_metadata_bytes( pkg, verify, timeout, headers, ).decode("utf-8", "surrogateescape")
(endpoint: 'str' = 'https://pypi.org/simple/', auth: 'Any' = None, session: 'Optional[requests.Session]' = None, accept: 'str' = 'application/vnd.pypi.simple.v1+json, application/vnd.pypi.simple.v1+html, text/html;q=0.01') -> 'None'
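A minimal usage sketch of the client described above, following the pattern already shown in the `tqdm_progress_factory` docstring later in this file:

from pypi_simple import PyPISimple

# The context manager closes the underlying requests.Session on exit.
with PyPISimple() as client:
    page = client.get_project_page("pypi-simple")
    for pkg in page.packages:
        print(pkg.filename)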
708,894
pypi_simple.client
__enter__
null
def __enter__(self) -> PyPISimple: return self
(self) -> pypi_simple.client.PyPISimple
708,895
pypi_simple.client
__exit__
null
def __exit__( self, _exc_type: Optional[type[BaseException]], _exc_val: Optional[BaseException], _exc_tb: Optional[TracebackType], ) -> None: self.s.close()
(self, _exc_type: Optional[type[BaseException]], _exc_val: Optional[BaseException], _exc_tb: Optional[traceback]) -> NoneType
708,896
pypi_simple.client
__init__
null
def __init__( self, endpoint: str = PYPI_SIMPLE_ENDPOINT, auth: Any = None, session: Optional[requests.Session] = None, accept: str = ACCEPT_ANY, ) -> None: self.endpoint: str = endpoint.rstrip("/") + "/" self.s: requests.Session if session is not None: self.s = session else: self.s = requests.Session() self.s.headers["User-Agent"] = USER_AGENT if auth is not None: self.s.auth = auth self.accept = accept
(self, endpoint: str = 'https://pypi.org/simple/', auth: Optional[Any] = None, session: Optional[requests.sessions.Session] = None, accept: str = 'application/vnd.pypi.simple.v1+json, application/vnd.pypi.simple.v1+html, text/html;q=0.01') -> NoneType
708,897
pypi_simple.client
download_package
Download the given `DistributionPackage` to the given path. If an error occurs while downloading or verifying digests, and ``keep_on_error`` is not true, the downloaded file is not saved. Download progress can be tracked (e.g., for display by a progress bar) by passing an appropriate callable as the ``progress`` argument. This callable will be passed the length of the downloaded file, if known, and it must return a `ProgressTracker` — a context manager with an ``update(increment: int)`` method that will be passed the size of each downloaded chunk as each chunk is received. .. versionchanged:: 1.5.0 ``headers`` parameter added :param DistributionPackage pkg: the distribution package to download :param path: the path at which to save the downloaded file; any parent directories of this path will be created as needed :param bool verify: whether to verify the package's digests against the downloaded file :param bool keep_on_error: whether to keep (true) or delete (false) the downloaded file if an error occurs :param progress: a callable for constructing a progress tracker :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :raises requests.HTTPError: if the repository responds with an HTTP error code :raises NoDigestsError: if ``verify`` is true and the given package does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded file does not match the expected value
def download_package( self, pkg: DistributionPackage, path: AnyStr | os.PathLike[AnyStr], verify: bool = True, keep_on_error: bool = False, progress: Optional[Callable[[Optional[int]], ProgressTracker]] = None, timeout: float | tuple[float, float] | None = None, headers: Optional[dict[str, str]] = None, ) -> None: """ Download the given `DistributionPackage` to the given path. If an error occurs while downloading or verifying digests, and ``keep_on_error`` is not true, the downloaded file is not saved. Download progress can be tracked (e.g., for display by a progress bar) by passing an appropriate callable as the ``progress`` argument. This callable will be passed the length of the downloaded file, if known, and it must return a `ProgressTracker` — a context manager with an ``update(increment: int)`` method that will be passed the size of each downloaded chunk as each chunk is received. .. versionchanged:: 1.5.0 ``headers`` parameter added :param DistributionPackage pkg: the distribution package to download :param path: the path at which to save the downloaded file; any parent directories of this path will be created as needed :param bool verify: whether to verify the package's digests against the downloaded file :param bool keep_on_error: whether to keep (true) or delete (false) the downloaded file if an error occurs :param progress: a callable for constructing a progress tracker :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :raises requests.HTTPError: if the repository responds with an HTTP error code :raises NoDigestsError: if ``verify`` is true and the given package does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded file does not match the expected value """ target = Path(os.fsdecode(path)) target.parent.mkdir(parents=True, exist_ok=True) digester: AbstractDigestChecker if verify: digester = DigestChecker(pkg.digests) else: digester = NullDigestChecker() with self.s.get(pkg.url, stream=True, timeout=timeout, headers=headers) as r: r.raise_for_status() try: content_length = int(r.headers["Content-Length"]) except (ValueError, KeyError): content_length = None if progress is None: progress = null_progress_tracker() try: with progress(content_length) as p: with target.open("wb") as fp: for chunk in r.iter_content(65535): fp.write(chunk) digester.update(chunk) p.update(len(chunk)) digester.finalize() except Exception: if not keep_on_error: try: target.unlink() except FileNotFoundError: pass raise
(self, pkg: pypi_simple.classes.DistributionPackage, path: Union[~AnyStr, os.PathLike[~AnyStr]], verify: bool = True, keep_on_error: bool = False, progress: Optional[collections.abc.Callable[[Optional[int]], pypi_simple.progress.ProgressTracker]] = None, timeout: Union[float, tuple[float, float], NoneType] = None, headers: Optional[dict[str, str]] = None) -> NoneType
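A sketch of a plain download (no progress tracker), mirroring the docstring's parameters; digest verification is on by default:

from pypi_simple import PyPISimple

with PyPISimple() as client:
    page = client.get_project_page("pypi-simple")
    pkg = page.packages[-1]
    # Saves under the package's own filename; parent directories are created as needed.
    client.download_package(pkg, path=pkg.filename, timeout=30.0)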
708,898
pypi_simple.client
get_index_page
Fetches the index/root page from the simple repository and returns an `IndexPage` instance. .. warning:: PyPI's project index file is very large and takes several seconds to parse. Use this method sparingly. .. versionchanged:: 1.0.0 ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: IndexPage :raises requests.HTTPError: if the repository responds with an HTTP error code :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version
def get_index_page( self, timeout: float | tuple[float, float] | None = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None, ) -> IndexPage: """ Fetches the index/root page from the simple repository and returns an `IndexPage` instance. .. warning:: PyPI's project index file is very large and takes several seconds to parse. Use this method sparingly. .. versionchanged:: 1.0.0 ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: IndexPage :raises requests.HTTPError: if the repository responds with an HTTP error code :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ request_headers = {"Accept": accept or self.accept} if headers: request_headers.update(headers) r = self.s.get( self.endpoint, timeout=timeout, headers=request_headers, ) r.raise_for_status() return IndexPage.from_response(r)
(self, timeout: Union[float, tuple[float, float], NoneType] = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None) -> pypi_simple.classes.IndexPage
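A sketch of fetching the index; note the warning above about the size of PyPI's full index (the `projects` attribute name is taken from the `stream_project_names()` code below):

from pypi_simple import PyPISimple

with PyPISimple() as client:
    index = client.get_index_page(timeout=(3.05, 120.0))
    print(f"{len(index.projects)} projects in the repository")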
708,899
pypi_simple.client
get_package_metadata
.. versionadded:: 1.3.0 Retrieve the `distribution metadata`_ for the given `DistributionPackage` and decode it as UTF-8. The metadata can then be parsed with, for example, |the packaging package|_. Not all packages have distribution metadata available for download; the `DistributionPackage.has_metadata` attribute can be used to check whether the repository reported the availability of the metadata. This method will always attempt to download metadata regardless of the value of `~DistributionPackage.has_metadata`; if the server replies with a 404, a `NoMetadataError` is raised. .. _distribution metadata: https://packaging.python.org/en/latest/specifications/core-metadata/ .. |the packaging package| replace:: the ``packaging`` package .. _the packaging package: https://packaging.pypa.io/en/stable/metadata.html .. versionchanged:: 1.5.0 ``headers`` parameter added :param DistributionPackage pkg: the distribution package to retrieve the metadata of :param bool verify: whether to verify the metadata's digests against the retrieved data :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: str :raises NoMetadataError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises NoDigestsError: if ``verify`` is true and the given package's metadata does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded data does not match the expected value
def get_package_metadata( self, pkg: DistributionPackage, verify: bool = True, timeout: float | tuple[float, float] | None = None, headers: Optional[dict[str, str]] = None, ) -> str: """ .. versionadded:: 1.3.0 Retrieve the `distribution metadata`_ for the given `DistributionPackage` and decode it as UTF-8. The metadata can then be parsed with, for example, |the packaging package|_. Not all packages have distribution metadata available for download; the `DistributionPackage.has_metadata` attribute can be used to check whether the repository reported the availability of the metadata. This method will always attempt to download metadata regardless of the value of `~DistributionPackage.has_metadata`; if the server replies with a 404, a `NoMetadataError` is raised. .. _distribution metadata: https://packaging.python.org/en/latest/specifications/core-metadata/ .. |the packaging package| replace:: the ``packaging`` package .. _the packaging package: https://packaging.pypa.io/en/stable/metadata.html .. versionchanged:: 1.5.0 ``headers`` parameter added :param DistributionPackage pkg: the distribution package to retrieve the metadata of :param bool verify: whether to verify the metadata's digests against the retrieved data :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: str :raises NoMetadataError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises NoDigestsError: if ``verify`` is true and the given package's metadata does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded data does not match the expected value """ return self.get_package_metadata_bytes( pkg, verify, timeout, headers, ).decode("utf-8", "surrogateescape")
(self, pkg: pypi_simple.classes.DistributionPackage, verify: bool = True, timeout: Union[float, tuple[float, float], NoneType] = None, headers: Optional[dict[str, str]] = None) -> str
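Since core metadata is formatted as email-style headers, the decoded string can be parsed with the stdlib `email` package; a sketch, using the `has_metadata` attribute referenced in the docstring:

from email.parser import Parser
from pypi_simple import PyPISimple

with PyPISimple() as client:
    page = client.get_project_page("pypi-simple")
    pkg = page.packages[-1]
    if pkg.has_metadata:  # availability as reported by the repository
        meta = Parser().parsestr(client.get_package_metadata(pkg))
        print(meta["Name"], meta["Version"])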
708,900
pypi_simple.client
get_package_metadata_bytes
.. versionadded:: 1.5.0 Retrieve the `distribution metadata`_ for the given `DistributionPackage` as raw bytes. This method is lower-level than `PyPISimple.get_package_metadata()` and is most appropriate if you want to defer interpretation of the data (e.g., if you're just writing to a file) or want to customize the handling of non-UTF-8 data. Not all packages have distribution metadata available for download; the `DistributionPackage.has_metadata` attribute can be used to check whether the repository reported the availability of the metadata. This method will always attempt to download metadata regardless of the value of `~DistributionPackage.has_metadata`; if the server replies with a 404, a `NoMetadataError` is raised. :param DistributionPackage pkg: the distribution package to retrieve the metadata of :param bool verify: whether to verify the metadata's digests against the retrieved data :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: bytes :raises NoMetadataError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises NoDigestsError: if ``verify`` is true and the given package's metadata does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded data does not match the expected value
def get_package_metadata_bytes( self, pkg: DistributionPackage, verify: bool = True, timeout: float | tuple[float, float] | None = None, headers: Optional[dict[str, str]] = None, ) -> bytes: """ .. versionadded:: 1.5.0 Retrieve the `distribution metadata`_ for the given `DistributionPackage` as raw bytes. This method is lower-level than `PyPISimple.get_package_metadata()` and is most appropriate if you want to defer interpretation of the data (e.g., if you're just writing to a file) or want to customize the handling of non-UTF-8 data. Not all packages have distribution metadata available for download; the `DistributionPackage.has_metadata` attribute can be used to check whether the repository reported the availability of the metadata. This method will always attempt to download metadata regardless of the value of `~DistributionPackage.has_metadata`; if the server replies with a 404, a `NoMetadataError` is raised. :param DistributionPackage pkg: the distribution package to retrieve the metadata of :param bool verify: whether to verify the metadata's digests against the retrieved data :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: bytes :raises NoMetadataError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises NoDigestsError: if ``verify`` is true and the given package's metadata does not have any digests with known algorithms :raises DigestMismatchError: if ``verify`` is true and the digest of the downloaded data does not match the expected value """ digester: AbstractDigestChecker if verify: digester = DigestChecker(pkg.metadata_digests or {}) else: digester = NullDigestChecker() r = self.s.get(pkg.metadata_url, timeout=timeout, headers=headers) if r.status_code == 404: raise NoMetadataError(pkg.filename) r.raise_for_status() digester.update(r.content) digester.finalize() return r.content
(self, pkg: pypi_simple.classes.DistributionPackage, verify: bool = True, timeout: Union[float, tuple[float, float], NoneType] = None, headers: Optional[dict[str, str]] = None) -> bytes
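For the raw-bytes variant, the docstring's own suggested use case is writing straight to disk; a sketch:

from pathlib import Path
from pypi_simple import PyPISimple

with PyPISimple() as client:
    page = client.get_project_page("pypi-simple")
    pkg = page.packages[-1]
    # No decoding step: the bytes are persisted exactly as received (and digest-checked).
    Path(pkg.filename + ".metadata").write_bytes(client.get_package_metadata_bytes(pkg))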
708,901
pypi_simple.client
get_project_page
Fetches the page for the given project from the simple repository and returns a `ProjectPage` instance. Raises `NoSuchProjectError` if the repository responds with a 404. All other HTTP errors cause a `requests.HTTPError` to be raised. .. versionchanged:: 1.0.0 - A 404 now causes `NoSuchProjectError` to be raised instead of returning `None` - ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param str project: The name of the project to fetch information on. The name does not need to be normalized. :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: ProjectPage :raises NoSuchProjectError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version
def get_project_page( self, project: str, timeout: float | tuple[float, float] | None = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None, ) -> ProjectPage: """ Fetches the page for the given project from the simple repository and returns a `ProjectPage` instance. Raises `NoSuchProjectError` if the repository responds with a 404. All other HTTP errors cause a `requests.HTTPError` to be raised. .. versionchanged:: 1.0.0 - A 404 now causes `NoSuchProjectError` to be raised instead of returning `None` - ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param str project: The name of the project to fetch information on. The name does not need to be normalized. :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: ProjectPage :raises NoSuchProjectError: if the repository responds with a 404 error code :raises requests.HTTPError: if the repository responds with an HTTP error code other than 404 :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ request_headers = {"Accept": accept or self.accept} if headers: request_headers.update(headers) url = self.get_project_url(project) r = self.s.get(url, timeout=timeout, headers=request_headers) if r.status_code == 404: raise NoSuchProjectError(project, url) r.raise_for_status() return ProjectPage.from_response(r, project)
(self, project: str, timeout: Union[float, tuple[float, float], NoneType] = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None) -> pypi_simple.classes.ProjectPage
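A sketch of the 404 behavior documented above; it assumes `NoSuchProjectError` is re-exported at the package top level:

from pypi_simple import NoSuchProjectError, PyPISimple  # top-level re-export assumed

with PyPISimple() as client:
    try:
        page = client.get_project_page("Some_Project")  # name need not be normalized
    except NoSuchProjectError:
        page = None  # the repository answered 404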
708,902
pypi_simple.client
get_project_url
Returns the URL for the given project's page in the repository. :param str project: The name of the project to build a URL for. The name does not need to be normalized. :rtype: str
def get_project_url(self, project: str) -> str: """ Returns the URL for the given project's page in the repository. :param str project: The name of the project to build a URL for. The name does not need to be normalized. :rtype: str """ return self.endpoint + normalize(project) + "/"
(self, project: str) -> str
708,903
pypi_simple.client
stream_project_names
Returns a generator of names of projects available in the repository. The names are not normalized. Unlike `get_index_page()`, this function makes a streaming request to the server and parses the document in chunks. It is intended to be faster than the other methods, especially when the complete document is very large. .. warning:: This function is rather experimental. It does not have full support for web encodings, encoding detection, or handling invalid HTML. .. note:: If the server responds with a JSON representation of the Simple API rather than an HTML representation, the response body will be loaded & parsed in its entirety before yielding anything. .. versionchanged:: 1.0.0 ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param int chunk_size: how many bytes to read from the response at a time :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: Iterator[str] :raises requests.HTTPError: if the repository responds with an HTTP error code :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version
def stream_project_names( self, chunk_size: int = 65535, timeout: float | tuple[float, float] | None = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None, ) -> Iterator[str]: """ Returns a generator of names of projects available in the repository. The names are not normalized. Unlike `get_index_page()`, this function makes a streaming request to the server and parses the document in chunks. It is intended to be faster than the other methods, especially when the complete document is very large. .. warning:: This function is rather experimental. It does not have full support for web encodings, encoding detection, or handling invalid HTML. .. note:: If the server responds with a JSON representation of the Simple API rather than an HTML representation, the response body will be loaded & parsed in its entirety before yielding anything. .. versionchanged:: 1.0.0 ``accept`` parameter added .. versionchanged:: 1.5.0 ``headers`` parameter added :param int chunk_size: how many bytes to read from the response at a time :param timeout: optional timeout to pass to the ``requests`` call :type timeout: float | tuple[float,float] | None :param Optional[str] accept: The :mailheader:`Accept` header to send in order to specify what serialization format the server should return; defaults to the value supplied on client instantiation :param Optional[dict[str, str]] headers: Custom headers to provide for the request. :rtype: Iterator[str] :raises requests.HTTPError: if the repository responds with an HTTP error code :raises UnsupportedContentTypeError: if the repository responds with an unsupported :mailheader:`Content-Type` :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ request_headers = {"Accept": accept or self.accept} if headers: request_headers.update(headers) with self.s.get( self.endpoint, stream=True, timeout=timeout, headers=request_headers, ) as r: r.raise_for_status() ct = ContentType.parse(r.headers.get("content-type", "text/html")) if ct.content_type == "application/vnd.pypi.simple.v1+json": page = IndexPage.from_json_data(r.json()) yield from page.projects elif ( ct.content_type == "application/vnd.pypi.simple.v1+html" or ct.content_type == "text/html" ): for link in parse_links_stream_response(r, chunk_size): yield link.text else: raise UnsupportedContentTypeError(r.url, str(ct))
(self, chunk_size: int = 65535, timeout: Union[float, tuple[float, float], NoneType] = None, accept: Optional[str] = None, headers: Optional[dict[str, str]] = None) -> collections.abc.Iterator[str]
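Because this method returns a generator, it can be abandoned early; a sketch that reads only the first few names:

from itertools import islice
from pypi_simple import PyPISimple

with PyPISimple() as client:
    # Only the consumed chunks are parsed -- unless the server answered with
    # JSON, in which case the whole body is loaded first (see the note above).
    for name in islice(client.stream_project_names(), 10):
        print(name)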
708,904
pypi_simple.html
RepositoryPage
.. versionadded:: 1.0.0 A parsed HTML page from a :pep:`503` simple repository
class RepositoryPage: """ .. versionadded:: 1.0.0 A parsed HTML page from a :pep:`503` simple repository """ #: The repository version, if any, reported by the page in accordance with #: :pep:`629` repository_version: Optional[str] #: A list of hyperlinks found on the page links: list[Link] #: .. versionadded:: 1.4.0 #: #: ``<meta/>`` tags found on the page whose ``name`` attributes start with #: ``pypi:``. This is a dict in which the keys are ``name`` attributes #: with leading ``"pypi:"`` removed and in which the values are the #: corresponding ``content`` attributes. pypi_meta: dict[str, list[str]] @property def tracks(self) -> list[str]: """ .. versionadded:: 1.4.0 Repository "tracks" metadata. See `PEP 708`__. __ https://peps.python.org/pep-0708/#repository-tracks-metadata """ return self.pypi_meta.get("tracks", []) @property def alternate_locations(self) -> list[str]: """ .. versionadded:: 1.4.0 Repository "alternate locations" metadata. See `PEP 708`__. __ https://peps.python.org/pep-0708/#alternate-locations-metadata """ return self.pypi_meta.get("alternate-locations", []) @classmethod def from_html( cls, html: str | bytes, base_url: Optional[str] = None, from_encoding: Optional[str] = None, ) -> RepositoryPage: """ Parse an HTML page from a simple repository into a `RepositoryPage`. :param html: the HTML to parse :type html: str or bytes :param Optional[str] base_url: an optional URL to join to the front of the links' URLs (usually the URL of the page being parsed) :param Optional[str] from_encoding: an optional hint to Beautiful Soup as to the encoding of ``html`` when it is `bytes` (usually the ``charset`` parameter of the response's :mailheader:`Content-Type` header) :rtype: RepositoryPage :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ soup = BeautifulSoup(html, "html.parser", from_encoding=from_encoding) base_tag = soup.find("base", href=True) if base_tag is not None: assert isinstance(base_tag, Tag) href = base_tag["href"] assert isinstance(href, str) if base_url is None: base_url = href else: base_url = urljoin(base_url, href) meta: dict[str, list[str]] = {} for tag in soup.find_all( "meta", attrs={"name": re.compile(r"^pypi:"), "content": True} ): assert isinstance(tag, Tag) name = tag["name"] assert isinstance(name, str) assert name.startswith("pypi:") content = tag["content"] assert isinstance(content, str) meta.setdefault(name[5:], []).append(content) try: repository_version = meta["repository-version"][0] except LookupError: repository_version = None if repository_version is not None: check_repo_version(repository_version) links = [] for link in soup.find_all("a", href=True): links.append( Link( text="".join(link.strings).strip(), url=basejoin(base_url, link["href"]), attrs=link.attrs, ) ) return cls(repository_version=repository_version, links=links, pypi_meta=meta)
(repository_version: Optional[str], links: list[pypi_simple.html.Link], pypi_meta: dict[str, list[str]]) -> None
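A self-contained sketch of `RepositoryPage.from_html()` on a tiny hand-written page (the hostname is hypothetical):

from pypi_simple.html import RepositoryPage  # module path as recorded above

html = (
    '<html><head><meta name="pypi:repository-version" content="1.0"></head>'
    '<body><a href="demo-1.0.tar.gz">demo-1.0.tar.gz</a></body></html>'
)
page = RepositoryPage.from_html(html, base_url="https://example.test/simple/demo/")
print(page.repository_version)                # "1.0", from the pypi:* meta tag
print(page.links[0].text, page.links[0].url)  # link URL joined against base_url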
708,908
pypi_simple.errors
UnexpectedRepoVersionWarning
Emitted upon encountering a simple repository whose repository version (:pep:`629`) has a greater minor version component than the maximum supported repository version (`SUPPORTED_REPOSITORY_VERSION`). This warning can be emitted by anything that can raise `UnsupportedRepoVersionError`.
class UnexpectedRepoVersionWarning(UserWarning): """ Emitted upon encountering a simple repository whose repository version (:pep:`629`) has a greater minor version component than the maximum supported repository version (`SUPPORTED_REPOSITORY_VERSION`). This warning can be emitted by anything that can raise `UnsupportedRepoVersionError`. """ pass
null
708,909
pypi_simple.errors
UnparsableFilenameError
.. versionadded:: 1.0.0 Raised when `parse_filename()` is passed an unparsable filename
class UnparsableFilenameError(ValueError): """ .. versionadded:: 1.0.0 Raised when `parse_filename()` is passed an unparsable filename """ def __init__(self, filename: str) -> None: #: The unparsable filename self.filename = filename def __str__(self) -> str: return f"Cannot parse package filename: {self.filename!r}"
(filename: str) -> None
708,910
pypi_simple.errors
__init__
null
def __init__(self, filename: str) -> None: #: The unparsable filename self.filename = filename
(self, filename: str) -> NoneType
708,911
pypi_simple.errors
__str__
null
def __str__(self) -> str: return f"Cannot parse package filename: {self.filename!r}"
(self) -> str
708,912
pypi_simple.errors
UnsupportedContentTypeError
Raised when a response from a simple repository has an unsupported :mailheader:`Content-Type`
class UnsupportedContentTypeError(ValueError): """ Raised when a response from a simple repository has an unsupported :mailheader:`Content-Type` """ def __init__(self, url: str, content_type: str) -> None: #: The URL that returned the response self.url = url #: The unsupported :mailheader:`Content-Type` self.content_type = content_type def __str__(self) -> str: return ( f"Response from {self.url} has unsupported Content-Type" f" {self.content_type!r}" )
(url: str, content_type: str) -> None
708,913
pypi_simple.errors
__init__
null
def __init__(self, url: str, content_type: str) -> None: #: The URL that returned the response self.url = url #: The unsupported :mailheader:`Content-Type` self.content_type = content_type
(self, url: str, content_type: str) -> NoneType
708,914
pypi_simple.errors
__str__
null
def __str__(self) -> str: return ( f"Response from {self.url} has unsupported Content-Type" f" {self.content_type!r}" )
(self) -> str
708,915
pypi_simple.errors
UnsupportedRepoVersionError
Raised upon encountering a simple repository whose repository version (:pep:`629`) has a greater major component than the maximum supported repository version (`SUPPORTED_REPOSITORY_VERSION`)
class UnsupportedRepoVersionError(Exception): """ Raised upon encountering a simple repository whose repository version (:pep:`629`) has a greater major component than the maximum supported repository version (`SUPPORTED_REPOSITORY_VERSION`) """ def __init__(self, declared_version: str, supported_version: str) -> None: #: The version of the simple repository self.declared_version: str = declared_version #: The maximum repository version that we support self.supported_version: str = supported_version def __str__(self) -> str: return ( f"Repository's version ({self.declared_version}) has greater major" f" component than supported version ({self.supported_version})" )
(declared_version: str, supported_version: str) -> None
708,916
pypi_simple.errors
__init__
null
def __init__(self, declared_version: str, supported_version: str) -> None: #: The version of the simple repository self.declared_version: str = declared_version #: The maximum repository version that we support self.supported_version: str = supported_version
(self, declared_version: str, supported_version: str) -> NoneType
708,917
pypi_simple.errors
__str__
null
def __str__(self) -> str: return ( f"Repository's version ({self.declared_version}) has greater major" f" component than supported version ({self.supported_version})" )
(self) -> str
708,924
pypi_simple.filenames
parse_filename
Given the filename of a distribution package, returns a triple of the project name, project version, and package type. The name and version are spelled the same as they appear in the filename; no normalization is performed. The package type may be any of the following strings: - ``'dumb'`` - ``'egg'`` - ``'msi'`` - ``'rpm'`` - ``'sdist'`` - ``'wheel'`` - ``'wininst'`` Note that some filenames (e.g., :file:`1-2-3.tar.gz`) may be ambiguous as to which part is the project name and which is the version. In order to resolve the ambiguity, the expected value for the project name (*modulo* normalization) can be supplied as the ``project_hint`` argument to the function. If the filename can be parsed with the given string in the role of the project name, the results of that parse will be returned; otherwise, the function will fall back to breaking the project & version apart at an unspecified point. .. versionchanged:: 1.0.0 Now raises `UnparsableFilenameError` for unparsable filenames instead of returning all `None`\s :param str filename: The package filename to parse :param Optional[str] project_hint: Optionally, the expected value for the project name (usually the name of the project page on which the filename was found). The name does not need to be normalized. :rtype: tuple[str, str, str] :raises UnparsableFilenameError: if the filename cannot be parsed
def parse_filename( filename: str, project_hint: Optional[str] = None ) -> tuple[str, str, str]: """ Given the filename of a distribution package, returns a triple of the project name, project version, and package type. The name and version are spelled the same as they appear in the filename; no normalization is performed. The package type may be any of the following strings: - ``'dumb'`` - ``'egg'`` - ``'msi'`` - ``'rpm'`` - ``'sdist'`` - ``'wheel'`` - ``'wininst'`` Note that some filenames (e.g., :file:`1-2-3.tar.gz`) may be ambiguous as to which part is the project name and which is the version. In order to resolve the ambiguity, the expected value for the project name (*modulo* normalization) can be supplied as the ``project_hint`` argument to the function. If the filename can be parsed with the given string in the role of the project name, the results of that parse will be returned; otherwise, the function will fall back to breaking the project & version apart at an unspecified point. .. versionchanged:: 1.0.0 Now raises `UnparsableFilenameError` for unparsable filenames instead of returning all `None`\\s :param str filename: The package filename to parse :param Optional[str] project_hint: Optionally, the expected value for the project name (usually the name of the project page on which the filename was found). The name does not need to be normalized. :rtype: tuple[str, str, str] :raises UnparsableFilenameError: if the filename cannot be parsed """ for pkg_type, rgx in GOOD_PACKAGE_RGXN: m = rgx.match(filename) if m: return (m.group("project"), m.group("version"), pkg_type) if project_hint is not None: proj_rgx = re.sub(r"[^A-Za-z0-9]+", "[-_.]+", project_hint) proj_rgx = re.sub( r"([A-Za-z])", lambda m: "[" + m.group(1).upper() + m.group(1).lower() + "]", proj_rgx, ) m = re.match(proj_rgx + r"(?=-)", filename) if m: project = m.group(0) rest_of_name = filename[m.end(0) :] for pkg_type, rgx in BAD_PACKAGE_BASES: m = rgx.match(rest_of_name) if m: return (project, m.group("version"), pkg_type) for pkg_type, rgx in BAD_PACKAGE_RGXN: m = rgx.match(filename) if m: return (m.group("project"), m.group("version"), pkg_type) raise UnparsableFilenameError(filename)
(filename: str, project_hint: Optional[str] = None) -> tuple[str, str, str]
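Two sketches of the parsing behavior described above, assuming the function and error class are re-exported at the package top level; the ambiguous case follows the docstring's own :file:`1-2-3.tar.gz` example:

from pypi_simple import UnparsableFilenameError, parse_filename  # top-level re-exports assumed

print(parse_filename("pip-23.1.2-py3-none-any.whl"))  # ('pip', '23.1.2', 'wheel')

# With the hint, "1-2" is taken as the project per the docstring's contract:
project, version, pkg_type = parse_filename("1-2-3.tar.gz", project_hint="1-2")

try:
    parse_filename("not-a-package")  # no recognized extension
except UnparsableFilenameError as e:
    print(e)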
708,925
pypi_simple.html_stream
parse_links_stream
Parse an HTML page given as an iterable of `bytes` or `str` and yield each hyperlink encountered in the document as a `Link` object. This function consumes the elements of ``htmlseq`` one at a time and yields the links found in each segment before moving on to the next one. It is intended to be faster than `RepositoryPage.from_html()`, especially when the complete document is very large. .. warning:: This function is rather experimental. It does not have full support for web encodings, encoding detection, or handling invalid HTML. It also leaves CDATA list attributes on links as strings instead of converting them to lists. :param Iterable[AnyStr] htmlseq: an iterable of either `bytes` or `str` that, when joined together, form an HTML document to parse :param Optional[str] base_url: an optional URL to join to the front of the links' URLs (usually the URL of the page being parsed) :param Optional[str] http_charset: the document's encoding as declared by the transport layer, if any; e.g., as declared in the ``charset`` parameter of the :mailheader:`Content-Type` header of the HTTP response that returned the document :rtype: Iterator[Link] :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version
def parse_links_stream( htmlseq: Iterable[AnyStr], base_url: Optional[str] = None, http_charset: Optional[str] = None, ) -> Iterator[Link]: """ Parse an HTML page given as an iterable of `bytes` or `str` and yield each hyperlink encountered in the document as a `Link` object. This function consumes the elements of ``htmlseq`` one at a time and yields the links found in each segment before moving on to the next one. It is intended to be faster than `RepositoryPage.from_html()`, especially when the complete document is very large. .. warning:: This function is rather experimental. It does not have full support for web encodings, encoding detection, or handling invalid HTML. It also leaves CDATA list attributes on links as strings instead of converting them to lists. :param Iterable[AnyStr] htmlseq: an iterable of either `bytes` or `str` that, when joined together, form an HTML document to parse :param Optional[str] base_url: an optional URL to join to the front of the links' URLs (usually the URL of the page being parsed) :param Optional[str] http_charset: the document's encoding as declared by the transport layer, if any; e.g., as declared in the ``charset`` parameter of the :mailheader:`Content-Type` header of the HTTP response that returned the document :rtype: Iterator[Link] :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ textseq = iterhtmldecode(htmlseq, http_charset=http_charset) parser = LinkParser(base_url=base_url) for piece in textseq: parser.feed(piece) for link in parser.fetch_links(): yield link parser.close() for link in parser.fetch_links(): yield link
(htmlseq: collections.abc.Iterable[~AnyStr], base_url: Optional[str] = None, http_charset: Optional[str] = None) -> collections.abc.Iterator[pypi_simple.html.Link]
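A sketch feeding pre-chunked bytes, matching the `Iterable[AnyStr]` signature (the hostname is hypothetical):

from pypi_simple.html_stream import parse_links_stream  # module path as recorded above

chunks = [
    b"<html><body>",
    b'<a href="a-1.0.tar.gz">a-1.0.tar.gz</a>',
    b"</body></html>",
]
for link in parse_links_stream(chunks, base_url="https://example.test/simple/a/"):
    print(link.text, link.url)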
708,926
pypi_simple.html_stream
parse_links_stream_response
Parse an HTML page from a streaming `requests.Response` object and yield each hyperlink encountered in the document as a `Link` object. See `parse_links_stream()` for more information. :param requests.Response r: the streaming response object to parse :param int chunk_size: how many bytes to read from the response at a time :rtype: Iterator[Link] :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version
def parse_links_stream_response( r: requests.Response, chunk_size: int = 65535 ) -> Iterator[Link]: """ Parse an HTML page from a streaming `requests.Response` object and yield each hyperlink encountered in the document as a `Link` object. See `parse_links_stream()` for more information. :param requests.Response r: the streaming response object to parse :param int chunk_size: how many bytes to read from the response at a time :rtype: Iterator[Link] :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ return parse_links_stream( r.iter_content(chunk_size), base_url=r.url, http_charset=r.encoding, )
(r: requests.models.Response, chunk_size: int = 65535) -> collections.abc.Iterator[pypi_simple.html.Link]
708,929
pypi_simple.progress
tqdm_progress_factory
A function for displaying a progress bar with tqdm_ during a download. Naturally, using this requires tqdm to be installed alongside ``pypi-simple``. Call `tqdm_progress_factory()` with any arguments you wish to pass to the ``tqdm.tqdm`` constructor, and pass the result as the ``progress`` argument to `PyPISimple.download_package()`. .. _tqdm: https://tqdm.github.io Example: .. code:: python with PyPISimple() as client: page = client.get_project_page("pypi-simple") pkg = page.packages[-1] client.download_package( pkg, path=pkg.filename, progress=tqdm_progress_factory(desc="Downloading ..."), )
def tqdm_progress_factory(**kwargs: Any) -> Callable[[Optional[int]], ProgressTracker]: """ A function for displaying a progress bar with tqdm_ during a download. Naturally, using this requires tqdm to be installed alongside ``pypi-simple``. Call `tqdm_progress_factory()` with any arguments you wish to pass to the ``tqdm.tqdm`` constructor, and pass the result as the ``progress`` argument to `PyPISimple.download_package()`. .. _tqdm: https://tqdm.github.io Example: .. code:: python with PyPISimple() as client: page = client.get_project_page("pypi-simple") pkg = page.packages[-1] client.download_package( pkg, path=pkg.filename, progress=tqdm_progress_factory(desc="Downloading ..."), ) """ from tqdm import tqdm def factory(content_length: Optional[int]) -> ProgressTracker: return tqdm(total=content_length, **kwargs) return factory
(**kwargs: Any) -> collections.abc.Callable[[typing.Optional[int]], pypi_simple.progress.ProgressTracker]
708,933
colorlover
flipper
Invert color scale dictionary
def flipper( scl=None ): ''' Invert color scale dictionary ''' scl = scl if scl is not None else scales flipped = defaultdict(dict) for key, val in list(scl.items()): for subkey, subval in list(val.items()): flipped[subkey][key] = subval return flipped
(scl=None)
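A small sketch of what the inversion does, assuming the conventional ``cl.scales['<n>']['<type>']['<name>']`` layout:

import colorlover as cl

flipped = cl.flipper()  # defaults to flipping cl.scales
# the top two nesting levels swap places:
assert flipped['div']['3']['RdYlBu'] == cl.scales['3']['div']['RdYlBu']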
708,934
colorlover
interp
Interpolate a color scale "scl" to a new one with length "r" Fun usage in IPython notebook: HTML( to_html( to_hsl( interp( cl.scales['11']['qual']['Paired'], 5000 ) ) ) )
def interp(scl, r):
    ''' Interpolate a color scale "scl" to a new one with length "r"
        Fun usage in IPython notebook:
        HTML( to_html( to_hsl( interp( cl.scales['11']['qual']['Paired'], 5000 ) ) ) ) '''
    c = []
    if isinstance(r, int):
        if r == 0:
            r_steps = []
        elif r == 1:
            # Midway point
            r_steps = [(len(scl) / 2) - 1]
        else:
            # Linearly space r from 0 to len(scl) - 1
            r_steps = [x*1.0*(len(scl) - 1)/(r-1) for x in range(r)]
    else:
        r_steps = r
    scl = to_numeric(scl)

    def hsl_interp(fraction, hsl_start, hsl_end):
        ''' Interpolate between two 3-member HSL tuples '''

        def interp_linear(f, s, e):
            return s + (e - s) * f

        def interp_circular(f, s, e):
            s_mod = s % 360
            e_mod = e % 360
            if max(s_mod, e_mod) - min(s_mod, e_mod) > 180:
                s_mod, e_mod = (s_mod + 360, e_mod) if s_mod < e_mod else (
                    s_mod, e_mod + 360)
                return interp_linear(f, s_mod, e_mod) % 360
            else:
                return interp_linear(f, s_mod, e_mod)

        return (
            interp_circular(fraction, hsl_start[0], hsl_end[0]),
            interp_circular(fraction, hsl_start[1], hsl_end[1]),
            interp_circular(fraction, hsl_start[2], hsl_end[2]),
        )

    for i in r_steps:
        section_min = math.floor(i)
        section_max = math.ceil(i)
        c_i_min = int(section_min)
        c_i_max = int(section_max)
        if c_i_min == c_i_max:
            # No interpolation needed
            hsl = rgb_to_hsl(scl[c_i_min])
        else:
            fraction = (i-section_min)
            hsl_o = rgb_to_hsl( scl[c_i_min] )  # convert rgb to hsl
            hsl_f = rgb_to_hsl( scl[c_i_max] )
            hsl = hsl_interp( fraction, hsl_o, hsl_f )
        c.append( 'hsl'+str(hsl) )

    return to_hsl( c )
(scl, r)
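Expanding the docstring's one-liner into a small runnable sketch:

import colorlover as cl

paired = cl.scales['11']['qual']['Paired']   # 11 'rgb(...)' strings
stretched = cl.interp(paired, 50)            # 50 interpolated 'hsl(...)' strings
print(len(stretched), stretched[0])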
708,936
colorlover
rgb_to_hsl
Adapted from M Bostock's RGB to HSL converter in d3.js https://github.com/mbostock/d3/blob/master/src/color/rgb.js
def rgb_to_hsl(rgb):
    ''' Adapted from M Bostock's RGB to HSL converter in d3.js
        https://github.com/mbostock/d3/blob/master/src/color/rgb.js '''
    r, g, b = float(rgb[0])/255.0,\
              float(rgb[1])/255.0,\
              float(rgb[2])/255.0
    mx = max(r, g, b)
    mn = min(r, g, b)
    # lightness; h and s are recomputed below for the chromatic case
    h = s = l = (mx + mn) / 2
    if mx == mn:  # achromatic
        h = 0
        s = 0 if l > 0 and l < 1 else h
    else:
        d = mx - mn
        s = d / (mx + mn) if l < 0.5 else d / (2 - mx - mn)
        if mx == r:
            h = (g - b) / d + (6 if g < b else 0)
        elif mx == g:
            h = (b - r) / d + 2
        else:
            h = (r - g) / d + 4
    return (int(round(h*60, 4)), int(round(s*100, 4)), int(round(l*100, 4)))
(rgb)
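A quick sanity check: pure red maps to hue 0, full saturation, half lightness, and greys are achromatic:

import colorlover as cl

print(cl.rgb_to_hsl((255, 0, 0)))      # -> (0, 100, 50)
print(cl.rgb_to_hsl((128, 128, 128)))  # achromatic grey -> (0, 0, 50)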
708,937
colorlover
scale_type
returns "rbg", "hsl", "numeric", or raises exception. ie, [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ] --> "rgb"
def scale_type( scale ): ''' returns "rbg", "hsl", "numeric", or raises exception. ie, [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ] --> "rgb" ''' swatch = scale[0] s_t = str(swatch)[0:3] if s_t in ['rgb', 'hsl']: return s_t elif isinstance(swatch,tuple) and len(swatch) == 3: return 'numeric' raise Exception('Could not determine type of input colorscale.\n\ Colorscales must be in one of these 3 forms:\n\ [ (255, 255, 255), (255, 255, 255), (255, 255, 255) ]\n\ [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ]\n\ [ "hsl(360,100,100)", "hsl(360,100,100)", "hsl(360,100,100)" ]')
(scale)
708,938
colorlover
to_hsl
convert a string rgb or numeric rgb colorscale to hsl. ie, [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ] --> [ "hsl(360,100%,100%)", "hsl(360,100%,100%)", "hsl(360,100%,100%)" ] add percentages to saturation and lightness if missing for css compatibility
def to_hsl( scale ): ''' convert a string rgb or numeric rgb colorscale to hsl. ie, [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ] --> [ "hsl(360,100%,100%)", "hsl(360,100%,100%)", "hsl(360,100%,100%)" ] add percentages to saturation and lightness if missing for css compatibility ''' hsl = [] s_t = scale_type(scale) if s_t == 'hsl': # add percentages to s and l if missing numeric_hsl_scale = [] for s in scale: s = s[s.find("(")+1:s.find(")")].replace(' ','').replace('%','').split(',') numeric_hsl_scale.append( ( float(s[0]), float(s[1]), float(s[2]) ) ) for ea in numeric_hsl_scale: h,s,l = [ str(x) for x in ea ] if s[-1] != '%': s = s + '%' if l[-1] != '%': l = l + '%' hsl_str = 'hsl(' + ', '.join([h,s,l]) + ')' hsl.append( hsl_str ) return hsl elif s_t == 'rgb': scale = to_numeric( scale ) for ea in scale: r,g,b = [ x/255.0 for x in ea ] h,l,s = colorsys.rgb_to_hls( r,g,b ) h,s,l = [ str(int(round(h*360.0))), str(int(round(s*100.0)))+'%', str(int(round(l*100.0)))+'%' ] hsl_str = 'hsl(' + ', '.join([h,s,l]) + ')' hsl.append( hsl_str ) return hsl
(scale)
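Two small verified examples of the conversion and the percent-sign normalization:

import colorlover as cl

print(cl.to_hsl(['rgb(255, 0, 0)']))  # -> ['hsl(0, 100%, 50%)']
print(cl.to_hsl(['hsl(0,100,50)']))   # percent signs added -> ['hsl(0.0, 100.0%, 50.0%)']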
708,939
colorlover
to_html
traverse color scale dictionary and return available color scales in HTML string
def to_html( scale ): ''' traverse color scale dictionary and return available color scales in HTML string ''' global s s = '' def single_scale( scale ): ''' return square html <div> for a single color ''' if scale_type( scale ) == 'numeric': scale = to_rgb( scale ) s_s = '' for ea in scale: s_s+='<div style="background-color:{0};height:20px;width:20px;margin-bottom:0px;display:inline-block;"></div>'.format(ea) return s_s def section_titles( k ): d = { 'qual':'Qualitative','div':'Diverging','seq':'Sequential' } if k in list(d.keys()): return '<h4>' + d[k] + '</h4>' return '<hr><h3>' + k + ' colors</h3>' def prettyprint( d ): global s for k, v in list(d.items()): if isinstance(v, dict): if len(list(v.keys())) != 0: s += section_titles(k) prettyprint(v) else: s += '<div style="display:inline-block;padding:10px;"><div>{0}</div>{1}</div>'.format(k, single_scale( v ) ) return s if isinstance( scale, list ): return single_scale( scale ) elif isinstance( scale, dict ): prettyprint( scale ) return s
(scale)
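One way to render the swatches, e.g. in a notebook (IPython assumed):

from IPython.display import HTML
import colorlover as cl

HTML(cl.to_html(cl.scales['3']['div']['RdYlBu']))  # one row of three swatches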
708,940
colorlover
to_numeric
converts a scale of rgb or hsl strings to a list of numeric tuples holding the rgb or hsl component values as floats. ie, [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ] --> [ (255.0, 255.0, 255.0), (255.0, 255.0, 255.0), (255.0, 255.0, 255.0) ]
def to_numeric( scale ):
    ''' converts a scale of rgb or hsl strings to a list of numeric tuples
        holding the rgb or hsl component values as floats.
        ie, [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ]
        --> [ (255.0, 255.0, 255.0), (255.0, 255.0, 255.0), (255.0, 255.0, 255.0) ] '''
    numeric_scale = []
    s_t = scale_type( scale )
    if s_t in ['rgb','hsl']:
        for s in scale:
            s = s[s.find("(")+1:s.find(")")].replace(' ','').split(',')
            numeric_scale.append( ( float(s[0]), float(s[1]), float(s[2]) ) )
    elif s_t == 'numeric':
        numeric_scale = scale
    return numeric_scale
(scale)
708,941
colorlover
to_rgb
convert an hsl or numeric rgb color scale to string rgb color scale. ie, [ "hsl(360,100,100)", "hsl(360,100,100)", "hsl(360,100,100)" ] --> [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ]
def to_rgb( scale ): ''' convert an hsl or numeric rgb color scale to string rgb color scale. ie, [ "hsl(360,100,100)", "hsl(360,100,100)", "hsl(360,100,100)" ] --> [ "rgb(255, 255, 255)", "rgb(255, 255, 255)", "rgb(255, 255, 255)" ] ''' rgb = [] s_t = scale_type(scale) if s_t == 'rgb': return scale elif s_t == 'numeric': for ea in scale: rgb.append( 'rgb'+str(ea) ) return rgb elif s_t == 'hsl': numeric_hsl_scale = [] for s in scale: s = s[s.find("(")+1:s.find(")")].replace(' ','').replace('%','').split(',') numeric_hsl_scale.append( ( float(s[0]), float(s[1]), float(s[2]) ) ) scale = numeric_hsl_scale for ea in scale: h,s,l = [ float(x) for x in ea ] r,g,b = colorsys.hls_to_rgb(h/360.0, l/100.0, s/100.0) r,g,b = [ str(int(round(x*255.0))) for x in (r,g,b) ] rgb_str = 'rgb(' + ', '.join([r,g,b]) + ')' rgb.append( rgb_str ) return rgb
(scale)
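Verified round-trip examples, including the numeric passthrough branch:

import colorlover as cl

print(cl.to_rgb(['hsl(0, 100%, 50%)']))  # -> ['rgb(255, 0, 0)']
print(cl.to_rgb([(255, 0, 0)]))          # numeric input -> ['rgb(255, 0, 0)']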
708,942
subgrounds.client.async_
AsyncSubgrounds
null
class AsyncSubgrounds(SubgroundsBase):
    @cached_property
    def _client(self):
        """Cached client"""

        return httpx.AsyncClient(http2=HTTP2_SUPPORT, timeout=self.timeout)

    async def load(self, url: str, save_schema: bool = False, is_subgraph: bool = True):
        """Performs introspection on the provided GraphQL API ``url`` to get the
        schema, stores the schema if ``save_schema`` is ``True`` and returns a
        generated class representing the GraphQL endpoint with all its entities.
        """

        try:
            loader = self._load(url, save_schema, is_subgraph)
            url, query = next(loader)  # if this fails, schema is loaded from cache
            data = await self._fetch(url, {"query": query})
            loader.send(data)
        except StopIteration as e:
            return e.value

        assert False

    async def load_subgraph(self, url: str, save_schema: bool = False) -> Subgraph:
        """Performs introspection on the provided GraphQL API ``url`` to get the
        schema, stores the schema if ``save_schema`` is ``True`` and returns a
        generated class representing the subgraph with all its entities.

        Args:
          url: The url of the API
          save_schema: Flag indicating whether or not the schema should be cached
            to disk. If ``True``, schemas are stored under the client's
            ``schema_cache`` directory (``schemas/`` by default). Defaults to False.

        Returns:
          Subgraph: A generated class representing the subgraph and its entities
        """

        return await self.load(url, save_schema, True)

    async def load_api(self, url: str, save_schema: bool = False) -> Subgraph:
        """Performs introspection on the provided GraphQL API ``url`` to get the
        schema, stores the schema if ``save_schema`` is ``True`` and returns a
        generated class representing the GraphQL endpoint with all its entities.

        Args:
          url: The url of the API
          save_schema: Flag indicating whether or not the schema should be saved
            to disk. Defaults to False.

        Returns:
          A generated class representing the subgraph and its entities
        """

        return await self.load(url, save_schema, False)

    async def execute(
        self,
        req: DataRequest,
        pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
    ) -> DataResponse:
        """Executes a :class:`DataRequest` and returns a :class:`DataResponse`.

        Args:
          req: The :class:`DataRequest` object to be executed
          pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

        Returns:
          A :class:`DataResponse` object representing the response
        """

        try:
            executor = self._execute(req, pagination_strategy)
            doc = next(executor)

            while True:
                data = await self._fetch(
                    doc.url, {"query": doc.graphql, "variables": doc.variables}
                )
                doc = executor.send(DocumentResponse(url=doc.url, data=data))
        except StopIteration as e:
            return e.value

    async def query_json(
        self,
        fpaths: FieldPath | list[FieldPath],
        pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
    ) -> list[dict[str, Any]]:
        """See :func:`~subgrounds.Subgrounds.query_json`.

        Args:
          fpaths: One or more :class:`FieldPath` objects that should be included
            in the request.
          pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

        Returns:
          The response data
        """

        fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
        req = self.mk_request(fpaths)
        data = await self.execute(req, pagination_strategy)
        return [doc.data for doc in data.responses]

    async def query_df(
        self,
        fpaths: FieldPath | list[FieldPath],
        columns: list[str] | None = None,
        concat: bool = False,
        pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
    ) -> pd.DataFrame | list[pd.DataFrame]:
        """See :func:`~subgrounds.Subgrounds.query_df`.

        Args:
          fpaths: One or more `FieldPath` objects that should be included
            in the request
          columns: The column labels.
          concat: Whether or not to concatenate the resulting dataframes.
          pagination_strategy: A class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.

        Returns:
          A :class:`pandas.DataFrame` containing the response data.
        """

        fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
        json_data = await self.query_json(fpaths, pagination_strategy)
        return df_of_json(json_data, fpaths, columns, concat)

    async def query(
        self,
        fpaths: FieldPath | list[FieldPath],
        unwrap: bool = True,
        pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
    ) -> str | int | float | bool | list | tuple | None:
        """See :func:`~subgrounds.Subgrounds.query`.

        Args:
          fpaths: One or more ``FieldPath`` object(s) to query.
          unwrap: Flag indicating whether or not, in the case where the returned data
            is a list of one element, the element itself should be returned instead
            of the list. Defaults to ``True``.
          pagination_strategy: A Class implementing the :class:`PaginationStrategy`
            ``Protocol``. If ``None``, then automatic pagination is disabled.
            Defaults to :class:`LegacyStrategy`.

        Returns:
          The ``FieldPath`` object(s) data
        """

        fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
        blob = await self.query_json(fpaths, pagination_strategy=pagination_strategy)

        def f(fpath: FieldPath) -> dict[str, Any]:
            data = fpath._extract_data(blob)
            if type(data) == list and len(data) == 1 and unwrap:
                return data[0]
            return data

        data = tuple(fpaths | map(f))
        if len(data) == 1:
            return data[0]
        return data

    async def _fetch(self, url: str, blob: dict[str, Any]) -> dict[str, Any]:
        resp = await self._client.post(
            url, json=blob, headers=default_header(url) | self.headers
        )
        resp.raise_for_status()

        try:
            raw_data = resp.json()
        except JSONDecodeError:
            raise ServerError(
                f"Server ({url}) did not respond with proper JSON"
                f"\nDid you query a proper GraphQL endpoint?"
                f"\n\n{resp.content}"
            )

        if (data := raw_data.get("data")) is None:
            raise GraphQLError(raw_data.get("errors", "Unknown Error(s) Found"))

        return data

    async def __aenter__(self):
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *args):
        await self._client.__aexit__(*args)
(timeout: int = 30, headers: dict[str, typing.Any] = <factory>, global_transforms: list[subgrounds.transform.base.RequestTransform] = <factory>, subgraphs: dict[str, subgrounds.subgraph.subgraph.Subgraph] = <factory>, schema_cache: pathlib.Path = PosixPath('schemas')) -> None
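A hedged end-to-end sketch of the async client; the subgraph URL and field names are placeholders that depend on the schema you actually load:

import asyncio
from subgrounds.client.async_ import AsyncSubgrounds

async def main():
    async with AsyncSubgrounds() as sg:
        # placeholder URL -- substitute a real GraphQL/subgraph endpoint
        subgraph = await sg.load_subgraph("https://example.invalid/subgraphs/name/org/project")
        # `pairs` / `id` are illustrative field names, not part of every schema
        df = await sg.query_df([subgraph.Query.pairs(first=5).id])
        print(df)

asyncio.run(main())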
708,943
subgrounds.client.async_
__aenter__
null
async def __aenter__(self):
    await self._client.__aenter__()
    return self
(self)
708,945
subgrounds.client.base
__eq__
null
""" `SubgroundsBase` This module implements the base API that developers can use to build custom clients to allow for intricate customization of the subgrounds feature set. {{ lab_bdg }} The building of Custom Clients is an experimental feature. """ import json import logging import warnings from abc import ABC from collections.abc import Generator from dataclasses import dataclass, field from functools import reduce from importlib import resources from pathlib import Path from typing import Annotated, Any, Type, cast from typing_extensions import Self from pipe import groupby, map, traverse from ..errors import SubgroundsError from ..pagination import ( LegacyStrategy, PaginationStrategy, normalize_strategy, paginate, ) from ..query import DataRequest, DataResponse, Document, DocumentResponse, Query from ..schema import SchemaMeta from ..subgraph import FieldPath, Subgraph from ..transform import ( DEFAULT_GLOBAL_TRANSFORMS, DEFAULT_SUBGRAPH_TRANSFORMS, RequestTransform, apply_transforms, ) from ..utils import PLAYGROUNDS_APP_URL logger = logging.getLogger("subgrounds") warnings.simplefilter("default") HTTP2_SUPPORT = True INTROSPECTION_QUERY = ( resources.files("subgrounds") / "resources" / "introspection.graphql" ) @dataclass class SubgroundsBase(ABC): """A base instance for all `Subgrounds` (should not be used directly)""" timeout: Annotated[int, "seconds"] = 30 headers: dict[str, Any] = field(default_factory=dict) global_transforms: list[RequestTransform] = field( default_factory=lambda: DEFAULT_GLOBAL_TRANSFORMS.copy() ) subgraphs: dict[str, Subgraph] = field(default_factory=dict) schema_cache: Path = Path("schemas/") def __post_init__(self): self.schema_cache = Path(self.schema_cache) @classmethod def from_pg_key(cls, key: str, **kwargs: Any) -> Self: """Create a Subgrounds* instance using a playgrounds key directly. This sets the `headers` field internally to be used with all queries made out. Args: key: The aforementioned Playgrounds API Key **kwargs: Anything else to construct the Subgrounds* instance Returns: An instance Subgrounds* with Playgrounds API support baked in """ if not key.startswith("pg-"): raise SubgroundsError( "Invalid Playgrounds Key: key should start with 'pg-'.\n\n" f"Go to {PLAYGROUNDS_APP_URL} to double check your API Key!" ) headers: dict[str, Any] = {"headers": {"Playgrounds-Api-Key": key}} if (headers_arg := kwargs.get("headers")) is not None: if "Playgrounds-Api-Key" in headers_arg: raise TypeError( f"{cls.__name__}.from_pg_key cannot take `headers`" "as a keyword argument." ) headers |= headers_arg return cls(**(kwargs | headers)) @classmethod def _subgraph_slug(cls, url: str) -> str: *_, author, name = url.split("/") return f"{author}_{name}" def make_request(self, fpaths: FieldPath | list[FieldPath]) -> DataRequest: """Creates a :class:`DataRequest` object by combining one or more :class:`FieldPath` objects. 
Args: fpaths: One or more :class:`FieldPath` objects that should be included in the request Returns: Brand new request """ fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse) return DataRequest( documents=list( fpaths | groupby(lambda fpath: fpath._subgraph._url) | map( lambda group: Document( url=group[0], query=reduce( Query.add, group[1] | map(FieldPath._selection), Query() ), ) ) ) ) mk_request = make_request def fetch_schema(self, url: str) -> None | dict[str, Any]: """Reads schema from filesystem based on subgraph_slug of the url""" self.schema_cache.mkdir(parents=True, exist_ok=True) schema_path = self.schema_cache / self._subgraph_slug(url) if (schema := schema_path.with_suffix(".json")).exists(): return json.loads(schema.read_text()) def cache_schema(self, url: str, data: dict[str, Any]): """Writes schema into filesystem based on subgraph_slug of the url""" self.schema_cache.mkdir(parents=True, exist_ok=True) schema_path = self.schema_cache / self._subgraph_slug(url) if (schema := schema_path.with_suffix(".json")).exists(): schema.write_text(json.dumps(data)) else: raise ValueError(f"Schema at {schema} doesn't exist.") def _load( self, url: str, save_schema: bool = False, is_subgraph: bool = True ) -> Generator[tuple[str, str], dict[str, Any], Subgraph]: """Loads a subgraph / graphql API by fetching the schema. If `save_schema` is set `True`, grabs schema from disk and skips query made to url. ```{note} This method utilizes the sans-io generator pattern. More on that [here](/subgrounds/advanced_topics/custom_clients/#methodology). ``` """ if not save_schema or (schema := self.fetch_schema(url)) is None: # TODO Yield `Document` once we have graphql -> AST converter schema = yield url, INTROSPECTION_QUERY.read_text() if save_schema: self.cache_schema(url, schema) self.subgraphs[url] = Subgraph( url, SchemaMeta.parse_obj(schema["__schema"]), DEFAULT_SUBGRAPH_TRANSFORMS, is_subgraph, ) return self.subgraphs[url] def _execute( self, req: DataRequest, pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy, ) -> Generator[Document, DocumentResponse, DataResponse]: """Executes a :class:`DataRequest` and returns a :class:`DataResponse`. Args: req: The :class:`DataRequest` object to be executed. pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. 
Returns: A :class:`DataResponse` object representing the response """ strategy = normalize_strategy(pagination_strategy) # Setup the main transformation pipeline via `apply_transforms` document_transforms = { url: subgraph._transforms for url, subgraph in self.subgraphs.items() } transformer = apply_transforms(self.global_transforms, document_transforms, req) # start with the base request and the response we'll be building iteratively data_req = cast(DataRequest, next(transformer)) data_resp = DataResponse(responses=[]) # for each top-level document (generally 1 per subgraph URL): # define the pagination pipeline # setup the starting doc request and the response we'll be building # iteratively until pagination is complete for doc in data_req.documents: paginator = paginate(self.subgraphs[doc.url]._schema, doc, strategy) doc_req = next(paginator) doc_resp = DocumentResponse(url=doc.url, data={}) while True: resp = yield doc_req doc_resp = doc_resp.combine(resp) try: doc_req = paginator.send(resp) except StopIteration: break data_resp = data_resp.add_responses(doc_resp) next(transformer) # toss empty None return cast(DataResponse, transformer.send(data_resp)) def _execute_iter( self, req: DataRequest, pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy, ) -> Generator[Document | DocumentResponse, DocumentResponse, None]: """Same as `execute`, except that an iterator is returned which will iterate the data pages. Args: req: The :class:`DataRequest` object to be executed pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: An iterator over the :class:`DocumentResponse` pages. ⚠️ DOES NOT apply global transforms across multiple documents or their pages. Since we yield each page as we get it, it's not possible to accurately perform the transforms since we don't collect the pages. This means transforms expecting multiple documents or pages of documents will be inaccurate. """ if len(req.documents) > 1: raise NotImplementedError( "`execute_iter` cannot currently handle multiple documents\n" "Try splitting your query into multiple queries." ) document_transforms = { url: subgraph._transforms for url, subgraph in self.subgraphs.items() } transformer = apply_transforms(self.global_transforms, document_transforms, req) strategy = normalize_strategy(pagination_strategy) data_req = cast(DataRequest, next(transformer)) for doc in data_req.documents: paginator = paginate(self.subgraphs[doc.url]._schema, doc, strategy) paginated_doc = next(paginator) while True: resp = yield paginated_doc next(transformer) # toss empty None data_resp = cast( DataResponse, transformer.send(DataResponse(responses=[resp])) ) yield data_resp.responses[0] # will only be one try: paginated_doc = paginator.send(resp) except StopIteration: break
(self, other)
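A sketch of the ``from_pg_key`` constructor documented in the module above; the key is fake and the sync ``Subgrounds`` import assumes the usual top-level re-export:

from subgrounds import Subgrounds  # any SubgroundsBase subclass works the same way

sg = Subgrounds.from_pg_key("pg-XXXXXXXXXXXXXXXX")
# equivalent to:
# Subgrounds(headers={"Playgrounds-Api-Key": "pg-XXXXXXXXXXXXXXXX"})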
708,947
subgrounds.client.base
__post_init__
null
def __post_init__(self): self.schema_cache = Path(self.schema_cache)
(self)
708,948
subgrounds.client.base
__repr__
null
def _execute( self, req: DataRequest, pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy, ) -> Generator[Document, DocumentResponse, DataResponse]: """Executes a :class:`DataRequest` and returns a :class:`DataResponse`. Args: req: The :class:`DataRequest` object to be executed. pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: A :class:`DataResponse` object representing the response """ strategy = normalize_strategy(pagination_strategy) # Setup the main transformation pipeline via `apply_transforms` document_transforms = { url: subgraph._transforms for url, subgraph in self.subgraphs.items() } transformer = apply_transforms(self.global_transforms, document_transforms, req) # start with the base request and the response we'll be building iteratively data_req = cast(DataRequest, next(transformer)) data_resp = DataResponse(responses=[]) # for each top-level document (generally 1 per subgraph URL): # define the pagination pipeline # setup the starting doc request and the response we'll be building # iteratively until pagination is complete for doc in data_req.documents: paginator = paginate(self.subgraphs[doc.url]._schema, doc, strategy) doc_req = next(paginator) doc_resp = DocumentResponse(url=doc.url, data={}) while True: resp = yield doc_req doc_resp = doc_resp.combine(resp) try: doc_req = paginator.send(resp) except StopIteration: break data_resp = data_resp.add_responses(doc_resp) next(transformer) # toss empty None return cast(DataResponse, transformer.send(data_resp))
(self)
708,949
subgrounds.client.base
_execute
Executes a :class:`DataRequest` and returns a :class:`DataResponse`. Args: req: The :class:`DataRequest` object to be executed. pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: A :class:`DataResponse` object representing the response
def _execute( self, req: DataRequest, pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy, ) -> Generator[Document, DocumentResponse, DataResponse]: """Executes a :class:`DataRequest` and returns a :class:`DataResponse`. Args: req: The :class:`DataRequest` object to be executed. pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: A :class:`DataResponse` object representing the response """ strategy = normalize_strategy(pagination_strategy) # Setup the main transformation pipeline via `apply_transforms` document_transforms = { url: subgraph._transforms for url, subgraph in self.subgraphs.items() } transformer = apply_transforms(self.global_transforms, document_transforms, req) # start with the base request and the response we'll be building iteratively data_req = cast(DataRequest, next(transformer)) data_resp = DataResponse(responses=[]) # for each top-level document (generally 1 per subgraph URL): # define the pagination pipeline # setup the starting doc request and the response we'll be building # iteratively until pagination is complete for doc in data_req.documents: paginator = paginate(self.subgraphs[doc.url]._schema, doc, strategy) doc_req = next(paginator) doc_resp = DocumentResponse(url=doc.url, data={}) while True: resp = yield doc_req doc_resp = doc_resp.combine(resp) try: doc_req = paginator.send(resp) except StopIteration: break data_resp = data_resp.add_responses(doc_resp) next(transformer) # toss empty None return cast(DataResponse, transformer.send(data_resp))
(self, req: subgrounds.query.DataRequest, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> collections.abc.Generator[subgrounds.query.Document, subgrounds.query.DocumentResponse, subgrounds.query.DataResponse]
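Because ``_execute`` is sans-io, a custom client drives it with its own transport. A minimal synchronous driver sketch, where ``post_json`` is a hypothetical HTTP helper returning the ``data`` payload:

from subgrounds.query import DocumentResponse

def run(sg, req, post_json):
    executor = sg._execute(req)
    doc = next(executor)
    try:
        while True:
            data = post_json(doc.url, {"query": doc.graphql, "variables": doc.variables})
            doc = executor.send(DocumentResponse(url=doc.url, data=data))
    except StopIteration as done:
        return done.value  # the final DataResponse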
708,950
subgrounds.client.base
_execute_iter
Same as `execute`, except that an iterator is returned which will iterate the data pages. Args: req: The :class:`DataRequest` object to be executed pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: An iterator over the :class:`DocumentResponse` pages. ⚠️ DOES NOT apply global transforms across multiple documents or their pages. Since we yield each page as we get it, it's not possible to accurately perform the transforms since we don't collect the pages. This means transforms expecting multiple documents or pages of documents will be inaccurate.
def _execute_iter( self, req: DataRequest, pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy, ) -> Generator[Document | DocumentResponse, DocumentResponse, None]: """Same as `execute`, except that an iterator is returned which will iterate the data pages. Args: req: The :class:`DataRequest` object to be executed pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: An iterator over the :class:`DocumentResponse` pages. ⚠️ DOES NOT apply global transforms across multiple documents or their pages. Since we yield each page as we get it, it's not possible to accurately perform the transforms since we don't collect the pages. This means transforms expecting multiple documents or pages of documents will be inaccurate. """ if len(req.documents) > 1: raise NotImplementedError( "`execute_iter` cannot currently handle multiple documents\n" "Try splitting your query into multiple queries." ) document_transforms = { url: subgraph._transforms for url, subgraph in self.subgraphs.items() } transformer = apply_transforms(self.global_transforms, document_transforms, req) strategy = normalize_strategy(pagination_strategy) data_req = cast(DataRequest, next(transformer)) for doc in data_req.documents: paginator = paginate(self.subgraphs[doc.url]._schema, doc, strategy) paginated_doc = next(paginator) while True: resp = yield paginated_doc next(transformer) # toss empty None data_resp = cast( DataResponse, transformer.send(DataResponse(responses=[resp])) ) yield data_resp.responses[0] # will only be one try: paginated_doc = paginator.send(resp) except StopIteration: break
(self, req: subgrounds.query.DataRequest, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> collections.abc.Generator[subgrounds.query.Document | subgrounds.query.DocumentResponse, subgrounds.query.DocumentResponse, None]
708,952
subgrounds.client.base
_load
Loads a subgraph / graphql API by fetching the schema. If `save_schema` is set `True`, grabs schema from disk and skips query made to url. ```{note} This method utilizes the sans-io generator pattern. More on that [here](/subgrounds/advanced_topics/custom_clients/#methodology). ```
def _load( self, url: str, save_schema: bool = False, is_subgraph: bool = True ) -> Generator[tuple[str, str], dict[str, Any], Subgraph]: """Loads a subgraph / graphql API by fetching the schema. If `save_schema` is set `True`, grabs schema from disk and skips query made to url. ```{note} This method utilizes the sans-io generator pattern. More on that [here](/subgrounds/advanced_topics/custom_clients/#methodology). ``` """ if not save_schema or (schema := self.fetch_schema(url)) is None: # TODO Yield `Document` once we have graphql -> AST converter schema = yield url, INTROSPECTION_QUERY.read_text() if save_schema: self.cache_schema(url, schema) self.subgraphs[url] = Subgraph( url, SchemaMeta.parse_obj(schema["__schema"]), DEFAULT_SUBGRAPH_TRANSFORMS, is_subgraph, ) return self.subgraphs[url]
(self, url: str, save_schema: bool = False, is_subgraph: bool = True) -> collections.abc.Generator[tuple[str, str], dict[str, typing.Any], subgrounds.subgraph.subgraph.Subgraph]
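The same sans-io pattern applies to ``_load``; this mirrors how ``AsyncSubgrounds.load`` drives it, with ``post_json`` again standing in for any transport:

def load_with(sg, url, post_json, save_schema=False):
    loader = sg._load(url, save_schema)
    try:
        url, query = next(loader)  # introspection document to send
        loader.send(post_json(url, {"query": query}))
    except StopIteration as done:
        return done.value          # the generated Subgraph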
708,953
subgrounds.client.base
cache_schema
Writes schema into filesystem based on subgraph_slug of the url
def cache_schema(self, url: str, data: dict[str, Any]):
    """Writes schema into filesystem based on subgraph_slug of the url"""

    self.schema_cache.mkdir(parents=True, exist_ok=True)
    schema_path = self.schema_cache / self._subgraph_slug(url)

    # Write (or overwrite) the cached schema; requiring the file to already
    #  exist would make first-time caching from `_load` impossible.
    schema_path.with_suffix(".json").write_text(json.dumps(data))
(self, url: str, data: dict[str, typing.Any])
708,954
subgrounds.client.async_
execute
Executes a :class:`DataRequest` and returns a :class:`DataResponse`. Args: req: The :class:`DataRequest` object to be executed pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: A :class:`DataResponse` object representing the response
async def execute(
    self,
    req: DataRequest,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> DataResponse:
    """Executes a :class:`DataRequest` and returns a :class:`DataResponse`.

    Args:
      req: The :class:`DataRequest` object to be executed
      pagination_strategy: A Class implementing the :class:`PaginationStrategy`
        ``Protocol``. If ``None``, then automatic pagination is disabled.
        Defaults to :class:`LegacyStrategy`.

    Returns:
      A :class:`DataResponse` object representing the response
    """

    try:
        executor = self._execute(req, pagination_strategy)
        doc = next(executor)

        while True:
            data = await self._fetch(
                doc.url, {"query": doc.graphql, "variables": doc.variables}
            )
            doc = executor.send(DocumentResponse(url=doc.url, data=data))
    except StopIteration as e:
        return e.value
(self, req: subgrounds.query.DataRequest, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> subgrounds.query.DataResponse
708,955
subgrounds.client.base
fetch_schema
Reads schema from filesystem based on subgraph_slug of the url
def fetch_schema(self, url: str) -> None | dict[str, Any]: """Reads schema from filesystem based on subgraph_slug of the url""" self.schema_cache.mkdir(parents=True, exist_ok=True) schema_path = self.schema_cache / self._subgraph_slug(url) if (schema := schema_path.with_suffix(".json")).exists(): return json.loads(schema.read_text())
(self, url: str) -> None | dict[str, typing.Any]
708,956
subgrounds.client.async_
load
Performs introspection on the provided GraphQL API ``url`` to get the schema, stores the schema if ``save_schema`` is ``True`` and returns a generated class representing the GraphQL endpoint with all its entities.
async def load(self, url: str, save_schema: bool = False, is_subgraph: bool = True):
    """Performs introspection on the provided GraphQL API ``url`` to get the
    schema, stores the schema if ``save_schema`` is ``True`` and returns a
    generated class representing the GraphQL endpoint with all its entities.
    """

    try:
        loader = self._load(url, save_schema, is_subgraph)
        url, query = next(loader)  # if this fails, schema is loaded from cache
        data = await self._fetch(url, {"query": query})
        loader.send(data)
    except StopIteration as e:
        return e.value

    assert False
(self, url: str, save_schema: bool = False, is_subgraph: bool = True)
708,957
subgrounds.client.async_
load_api
Performs introspection on the provided GraphQL API ``url`` to get the schema, stores the schema if ``save_schema`` is ``True`` and returns a generated class representing the GraphQL endpoint with all its entities. Args: url: The url of the API save_schema: Flag indicating whether or not the schema should be saved to disk. Defaults to False. Returns: A generated class representing the subgraph and its entities
async def load_api(self, url: str, save_schema: bool = False) -> Subgraph:
    """Performs introspection on the provided GraphQL API ``url`` to get the
    schema, stores the schema if ``save_schema`` is ``True`` and returns a
    generated class representing the GraphQL endpoint with all its entities.

    Args:
      url: The url of the API
      save_schema: Flag indicating whether or not the schema should be saved
        to disk. Defaults to False.

    Returns:
      A generated class representing the subgraph and its entities
    """

    return await self.load(url, save_schema, False)
(self, url: str, save_schema: bool = False) -> subgrounds.subgraph.subgraph.Subgraph
708,958
subgrounds.client.async_
load_subgraph
Performs introspection on the provided GraphQL API ``url`` to get the schema, stores the schema if ``save_schema`` is ``True`` and returns a generated class representing the subgraph with all its entities. Args: url: The url of the API save_schema: Flag indicating whether or not the schema should be cached to disk. If ``True``, schemas are stored under the client's ``schema_cache`` directory (``schemas/`` by default). Defaults to False. Returns: Subgraph: A generated class representing the subgraph and its entities
async def load_subgraph(self, url: str, save_schema: bool = False) -> Subgraph:
    """Performs introspection on the provided GraphQL API ``url`` to get the
    schema, stores the schema if ``save_schema`` is ``True`` and returns a
    generated class representing the subgraph with all its entities.

    Args:
      url: The url of the API
      save_schema: Flag indicating whether or not the schema should be cached
        to disk. If ``True``, schemas are stored under the client's
        ``schema_cache`` directory (``schemas/`` by default). Defaults to False.

    Returns:
      Subgraph: A generated class representing the subgraph and its entities
    """

    return await self.load(url, save_schema, True)
(self, url: str, save_schema: bool = False) -> subgrounds.subgraph.subgraph.Subgraph
708,959
subgrounds.client.base
make_request
Creates a :class:`DataRequest` object by combining one or more :class:`FieldPath` objects. Args: fpaths: One or more :class:`FieldPath` objects that should be included in the request Returns: Brand new request
def make_request(self, fpaths: FieldPath | list[FieldPath]) -> DataRequest: """Creates a :class:`DataRequest` object by combining one or more :class:`FieldPath` objects. Args: fpaths: One or more :class:`FieldPath` objects that should be included in the request Returns: Brand new request """ fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse) return DataRequest( documents=list( fpaths | groupby(lambda fpath: fpath._subgraph._url) | map( lambda group: Document( url=group[0], query=reduce( Query.add, group[1] | map(FieldPath._selection), Query() ), ) ) ) )
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath]) -> subgrounds.query.DataRequest
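A sketch of the grouping behaviour: field paths are bucketed by subgraph URL, yielding one ``Document`` per endpoint (field names illustrative):

req = sg.make_request([
    subgraph.Query.pairs.id,       # illustrative field paths
    subgraph.Query.pairs.token0,
])
for doc in req.documents:          # one Document per distinct subgraph URL
    print(doc.url)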
708,961
subgrounds.client.async_
query
See :func:`~subgrounds.Subgrounds.query`. Args: fpaths: One or more ``FieldPath`` object(s) to query. unwrap: Flag indicating whether or not, in the case where the returned data is a list of one element, the element itself should be returned instead of the list. Defaults to ``True``. pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: The ``FieldPath`` object(s) data
async def query(
    self,
    fpaths: FieldPath | list[FieldPath],
    unwrap: bool = True,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> str | int | float | bool | list | tuple | None:
    """See :func:`~subgrounds.Subgrounds.query`.

    Args:
      fpaths: One or more ``FieldPath`` object(s) to query.
      unwrap: Flag indicating whether or not, in the case where the returned data
        is a list of one element, the element itself should be returned instead
        of the list. Defaults to ``True``.
      pagination_strategy: A Class implementing the :class:`PaginationStrategy`
        ``Protocol``. If ``None``, then automatic pagination is disabled.
        Defaults to :class:`LegacyStrategy`.

    Returns:
      The ``FieldPath`` object(s) data
    """

    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    blob = await self.query_json(fpaths, pagination_strategy=pagination_strategy)

    def f(fpath: FieldPath) -> dict[str, Any]:
        data = fpath._extract_data(blob)
        if type(data) == list and len(data) == 1 and unwrap:
            return data[0]
        return data

    data = tuple(fpaths | map(f))
    if len(data) == 1:
        return data[0]
    return data
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], unwrap: bool = True, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> str | int | float | bool | list | tuple | None
708,962
subgrounds.client.async_
query_df
See :func:`~subgrounds.Subgrounds.query_df`. Args: fpaths: One or more `FieldPath` objects that should be included in the request columns: The column labels. concat: Whether or not to concatenate the resulting dataframes. pagination_strategy: A class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Returns: A :class:`pandas.DataFrame` containing the response data.
async def query_df(
    self,
    fpaths: FieldPath | list[FieldPath],
    columns: list[str] | None = None,
    concat: bool = False,
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> pd.DataFrame | list[pd.DataFrame]:
    """See :func:`~subgrounds.Subgrounds.query_df`.

    Args:
      fpaths: One or more `FieldPath` objects that should be included
        in the request
      columns: The column labels.
      concat: Whether or not to concatenate the resulting dataframes.
      pagination_strategy: A class implementing the :class:`PaginationStrategy`
        ``Protocol``. If ``None``, then automatic pagination is disabled.

    Returns:
      A :class:`pandas.DataFrame` containing the response data.
    """

    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    json_data = await self.query_json(fpaths, pagination_strategy)
    return df_of_json(json_data, fpaths, columns, concat)
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], columns: Optional[list[str]] = None, concat: bool = False, pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> pandas.core.frame.DataFrame | list[pandas.core.frame.DataFrame]
708,963
subgrounds.client.async_
query_json
See :func:`~subgrounds.Subgrounds.query_json`. Args: fpaths: One or more :class:`FieldPath` objects that should be included in the request. pagination_strategy: A Class implementing the :class:`PaginationStrategy` ``Protocol``. If ``None``, then automatic pagination is disabled. Defaults to :class:`LegacyStrategy`. Returns: The response data
async def query_json(
    self,
    fpaths: FieldPath | list[FieldPath],
    pagination_strategy: Type[PaginationStrategy] | None = LegacyStrategy,
) -> list[dict[str, Any]]:
    """See :func:`~subgrounds.Subgrounds.query_json`.

    Args:
      fpaths: One or more :class:`FieldPath` objects that should be included
        in the request.
      pagination_strategy: A Class implementing the :class:`PaginationStrategy`
        ``Protocol``. If ``None``, then automatic pagination is disabled.
        Defaults to :class:`LegacyStrategy`.

    Returns:
      The response data
    """

    fpaths = list([fpaths] | traverse | map(FieldPath._auto_select) | traverse)
    req = self.mk_request(fpaths)
    data = await self.execute(req, pagination_strategy)
    return [doc.data for doc in data.responses]
(self, fpaths: subgrounds.subgraph.fieldpath.FieldPath | list[subgrounds.subgraph.fieldpath.FieldPath], pagination_strategy: Optional[Type[subgrounds.pagination.strategies.PaginationStrategy]] = <class 'subgrounds.pagination.strategies.LegacyStrategy'>) -> list[dict[str, typing.Any]]
708,964
subgrounds.subgraph.fieldpath
FieldPath
FieldPath(subgraph: 'Subgraph', root_type: 'TypeRef.T', type_: 'TypeRef.T', path: 'list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]]') -> 'None'
class FieldPath(FieldOperatorMixin):
    _subgraph: Subgraph
    _root_type: TypeRef.T
    _type: TypeRef.T
    _path: list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]]

    # Purely for testing
    __test_mode: ClassVar[bool] = False

    def __init__(
        self,
        subgraph: Subgraph,
        root_type: TypeRef.T,
        type_: TypeRef.T,
        path: list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]],
    ) -> None:
        self._subgraph = subgraph
        self._root_type = root_type
        self._type = type_
        self._path = path

        # Add fields as attributes if leaf is object
        match self._subgraph._schema.type_of(self._leaf):
            case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as type_ if len(
                self._path
            ) < FPATH_DEPTH_LIMIT:  # We generate fieldpaths up to depth 8
                for fmeta in type_.fields:
                    path = self._path.copy()
                    path.append((None, fmeta))

                    super().__setattr__(
                        fmeta.name,
                        FieldPath(
                            subgraph=self._subgraph,
                            root_type=self._root_type,
                            type_=fmeta.type_,
                            path=path,
                        ),
                    )

            case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as type_ if len(
                self._path
            ) == FPATH_DEPTH_LIMIT:
                for fmeta in type_.fields:
                    # NOTE: We set the attribute to None on purpose since we want code
                    #  completion to work while avoiding infinite loops caused by cycles
                    #  in the GraphQL schema. The attribute itself will be initialized
                    #  on __getattribute__
                    super().__setattr__(fmeta.name, None)

            case _:
                pass

    @property
    def _schema(self) -> SchemaMeta:
        return self._subgraph._schema

    @property
    def _root(self) -> TypeMeta.FieldMeta:
        """Returns the type information of the root field of the current
        :class:`FieldPath`

        Returns:
          Type information of the root field of the current :class:`FieldPath`
        """

        return self._path[0][1]

    @property
    def _leaf(self) -> TypeMeta.FieldMeta:
        """Returns the type information of the leaf field of the current
        :class:`FieldPath`

        Returns:
          Type information of the leaf field of the current :class:`FieldPath`
        """

        return self._path[-1][1]

    @staticmethod
    def _hash(msg: str) -> str:
        h = blake2b(digest_size=8)
        h.update(msg.encode("UTF-8"))
        return "x" + h.hexdigest()

    @staticmethod
    def _merge(fpaths: list[FieldPath]) -> list[Selection]:
        """Returns a Selection tree containing all selection paths in `fpaths`.

        Note: Assumes that all fieldpaths in `fpaths` belong to the same subgraph

        Args:
          fpaths: The list of :class:`FieldPath` objects to merge

        Returns:
          The merged list of :class:`Selection` trees
        """

        query = reduce(Query.add, fpaths | map(FieldPath._selection), Query())
        return query.selection

    def _name_path(self, use_aliases: bool = False) -> list[str]:
        """Returns a list of strings corresponding to the names of all fields
        selected in the current :class:`FieldPath`. If :attr:`use_aliases` is True,
        then if a field has an automatically generated alias, the alias will be
        returned.

        Args:
          use_aliases: Flag indicating whether or not to use the fields'
            automatically generated alias (if present). Defaults to False.

        Returns:
          List of field names selected in the current :class:`FieldPath`
        """

        def gen_alias(ele: tuple[dict[str, Any] | None, TypeMeta.FieldMeta]) -> str:
            if ele[0] != {} and ele[0] is not None:
                return FieldPath._hash(ele[1].name + str(ele[0]))
            else:
                return ele[1].name

        return list(
            self._path | map(lambda ele: gen_alias(ele) if use_aliases else ele[1].name)
        )

    def _name(self, use_aliases: bool = False) -> str:
        """Generates the name of the current :class:`FieldPath` using the names of
        the fields it selects. If :attr:`use_aliases` is True, then if a field has
        an automatically generated alias, the alias will be used.

        Args:
          use_aliases: Flag indicating whether or not to use the fields'
            automatically generated alias (if present). Defaults to False.

        Returns:
          The generated name of the current :class:`FieldPath`.
        """

        return "_".join(self._name_path(use_aliases=use_aliases))

    def _auto_select(self) -> FieldPath | list[FieldPath]:
        match self._subgraph._schema.type_of_typeref(self._leaf.type_):
            case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as obj:
                return list(
                    fieldpaths_of_object(self._subgraph, obj)
                    | map(partial(FieldPath._extend, self))
                )

            case _:
                return self

    def _extract_data(
        self, data: dict[str, Any] | list[dict[str, Any]]
    ) -> list[Any] | Any:
        """Extract the data corresponding to the current :class:`FieldPath` from
        the dictionary :attr:`data`.

        Args:
          data: Data dictionary that contains the data corresponding to the
            current :class:`FieldPath`.

        Returns:
          Data corresponding to the current :class:`FieldPath`.
        """

        return extract_data(self._name_path(use_aliases=True), data)

    def _selection(self) -> Selection | list[Selection]:
        """Returns a selection or list of selections corresponding to the current
        :class:`FieldPath`.

        Returns:
          The :class:`Selection` tree equivalent to the current :class:`FieldPath`.
        """

        def f(
            path: list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]]
        ) -> list[Selection]:
            match path:
                case [
                    (args, TypeMeta.FieldMeta() as fmeta),
                    *rest,
                ] if args == {} or args is None:
                    return [Selection(fmeta, selection=f(rest))]

                case [(args, TypeMeta.FieldMeta() as fmeta), *rest]:
                    return [
                        Selection(
                            fmeta,
                            # TODO: Revisit this
                            alias=FieldPath._hash(fmeta.name + str(args)),
                            arguments=arguments_of_field_args(
                                self._subgraph._schema, fmeta, args
                            ),
                            selection=f(rest),
                        )
                    ]

                case []:
                    return []

            assert False  # Suppress mypy missing return statement warning

        return f(self._path)[0]

    def _set_arguments(
        self, args: dict[str, Any], selection: list[FieldPath] = []
    ) -> FieldPath | list[FieldPath]:
        """Set the arguments to the leaf of the current :class:`FieldPath`. The
        method returns the :attr:`self`.

        Args:
          args: The arguments to set on the leaf field of the current
            :class:`FieldPath`.
          selection: An optional list of sub-:class:`FieldPath` objects to extend
            the current :class:`FieldPath` with. Defaults to [].

        Returns:
          The updated :class:`FieldPath`, or the list of extended field paths
          when :attr:`selection` is non-empty.
        """

        def fmt_arg(name, raw_arg):
            match (name, raw_arg):
                case ("where", [Filter(), *_] as filters):
                    return Filter.to_dict(filters)

                case ("orderBy", FieldPath() as fpath):
                    if paths := fpath._name_path():
                        return "__".join(paths)

                    raise Exception(
                        f"Cannot use empty paths as orderBy argument {fpath}"
                    )

                case _:
                    return raw_arg

        match self._leaf:
            case TypeMeta.FieldMeta():
                args = {key: fmt_arg(key, val) for key, val in args.items()}
                self._path[-1] = (args, self._path[-1][1])

                if len(selection) > 0:
                    return list(selection | map(partial(FieldPath._extend, self)))
                else:
                    return self

            case _:
                raise TypeError(f"Unexpected type for FieldPath {self}")

    def _select(self, name: str) -> FieldPath:
        """Returns a new FieldPath corresponding to the FieldPath `self` extended
        with an additional selection on the field named `name`.

        Args:
          name: The name of the field to expand on the leaf of `fpath`

        Raises:
          TypeError: if the field named `name` cannot be selected from the
            current leaf

        Returns:
          A new FieldPath containing `fpath` extended with the field named `name`
        """

        match self._schema.type_of_typeref(self._type):
            # If the FieldPath already ends in a scalar or enum,
            #  nothing further can be selected
            case TypeMeta.EnumMeta() | TypeMeta.ScalarMeta():
                raise TypeError(
                    f"FieldPath: path {self} ends with a scalar field!"
                    f" cannot select field {name}"
                )

            case TypeMeta.ObjectMeta() | TypeMeta.InterfaceMeta() as obj:
                field = obj.field(name)

                match self._schema.type_of_typeref(field.type_):
                    case (
                        TypeMeta.ObjectMeta()
                        | TypeMeta.InterfaceMeta()
                        | TypeMeta.EnumMeta()
                        | TypeMeta.ScalarMeta()
                    ):
                        # Copy current path and append newly selected field
                        path = self._path.copy()
                        path.append((None, field))

                        # Return new FieldPath
                        return FieldPath(
                            subgraph=self._subgraph,
                            root_type=self._root_type,
                            type_=field.type_,
                            path=path,
                        )

                    case _:
                        raise TypeError(
                            f"FieldPath: field {name} is not a valid field for object"
                            f" {self._type.name} at path {self}"
                        )

            case _:
                raise TypeError(
                    f"FieldPath: Unexpected type {self._type.name}"
                    f" when selecting {name} on {self}"
                )

    def _extend(self, ext: FieldPath) -> FieldPath:
        """Extends the current :class:`FieldPath` with the :class:`FieldPath`
        :attr:`ext`. :attr:`ext` must start where the current :class:`FieldPath`
        ends.

        Args:
          ext: The :class:`FieldPath` representing the extension

        Raises:
          TypeError: if :attr:`ext` does not start at the type where the current
            :class:`FieldPath` ends

        Returns:
          A new :class:`FieldPath` containing the current :class:`FieldPath`
          extended with :attr:`ext`
        """

        match self._leaf:
            case TypeMeta.FieldMeta() as fmeta:
                match self._schema.type_of_typeref(fmeta.type_):
                    case TypeMeta.ObjectMeta(name=name) | TypeMeta.InterfaceMeta(
                        name=name
                    ):
                        if name == ext._root_type.name:
                            return FieldPath(
                                subgraph=self._subgraph,
                                root_type=self._root_type,
                                type_=ext._type,
                                path=self._path + ext._path,
                            )
                        else:
                            raise TypeError(
                                f"extend: FieldPath {ext} does not start at the"
                                f" same type from where FieldPath {self} ends"
                            )

                    case _:
                        raise TypeError(f"extend: FieldPath {self} is not an object field")

            case _:
                raise TypeError(f"extend: FieldPath {self} is not an object field")

    # ================================================================
    #  Overloaded magic functions
    # ================================================================

    # When setting arguments
    def __call__(self, **kwargs: Any) -> Any:
        """Sets field arguments and expands subfields. The updated FieldPath is
        returned.

        Example:

        >>> aaveV2 = sg.load_subgraph(
        ...     "https://api.thegraph.com/subgraphs/name/aave/protocol-v2"
        ... )
        >>> query = aaveV2.Query.borrows(
        ...     first=10,
        ...     order_by=aaveV2.Borrow.timestamp,
        ...     order_direction="desc",
        ...     selection=[
        ...         aaveV2.Borrow.id,
        ...         aaveV2.Borrow.timestamp,
        ...         aaveV2.Borrow.amount
        ...     ]
        ... )

        Returns:
          The updated field path if :attr:`selection` is not specified, or a list
          of fieldpaths when :attr:`selection` is specified.
""" selection = kwargs.pop("selection", []) return self._set_arguments(kwargs, selection) # Field selection def __getattribute__(self, __name: str) -> Any: # Small hack to get code completion to work while allowing updates to FieldPath # (i.e.: setting arguments) try: match super().__getattribute__(__name): case FieldPath() | SyntheticField() | None: return self._select(__name) case value: return value except AttributeError: return self._select(__name) # Filtering def __eq__(self, value: FieldPath | Any) -> Filter | bool: if FieldPath.__test_mode: # Purely used for testing so that assertEqual works return ( self._subgraph == value._subgraph and self._type == value._type and self._path == value._path ) else: return Filter.mk_filter(self, Filter.Operator.EQ, value) def __ne__(self, value: Any) -> Filter: return Filter.mk_filter(self, Filter.Operator.NEQ, value) def __lt__(self, value: Any) -> Filter: return Filter.mk_filter(self, Filter.Operator.LT, value) def __gt__(self, value: Any) -> Filter: return Filter.mk_filter(self, Filter.Operator.GT, value) def __le__(self, value: Any) -> Filter: return Filter.mk_filter(self, Filter.Operator.LTE, value) def __ge__(self, value: Any) -> Filter: return Filter.mk_filter(self, Filter.Operator.GTE, value) # Utility def __str__(self) -> str: return ".".join(self._path | map(lambda ele: ele[1].name)) def __repr__(self) -> str: vars = f"{self._subgraph._url}, {self._root_type.name}, {self._name_path()}" return f"FieldPath({vars})"
(subgraph: 'Subgraph', root_type: 'TypeRef.T', type_: 'TypeRef.T', path: 'list[tuple[dict[str, Any] | None, TypeMeta.FieldMeta]]') -> 'None'
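Putting the pieces together: argument formatting (``where`` filters and ``orderBy`` field paths) as handled by ``_set_arguments``; all entity and field names below are illustrative:

swaps = subgraph.Query.swaps(
    first=10,
    orderBy=subgraph.Swap.timestamp,              # formatted into the orderBy string
    where=[subgraph.Swap.amountUSD > 1_000_000],  # comparison builds a Filter
)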
708,965
subgrounds.subgraph.fieldpath
__abs__
null
def __abs__(self) -> SyntheticField: return SyntheticField( operator.abs, type_ref_of_unary_op("abs", self._type), self )
(self) -> subgrounds.subgraph.fieldpath.SyntheticField
708,966
subgrounds.subgraph.fieldpath
__add__
null
def __add__(self, other: Any) -> SyntheticField: return SyntheticField( operator.add, typeref_of_binary_op("add", self._type, other), [self, other] )
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
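The operator overloads above return ``SyntheticField`` objects, so client-side derived columns can be declared by doing arithmetic on field paths (names illustrative):

# a derived column computed client-side from two schema fields
subgraph.Swap.total_in = subgraph.Swap.amount0In + subgraph.Swap.amount1In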
708,967
subgrounds.subgraph.fieldpath
__call__
Sets field arguments and expands subfields. The updated FieldPath is returned. Example: >>> aaveV2 = sg.load_subgraph( ... "https://api.thegraph.com/subgraphs/name/aave/protocol-v2" ... ) >>> query = aaveV2.Query.borrows( ... first=10, ... order_by=aaveV2.Borrow.timestamp, ... order_direction="desc", ... selection=[ ... aaveV2.Borrow.id, ... aaveV2.Borrow.timestamp, ... aaveV2.Borrow.amount ... ] ... ) Returns: The updated field path if :attr:`selection` is not specified, or a list of fieldpaths when :attr:`selection` is specified.
def __call__(self, **kwargs: Any) -> Any:
    """Sets field arguments and expands subfields. The updated FieldPath is
    returned.

    Example:

    >>> aaveV2 = sg.load_subgraph(
    ...     "https://api.thegraph.com/subgraphs/name/aave/protocol-v2"
    ... )
    >>> query = aaveV2.Query.borrows(
    ...     first=10,
    ...     order_by=aaveV2.Borrow.timestamp,
    ...     order_direction="desc",
    ...     selection=[
    ...         aaveV2.Borrow.id,
    ...         aaveV2.Borrow.timestamp,
    ...         aaveV2.Borrow.amount
    ...     ]
    ... )

    Returns:
      The updated field path if :attr:`selection` is not specified, or a list
      of fieldpaths when :attr:`selection` is specified.
    """

    selection = kwargs.pop("selection", [])
    return self._set_arguments(kwargs, selection)
(self, **kwargs: Any) -> Any
708,968
subgrounds.subgraph.fieldpath
__eq__
null
def __eq__(self, value: FieldPath | Any) -> Filter | bool: if FieldPath.__test_mode: # Purely used for testing so that assertEqual works return ( self._subgraph == value._subgraph and self._type == value._type and self._path == value._path ) else: return Filter.mk_filter(self, Filter.Operator.EQ, value)
(self, value: Union[subgrounds.subgraph.fieldpath.FieldPath, Any]) -> subgrounds.subgraph.filter.Filter | bool
708,969
subgrounds.subgraph.fieldpath
__floordiv__
null
def __floordiv__(self, other: Any) -> SyntheticField: return SyntheticField( operator.floordiv, typeref_of_binary_op("div", self._type, other), [self, other], )
(self, other: Any) -> subgrounds.subgraph.fieldpath.SyntheticField
708,970
subgrounds.subgraph.fieldpath
__ge__
null
def __ge__(self, value: Any) -> Filter: return Filter.mk_filter(self, Filter.Operator.GTE, value)
(self, value: Any) -> subgrounds.subgraph.filter.Filter
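Note that these comparison operators return ``Filter`` objects rather than booleans (outside test mode), which is what makes ``where=[...]`` arguments like the earlier sketch work; names illustrative:

f = subgraph.Swap.timestamp >= 1_700_000_000  # a Filter, not a bool
swaps = subgraph.Query.swaps(where=[f])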