language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
mlflow__mlflow
dev/clint/src/clint/rules/docstring_param_order.py
{ "start": 36, "end": 248 }
class ____(Rule): def __init__(self, params: list[str]) -> None: self.params = params def _message(self) -> str: return f"Unordered parameters in docstring: {self.params}"
DocstringParamOrder
python
plotly__plotly.py
plotly/graph_objs/histogram2d/_hoverlabel.py
{ "start": 233, "end": 11269 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "histogram2d" _path_str = "histogram2d.hoverlabel" _valid_props = { "align", "alignsrc", "bgcolor", "bgcolorsrc", "bordercolor", "bordercolorsrc", "font", "namelength", "namelengthsrc", "showarrow", } @property def align(self): """ Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines The 'align' property is an enumeration that may be specified as: - One of the following enumeration values: ['left', 'right', 'auto'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["align"] @align.setter def align(self, val): self["align"] = val @property def alignsrc(self): """ Sets the source reference on Chart Studio Cloud for `align`. The 'alignsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["alignsrc"] @alignsrc.setter def alignsrc(self, val): self["alignsrc"] = val @property def bgcolor(self): """ Sets the background color of the hover labels for this trace The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val @property def bgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bgcolor`. 
The 'bgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bgcolorsrc"] @bgcolorsrc.setter def bgcolorsrc(self, val): self["bgcolorsrc"] = val @property def bordercolor(self): """ Sets the border color of the hover labels for this trace. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val @property def bordercolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bordercolor`. The 'bordercolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bordercolorsrc"] @bordercolorsrc.setter def bordercolorsrc(self, val): self["bordercolorsrc"] = val @property def font(self): """ Sets the font used in hover labels. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.histogram2d.hoverlabel.Font` - A dict of string/value properties that will be passed to the Font constructor Returns ------- plotly.graph_objs.histogram2d.hoverlabel.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val @property def namelength(self): """ Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. 
The 'namelength' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [-1, 9223372036854775807] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|numpy.ndarray """ return self["namelength"] @namelength.setter def namelength(self, val): self["namelength"] = val @property def namelengthsrc(self): """ Sets the source reference on Chart Studio Cloud for `namelength`. The 'namelengthsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["namelengthsrc"] @namelengthsrc.setter def namelengthsrc(self, val): self["namelengthsrc"] = val @property def showarrow(self): """ Sets whether or not to show the hover label arrow/triangle pointing to the data point. The 'showarrow' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["showarrow"] @showarrow.setter def showarrow(self, val): self["showarrow"] = val @property def _prop_descriptions(self): return """\ align Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines alignsrc Sets the source reference on Chart Studio Cloud for `align`. bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on Chart Studio Cloud for `bordercolor`. font Sets the font used in hover labels. namelength Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. 
namelengthsrc Sets the source reference on Chart Studio Cloud for `namelength`. showarrow Sets whether or not to show the hover label arrow/triangle pointing to the data point. """ def __init__( self, arg=None, align=None, alignsrc=None, bgcolor=None, bgcolorsrc=None, bordercolor=None, bordercolorsrc=None, font=None, namelength=None, namelengthsrc=None, showarrow=None, **kwargs, ): """ Construct a new Hoverlabel object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram2d.Hoverlabel` align Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines alignsrc Sets the source reference on Chart Studio Cloud for `align`. bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on Chart Studio Cloud for `bordercolor`. font Sets the font used in hover labels. namelength Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. namelengthsrc Sets the source reference on Chart Studio Cloud for `namelength`. showarrow Sets whether or not to show the hover label arrow/triangle pointing to the data point. 
Returns ------- Hoverlabel """ super().__init__("hoverlabel") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.histogram2d.Hoverlabel constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram2d.Hoverlabel`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("align", arg, align) self._set_property("alignsrc", arg, alignsrc) self._set_property("bgcolor", arg, bgcolor) self._set_property("bgcolorsrc", arg, bgcolorsrc) self._set_property("bordercolor", arg, bordercolor) self._set_property("bordercolorsrc", arg, bordercolorsrc) self._set_property("font", arg, font) self._set_property("namelength", arg, namelength) self._set_property("namelengthsrc", arg, namelengthsrc) self._set_property("showarrow", arg, showarrow) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Hoverlabel
python
Netflix__metaflow
test/core/tests/basic_parameters.py
{ "start": 67, "end": 2155 }
class ____(MetaflowTest): PRIORITY = 1 SKIP_GRAPHS = [ "simple_switch", "nested_switch", "branch_in_switch", "foreach_in_switch", "switch_in_branch", "switch_in_foreach", "recursive_switch", "recursive_switch_inside_foreach", ] PARAMETERS = { "no_default_param": {"default": None}, # Note this value is overridden in contexts.json "bool_param": {"default": False}, "bool_true_param": {"default": True}, "int_param": {"default": 123}, "str_param": {"default": "'foobar'"}, "list_param": {"separator": "','", "default": '"a,b,c"'}, "json_param": {"default": """'{"a": [1,2,3]}'""", "type": "JSONType"}, } HEADER = """ import os os.environ['METAFLOW_RUN_NO_DEFAULT_PARAM'] = 'test_str' os.environ['METAFLOW_RUN_BOOL_PARAM'] = 'False' """ @steps(0, ["all"]) def step_all(self): assert_equals("test_str", self.no_default_param) assert_equals(False, self.bool_param) assert_equals(True, self.bool_true_param) assert_equals(123, self.int_param) assert_equals("foobar", self.str_param) assert_equals(["a", "b", "c"], self.list_param) assert_equals({"a": [1, 2, 3]}, self.json_param) try: # parameters should be immutable self.int_param = 5 raise ExpectationFailed(AttributeError, "nothing") except AttributeError: pass def check_results(self, flow, checker): for step in flow: checker.assert_artifact(step.name, "no_default_param", "test_str") checker.assert_artifact(step.name, "bool_param", False) checker.assert_artifact(step.name, "bool_true_param", True) checker.assert_artifact(step.name, "int_param", 123) checker.assert_artifact(step.name, "str_param", "foobar") checker.assert_artifact(step.name, "list_param", ["a", "b", "c"]) checker.assert_artifact(step.name, "json_param", {"a": [1, 2, 3]})
BasicParameterTest
python
PyCQA__pylint
tests/functional/u/unnecessary/unnecessary_dunder_call.py
{ "start": 2239, "end": 2377 }
class ____(list): def __contains__(self, item): print("do some special checks") return super().__contains__(item)
MyClass
python
scikit-learn__scikit-learn
sklearn/discriminant_analysis.py
{ "start": 29777, "end": 43621 }
class ____( DiscriminantAnalysisPredictionMixin, ClassifierMixin, BaseEstimator ): """Quadratic Discriminant Analysis. A classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class. .. versionadded:: 0.17 For a comparison between :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis` and :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`, see :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. Read more in the :ref:`User Guide <lda_qda>`. Parameters ---------- solver : {'svd', 'eigen'}, default='svd' Solver to use, possible values: - 'svd': Singular value decomposition (default). Does not compute the covariance matrix, therefore this solver is recommended for data with a large number of features. - 'eigen': Eigenvalue decomposition. Can be combined with shrinkage or custom covariance estimator. shrinkage : 'auto' or float, default=None Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Enabling shrinkage is expected to improve the model when some classes have a relatively small number of training data points compared to the number of features by mitigating overfitting during the covariance estimation step. This should be left to `None` if `covariance_estimator` is used. Note that shrinkage works only with 'eigen' solver. priors : array-like of shape (n_classes,), default=None Class priors. By default, the class proportions are inferred from the training data. reg_param : float, default=0.0 Regularizes the per-class covariance estimates by transforming S2 as ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``, where S2 corresponds to the `scaling_` attribute of a given class. 
store_covariance : bool, default=False If True, the class covariance matrices are explicitly computed and stored in the `self.covariance_` attribute. .. versionadded:: 0.17 tol : float, default=1.0e-4 Absolute threshold for the covariance matrix to be considered rank deficient after applying some regularization (see `reg_param`) to each `Sk` where `Sk` represents covariance matrix for k-th class. This parameter does not affect the predictions. It controls when a warning is raised if the covariance matrix is not full rank. .. versionadded:: 0.17 covariance_estimator : covariance estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance`. If None the shrinkage parameter drives the estimate. This should be left to `None` if `shrinkage` is used. Note that `covariance_estimator` works only with the 'eigen' solver. Attributes ---------- covariance_ : list of len n_classes of ndarray \ of shape (n_features, n_features) For each class, gives the covariance matrix estimated using the samples of that class. The estimations are unbiased. Only present if `store_covariance` is True. means_ : array-like of shape (n_classes, n_features) Class-wise means. priors_ : array-like of shape (n_classes,) Class priors (sum to 1). rotations_ : list of len n_classes of ndarray of shape (n_features, n_k) For each class k an array of shape (n_features, n_k), where ``n_k = min(n_features, number of elements in class k)`` It is the rotation of the Gaussian distribution, i.e. its principal axis. It corresponds to `V`, the matrix of eigenvectors coming from the SVD of `Xk = U S Vt` where `Xk` is the centered matrix of samples from class k. 
scalings_ : list of len n_classes of ndarray of shape (n_k,) For each class, contains the scaling of the Gaussian distributions along its principal axes, i.e. the variance in the rotated coordinate system. It corresponds to `S^2 / (n_samples - 1)`, where `S` is the diagonal matrix of singular values from the SVD of `Xk`, where `Xk` is the centered matrix of samples from class k. classes_ : ndarray of shape (n_classes,) Unique class labels. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- LinearDiscriminantAnalysis : Linear Discriminant Analysis. Examples -------- >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = QuadraticDiscriminantAnalysis() >>> clf.fit(X, y) QuadraticDiscriminantAnalysis() >>> print(clf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "solver": [StrOptions({"svd", "eigen"})], "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None], "priors": ["array-like", None], "reg_param": [Interval(Real, 0, 1, closed="both")], "store_covariance": ["boolean"], "tol": [Interval(Real, 0, None, closed="left")], "covariance_estimator": [HasMethods("fit"), None], } def __init__( self, *, solver="svd", shrinkage=None, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4, covariance_estimator=None, ): self.solver = solver self.shrinkage = shrinkage self.priors = priors self.reg_param = reg_param self.store_covariance = store_covariance self.tol = tol self.covariance_estimator = covariance_estimator def _solve_eigen(self, X): """Eigenvalue solver. 
The eigenvalue solver uses the eigen decomposition of the data to compute the rotation and scaling matrices used for scoring new samples. This solver supports use of any covariance estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. """ n_samples, n_features = X.shape cov = _cov(X, self.shrinkage, self.covariance_estimator) scaling, rotation = linalg.eigh(cov) # scalings are eigenvalues rotation = rotation[:, np.argsort(scaling)[::-1]] # sort eigenvectors scaling = scaling[np.argsort(scaling)[::-1]] # sort eigenvalues return scaling, rotation, cov def _solve_svd(self, X): """SVD solver for Quadratic Discriminant Analysis. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. """ n_samples, n_features = X.shape mean = X.mean(0) Xc = X - mean # Xc = U * S * V.T _, S, Vt = np.linalg.svd(Xc, full_matrices=False) scaling = (S**2) / (n_samples - 1) # scalings are squared singular values scaling = ((1 - self.reg_param) * scaling) + self.reg_param rotation = Vt.T cov = None if self.store_covariance: # cov = V * (S^2 / (n-1)) * V.T cov = scaling * Vt.T @ Vt return scaling, rotation, cov @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y): """Fit the model according to the given training data and parameters. .. versionchanged:: 0.19 ``store_covariances`` has been moved to main constructor as ``store_covariance``. .. versionchanged:: 0.19 ``tol`` has been moved to main constructor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values (integers). Returns ------- self : object Fitted estimator. 
""" X, y = validate_data(self, X, y) check_classification_targets(y) self.classes_ = np.unique(y) n_samples, n_features = X.shape n_classes = len(self.classes_) if n_classes < 2: raise ValueError( "The number of classes has to be greater than one. Got " f"{n_classes} class." ) if self.priors is None: _, cnts = np.unique(y, return_counts=True) self.priors_ = cnts / float(n_samples) else: self.priors_ = np.array(self.priors) if self.solver == "svd": if self.shrinkage is not None: # Support for `shrinkage` could be implemented as in # https://github.com/scikit-learn/scikit-learn/issues/32590 raise NotImplementedError("shrinkage not supported with 'svd' solver.") if self.covariance_estimator is not None: raise ValueError( "covariance_estimator is not supported with solver='svd'. " "Try solver='eigen' instead." ) specific_solver = self._solve_svd elif self.solver == "eigen": specific_solver = self._solve_eigen means = [] cov = [] scalings = [] rotations = [] for class_idx, class_label in enumerate(self.classes_): X_class = X[y == class_label, :] if len(X_class) == 1: raise ValueError( "y has only 1 sample in class %s, covariance is ill defined." % str(self.classes_[class_idx]) ) mean_class = X_class.mean(0) means.append(mean_class) scaling_class, rotation_class, cov_class = specific_solver(X_class) rank = np.sum(scaling_class > self.tol) if rank < n_features: n_samples_class = X_class.shape[0] if self.solver == "svd" and n_samples_class <= n_features: raise linalg.LinAlgError( f"The covariance matrix of class {class_label} is not full " f"rank. When using `solver='svd'` the number of samples in " f"each class should be more than the number of features, but " f"class {class_label} has {n_samples_class} samples and " f"{n_features} features. Try using `solver='eigen'` and " f"setting the parameter `shrinkage` for regularization." 
) else: msg_param = "shrinkage" if self.solver == "eigen" else "reg_param" raise linalg.LinAlgError( f"The covariance matrix of class {class_label} is not full " f"rank. Increase the value of `{msg_param}` to reduce the " f"collinearity.", ) cov.append(cov_class) scalings.append(scaling_class) rotations.append(rotation_class) if self.store_covariance: self.covariance_ = cov self.means_ = np.asarray(means) self.scalings_ = scalings self.rotations_ = rotations return self def _decision_function(self, X): # return log posterior, see eq (4.12) p. 110 of the ESL. check_is_fitted(self) X = validate_data(self, X, reset=False) norm2 = [] for i in range(len(self.classes_)): R = self.rotations_[i] S = self.scalings_[i] Xm = X - self.means_[i] X2 = np.dot(Xm, R * (S ** (-0.5))) norm2.append(np.sum(X2**2, axis=1)) norm2 = np.array(norm2).T # shape = [len(X), n_classes] u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) return -0.5 * (norm2 + u) + np.log(self.priors_) def decision_function(self, X): """Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. `log p(y = k | x)`. In a binary classification setting this instead corresponds to the difference `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is `(n_samples,)`, giving the log likelihood ratio of the positive class. """ # Only overrides for the docstring. return super().decision_function(X)
QuadraticDiscriminantAnalysis
python
doocs__leetcode
solution/2000-2099/2048.Next Greater Numerically Balanced Number/Solution.py
{ "start": 0, "end": 318 }
class ____: def nextBeautifulNumber(self, n: int) -> int: for x in count(n + 1): y = x cnt = [0] * 10 while y: y, v = divmod(y, 10) cnt[v] += 1 if all(v == 0 or i == v for i, v in enumerate(cnt)): return x
Solution
python
ansible__ansible
test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
{ "start": 3121, "end": 5971 }
class ____(): """Context manager to handle capturing stderr and stdout""" def __enter__(self): self.sys_stdout = sys.stdout self.sys_stderr = sys.stderr sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding) sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding) return self def __exit__(self, exc_type, exc_value, traceback): sys.stdout = self.sys_stdout sys.stderr = self.sys_stderr def get(self): """Return ``(stdout, stderr)``""" return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue() def get_module_name_from_filename(filename, collection): # Calculate the module's name so that relative imports work correctly if collection: # collection is a relative path, example: ansible_collections/my_namespace/my_collection # filename is a relative path, example: plugins/modules/my_module.py path = os.path.join(collection, filename) else: # filename is a relative path, example: lib/ansible/modules/system/ping.py path = os.path.relpath(filename, 'lib') name = os.path.splitext(path)[0].replace(os.path.sep, '.') return name def parse_yaml(value, lineno, module, name, load_all=False, ansible_loader=False): traces = [] errors = [] data = None if load_all: yaml_load = yaml.load_all else: yaml_load = yaml.load if ansible_loader: loader = AnsibleLoader else: loader = SafeLoader try: data = yaml_load(value, Loader=loader) if load_all: data = list(data) except yaml.MarkedYAMLError as e: errors.append({ 'msg': '%s is not valid YAML' % name, 'line': e.problem_mark.line + lineno, 'column': e.problem_mark.column + 1 }) traces.append(e) except yaml.reader.ReaderError as e: traces.append(e) # TODO: Better line/column detection errors.append({ 'msg': ('%s is not valid YAML. Character ' '0x%x at position %d.' 
% (name, e.character, e.position)), 'line': lineno }) except yaml.YAMLError as e: traces.append(e) errors.append({ 'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e), 'line': lineno }) return data, errors, traces def is_empty(value): """Evaluate null like values excluding False""" if value is False: return False return not bool(value) def compare_unordered_lists(a, b): """Safe list comparisons Supports: - unordered lists - unhashable elements """ return len(a) == len(b) and all(x in b for x in a) and all(x in a for x in b)
CaptureStd
python
scipy__scipy
scipy/interpolate/_polyint.py
{ "start": 8058, "end": 18320 }
class ____(_Interpolator1DWithDerivatives): """Krogh interpolator (C∞ smooth). The polynomial passes through all the pairs ``(xi, yi)``. One may additionally specify a number of derivatives at each point `xi`; this is done by repeating the value `xi` and specifying the derivatives as successive `yi` values. Allows evaluation of the polynomial and all its derivatives. For reasons of numerical stability, this function does not compute the coefficients of the polynomial, although they can be obtained by evaluating all the derivatives. Parameters ---------- xi : array_like, shape (npoints, ) Known x-coordinates. Must be sorted in increasing order. yi : array_like, shape (..., npoints, ...) Known y-coordinates. When an xi occurs two or more times in a row, the corresponding yi's represent derivative values. The length of `yi` along the interpolation axis must be equal to the length of `xi`. Use the `axis` parameter to select the correct axis. axis : int, optional Axis in the `yi` array corresponding to the x-coordinate values. Defaults to ``axis=0``. Notes ----- Be aware that the algorithms implemented here are not necessarily the most numerically stable known. Moreover, even in a world of exact computation, unless the x coordinates are chosen very carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice - polynomial interpolation itself is a very ill-conditioned process due to the Runge phenomenon. In general, even with well-chosen x values, degrees higher than about thirty cause problems with numerical instability in this code. Based on [1]_. References ---------- .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation and Numerical Differentiation", 1970. Examples -------- To produce a polynomial that is zero at 0 and 1 and has derivative 2 at 0, call >>> from scipy.interpolate import KroghInterpolator >>> KroghInterpolator([0,0,1],[0,2,0]) This constructs the quadratic :math:`2x^2-2x`. 
The derivative condition is indicated by the repeated zero in the `xi` array; the corresponding yi values are 0, the function value, and 2, the derivative value. For another example, given `xi`, `yi`, and a derivative `ypi` for each point, appropriate arrays can be constructed as: >>> import numpy as np >>> rng = np.random.default_rng() >>> xi = np.linspace(0, 1, 5) >>> yi, ypi = rng.random((2, 5)) >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi))) >>> KroghInterpolator(xi_k, yi_k) To produce a vector-valued polynomial, supply a higher-dimensional array for `yi`: >>> KroghInterpolator([0,1],[[2,3],[4,5]]) This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1. """ def __init__(self, xi, yi, axis=0): super().__init__(xi, yi, axis) self.xi = np.asarray(xi) self.yi = self._reshape_yi(yi) self.n, self.r = self.yi.shape if (deg := self.xi.size) > 30: warnings.warn(f"{deg} degrees provided, degrees higher than about" " thirty cause problems with numerical instability " "with 'KroghInterpolator'", stacklevel=2) c = np.zeros((self.n+1, self.r), dtype=self.dtype) c[0] = self.yi[0] Vk = np.zeros((self.n, self.r), dtype=self.dtype) for k in range(1, self.n): s = 0 while s <= k and xi[k-s] == xi[k]: s += 1 s -= 1 Vk[0] = self.yi[k]/float_factorial(s) for i in range(k-s): if xi[i] == xi[k]: raise ValueError("Elements of `xi` can't be equal.") if s == 0: Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k]) else: Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k]) c[k] = Vk[k-s] self.c = c def _evaluate(self, x): pi = 1 p = np.zeros((len(x), self.r), dtype=self.dtype) p += self.c[0,np.newaxis,:] for k in range(1, self.n): w = x - self.xi[k-1] pi = w*pi p += pi[:,np.newaxis] * self.c[k] return p def _evaluate_derivatives(self, x, der=None): n = self.n r = self.r if der is None: der = self.n pi = np.zeros((n, len(x))) w = np.zeros((n, len(x))) pi[0] = 1 p = np.zeros((len(x), self.r), dtype=self.dtype) p += self.c[0, np.newaxis, :] for k in range(1, n): w[k-1] = x - self.xi[k-1] 
pi[k] = w[k-1] * pi[k-1] p += pi[k, :, np.newaxis] * self.c[k] cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype) cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :] cn[0] = p for k in range(1, n): for i in range(1, n-k+1): pi[i] = w[k+i-1]*pi[i-1] + pi[i] cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i] cn[k] *= float_factorial(k) cn[n, :, :] = 0 return cn[:der] def krogh_interpolate(xi, yi, x, der=0, axis=0): """Convenience function for Krogh interpolation. See `KroghInterpolator` for more details. Parameters ---------- xi : array_like Interpolation points (known x-coordinates). yi : array_like Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as vectors of length R, or scalars if R=1. x : array_like Point or points at which to evaluate the derivatives. der : int or list or None, optional How many derivatives to evaluate, or None for all potentially nonzero derivatives (that is, a number equal to the number of points), or a list of derivatives to evaluate. This number includes the function value as the '0th' derivative. axis : int, optional Axis in the `yi` array corresponding to the x-coordinate values. Returns ------- d : ndarray If the interpolator's values are R-D then the returned array will be the number of derivatives by N by R. If `x` is a scalar, the middle dimension will be dropped; if the `yi` are scalars then the last dimension will be dropped. See Also -------- KroghInterpolator : Krogh interpolator Notes ----- Construction of the interpolating polynomial is a relatively expensive process. If you want to evaluate it repeatedly consider using the class KroghInterpolator (which is what this function uses). 
Examples -------- We can interpolate 2D observed data using Krogh interpolation: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import krogh_interpolate >>> x_observed = np.linspace(0.0, 10.0, 11) >>> y_observed = np.sin(x_observed) >>> x = np.linspace(min(x_observed), max(x_observed), num=100) >>> y = krogh_interpolate(x_observed, y_observed, x) >>> plt.plot(x_observed, y_observed, "o", label="observation") >>> plt.plot(x, y, label="krogh interpolation") >>> plt.legend() >>> plt.show() """ P = KroghInterpolator(xi, yi, axis=axis) if der == 0: return P(x) elif _isscalar(der): return P.derivative(x, der=der) else: return P.derivatives(x, der=np.amax(der)+1)[der] def approximate_taylor_polynomial(f,x,degree,scale,order=None): """ Estimate the Taylor polynomial of f at x by polynomial fitting. Parameters ---------- f : callable The function whose Taylor polynomial is sought. Should accept a vector of `x` values. x : scalar The point at which the polynomial is to be evaluated. degree : int The degree of the Taylor polynomial scale : scalar The width of the interval to use to evaluate the Taylor polynomial. Function values spread over a range this wide are used to fit the polynomial. Must be chosen carefully. order : int or None, optional The order of the polynomial to be used in the fitting; `f` will be evaluated ``order+1`` times. If None, use `degree`. Returns ------- p : poly1d instance The Taylor polynomial (translated to the origin, so that for example p(0)=f(x)). Notes ----- The appropriate choice of "scale" is a trade-off; too large and the function differs from its Taylor polynomial too much to get a good answer, too small and round-off errors overwhelm the higher-order terms. The algorithm used becomes numerically unstable around order 30 even under ideal circumstances. Choosing order somewhat larger than degree may improve the higher-order terms. 
Examples -------- We can calculate Taylor approximation polynomials of sin function with various degrees: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import approximate_taylor_polynomial >>> x = np.linspace(-10.0, 10.0, num=100) >>> plt.plot(x, np.sin(x), label="sin curve") >>> for degree in np.arange(1, 15, step=2): ... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1, ... order=degree + 2) ... plt.plot(x, sin_taylor(x), label=f"degree={degree}") >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', ... borderaxespad=0.0, shadow=True) >>> plt.tight_layout() >>> plt.axis([-10, 10, -10, 10]) >>> plt.show() """ if order is None: order = degree n = order+1 # Choose n points that cluster near the endpoints of the interval in # a way that avoids the Runge phenomenon. Ensure, by including the # endpoint or not as appropriate, that one point always falls at x # exactly. xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x P = KroghInterpolator(xs, f(xs)) d = P.derivatives(x,der=degree+1) return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
KroghInterpolator
python
tqdm__tqdm
tqdm/_monitor.py
{ "start": 346, "end": 3699 }
class ____(Thread): """ Monitoring thread for tqdm bars. Monitors if tqdm bars are taking too much time to display and readjusts miniters automatically if necessary. Parameters ---------- tqdm_cls : class tqdm class to use (can be core tqdm or a submodule). sleep_interval : float Time to sleep between monitoring checks. """ _test = {} # internal vars for unit testing def __init__(self, tqdm_cls, sleep_interval): Thread.__init__(self) self.daemon = True # kill thread when main killed (KeyboardInterrupt) self.woken = 0 # last time woken up, to sync with monitor self.tqdm_cls = tqdm_cls self.sleep_interval = sleep_interval self._time = self._test.get("time", time) self.was_killed = self._test.get("Event", Event)() atexit.register(self.exit) self.start() def exit(self): self.was_killed.set() if self is not current_thread(): self.join() return self.report() def get_instances(self): # returns a copy of started `tqdm_cls` instances return [i for i in self.tqdm_cls._instances.copy() # Avoid race by checking that the instance started if hasattr(i, 'start_t')] def run(self): cur_t = self._time() while True: # After processing and before sleeping, notify that we woke # Need to be done just before sleeping self.woken = cur_t # Sleep some time... self.was_killed.wait(self.sleep_interval) # Quit if killed if self.was_killed.is_set(): return # Then monitor! # Acquire lock (to access _instances) with self.tqdm_cls.get_lock(): cur_t = self._time() # Check tqdm instances are waiting too long to print instances = self.get_instances() for instance in instances: # Check event in loop to reduce blocking time on exit if self.was_killed.is_set(): return # Only if mininterval > 1 (else iterations are just slow) # and last refresh exceeded maxinterval if ( instance.miniters > 1 and (cur_t - instance.last_print_t) >= instance.maxinterval ): # force bypassing miniters on next iteration # (dynamic_miniters adjusts mininterval automatically) instance.miniters = 1 # Refresh now! 
(works only for manual tqdm) instance.refresh(nolock=True) # Remove accidental long-lived strong reference del instance if instances != self.get_instances(): # pragma: nocover warn("Set changed size during iteration" + " (see https://github.com/tqdm/tqdm/issues/481)", TqdmSynchronisationWarning, stacklevel=2) # Remove accidental long-lived strong references del instances def report(self): return not self.was_killed.is_set()
TMonitor
python
doocs__leetcode
lcof2/剑指 Offer II 078. 合并排序链表/Solution.py
{ "start": 151, "end": 813 }
class ____: def mergeKLists(self, lists: List[ListNode]) -> ListNode: n = len(lists) if n == 0: return None for i in range(n - 1): lists[i + 1] = self.mergeTwoLists(lists[i], lists[i + 1]) return lists[-1] def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode: dummy = ListNode() cur = dummy while l1 and l2: if l1.val <= l2.val: cur.next = l1 l1 = l1.next else: cur.next = l2 l2 = l2.next cur = cur.next cur.next = l1 or l2 return dummy.next
Solution
python
virgili0__Virgilio
Tools/regex-bin/regexPrinter.py
{ "start": 2420, "end": 2909 }
class ____(TreeNode): def __init__(self, token, value, children, quantifier, next_node): TreeNode.__init__(self, token, value, next_node) self.children = children self.quantifier = quantifier def print(self): temp = [c for child in self.children for c in child.print()] printer = self.quantifier.get_printer(temp) for child_print in printer(): for sub in self.next_node.print(): yield child_print+sub
OrNode
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/abstractClass11.py
{ "start": 158, "end": 196 }
class ____(ABCMeta): pass
CustomMeta
python
walkccc__LeetCode
solutions/977. Squares of a Sorted Array/977.py
{ "start": 0, "end": 326 }
class ____: def sortedSquares(self, nums: list[int]) -> list[int]: n = len(nums) l = 0 r = n - 1 ans = [0] * n while n: n -= 1 if abs(nums[l]) > abs(nums[r]): ans[n] = nums[l] * nums[l] l += 1 else: ans[n] = nums[r] * nums[r] r -= 1 return ans
Solution
python
huggingface__transformers
src/transformers/integrations/bitnet.py
{ "start": 7858, "end": 8691 }
class ____(torch.autograd.Function): """ Implements a custom autograd function for activation quantization. This performs symmetric 8-bit quantization (to the range [-128, 127]) based on the maximum absolute value along the last dimension (per-token/row scaling). It uses the Straight-Through Estimator (STE) for the backward pass. """ @staticmethod @torch.compile def forward(ctx, activation): dtype = activation.dtype activation = activation.float() scale = 127 / activation.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5) activation = (activation * scale).round().clamp(-128, 127) / scale return activation.to(dtype) @staticmethod def backward(ctx, grad_output): grad_input = grad_output.clone() return grad_input
ActQuant
python
eventlet__eventlet
eventlet/queue.py
{ "start": 15218, "end": 17622 }
class ____(LightQueue): '''Create a queue object with a given maximum size. If *maxsize* is less than zero or ``None``, the queue size is infinite. ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks until the item is delivered. (This is unlike the standard :class:`Stdlib_Queue`, where 0 means infinite size). In all other respects, this Queue class resembles the standard library, :class:`Stdlib_Queue`. ''' def __init__(self, maxsize=None): LightQueue.__init__(self, maxsize) self.unfinished_tasks = 0 self._cond = Event() def _format(self): result = LightQueue._format(self) if self.unfinished_tasks: result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond) return result def _put(self, item): LightQueue._put(self, item) self._put_bookkeeping() def _put_bookkeeping(self): self.unfinished_tasks += 1 if self._cond.ready(): self._cond.reset() def task_done(self): '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads. For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue that the processing on the task is complete. If a :meth:`join` is currently blocking, it will resume when all items have been processed (meaning that a :meth:`task_done` call was received for every item that had been :meth:`put <Queue.put>` into the queue). Raises a :exc:`ValueError` if called more times than there were items placed in the queue. ''' if self.unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self.unfinished_tasks -= 1 if self.unfinished_tasks == 0: self._cond.send(None) def join(self): '''Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls :meth:`task_done` to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, :meth:`join` unblocks. 
''' if self.unfinished_tasks > 0: self._cond.wait()
Queue
python
google__jax
docs/autodidax.py
{ "start": 32891, "end": 35691 }
class ____(NamedTuple): in_types: list[ShapedArray] out_types: list[ShapedArray] def __repr__(self): in_types = ', '.join(aval.str_short() for aval in self.in_types) out_types = ', '.join(aval.str_short() for aval in self.out_types) return f'({in_types}) -> ({out_types})' def typecheck_jaxpr(jaxpr: Jaxpr) -> JaxprType: env: set[Var] = set() for v in jaxpr.in_binders: if v in env: raise TypeError env.add(v) for eqn in jaxpr.eqns: in_types = [typecheck_atom(env, x) for x in eqn.inputs] out_types = abstract_eval_rules[eqn.primitive](*in_types, **eqn.params) for out_binder, out_type in zip(eqn.out_binders, out_types): if not out_type == out_binder.aval: raise TypeError for out_binder in eqn.out_binders: if out_binder in env: raise TypeError env.add(out_binder) in_types = [v.aval for v in jaxpr.in_binders] out_types = [typecheck_atom(env, x) for x in jaxpr.outs] return JaxprType(in_types, out_types) def typecheck_atom(env: set[Var], x: Atom) -> ShapedArray: if isinstance(x, Var): if x not in env: raise TypeError("unbound variable") return x.aval elif isinstance(x, Lit): return raise_to_shaped(get_aval(x.val)) else: assert False # - # We can apply the function represented by a jaxpr to arguments with a simple # interpreter. # + def eval_jaxpr(jaxpr: Jaxpr, args: list[Any]) -> list[Any]: env: dict[Var, Any] = {} def read(x: Atom) -> Any: return env[x] if type(x) is Var else x.val def write(v: Var, val: Any) -> None: assert v not in env # single-assignment env[v] = val map(write, jaxpr.in_binders, args) for eqn in jaxpr.eqns: in_vals = map(read, eqn.inputs) outs = bind(eqn.primitive, *in_vals, **eqn.params) map(write, eqn.out_binders, outs) return map(read, jaxpr.outs) def jaxpr_as_fun(jaxpr: Jaxpr): return lambda *args: eval_jaxpr(jaxpr, args) # - # By using `bind` in the interpreter, this interpreter itself is traceable. # ### Building jaxprs with tracing # # Now that we have jaxprs as a data structure, we need ways to produce these # from tracing Python code. 
In general there are two variants of how we trace to # a jaxpr; `jit` uses one and `vjp` uses the other. We'll start with the one # used by `jit`, which is also used by control flow primitives like `lax.cond`, # `lax.while_loop`, and `lax.scan`. # + def split_list(lst: list[Any], n: int) -> tuple[list[Any], list[Any]]: assert 0 <= n <= len(lst) return lst[:n], lst[n:] def partition_list(bs: list[bool], l: list[Any]) -> tuple[list[Any], list[Any]]: assert len(bs) == len(l) lists = lst1, lst2 = [], [] for b, x in zip(bs, l): lists[b].append(x) return lst1, lst2 # + # NB: the analogous class in JAX is called 'DynamicJaxprTracer'
JaxprType
python
google__jax
tests/random_test.py
{ "start": 53175, "end": 62483 }
class ____(jtu.JaxTestCase): def assertKeysEqual(self, key1, key2): self.assertEqual(key1.dtype, key2.dtype) self.assertArraysEqual(random.key_data(key1), random.key_data(key2)) def check_shape(self, func, *args): like = lambda keys: jnp.ones(keys.shape) out_key = func(*args) self.assertIsInstance(out_key, prng_internal.PRNGKeyArray) out_like_key = func(*jax.tree.map(like, args)) self.assertIsInstance(out_like_key, jax.Array) self.assertEqual(out_key.shape, out_like_key.shape) def check_against_reference(self, key_func, arr_func, *key_args): out_arr = arr_func(*jax.tree.map(lambda x: random.key_data(x), key_args)) self.assertIsInstance(out_arr, jax.Array) out_key = key_func(*key_args) self.assertIsInstance(out_key, prng_internal.PRNGKeyArray) self.assertArraysEqual(random.key_data(out_key), out_arr) out_key = jax.jit(key_func)(*key_args) self.assertIsInstance(out_key, prng_internal.PRNGKeyArray) self.assertArraysEqual(random.key_data(out_key), out_arr) @parameterized.parameters([ [(2, 3), 'shape', (2, 3)], [(2, 3), 'size', 6], [(2, 3), 'ndim', 2] ]) def test_properties(self, shape, prop, expected): get_prop = lambda x: getattr(x, prop) key = random.split(random.key(0), math.prod(shape)).reshape(shape) self.assertEqual(get_prop(key), expected) self.assertEqual(jax.jit(get_prop)(key), expected) def test_reshape(self): key = random.key(123) keys = random.split(key, 4) shape = (2, 2) key_func = partial(jnp.reshape, shape=shape) arr_func = partial(jnp.reshape, shape=(*shape, *key._impl.key_shape)) self.check_shape(key_func, keys) self.check_against_reference(key_func, arr_func, keys) def test_tile(self): key = random.key(123) reps = 3 key_func = partial(jnp.tile, reps=reps) arr_func = lambda x: jnp.tile(x[None], reps=(reps, *(1 for _ in key._impl.key_shape))) self.check_shape(key_func, key) self.check_against_reference(key_func, arr_func, key) def test_concatenate(self): args = lambda: [random.split(k, 2) for k in random.split(random.key(123), 3)] key_func = arr_func = 
partial(jnp.concatenate, axis=0) self.check_shape(key_func, args()) self.check_against_reference(key_func, arr_func, args()) def test_broadcast_to(self): key = random.key(123) shape = (3,) key_func = partial(jnp.broadcast_to, shape=shape) arr_func = partial(jnp.broadcast_to, shape=(*shape, *key._impl.key_shape)) self.check_shape(key_func, key) self.check_against_reference(key_func, arr_func, key) def test_expand_dims(self): key = random.key(123) keys = random.split(key, 6).reshape(2, 3) key_func = arr_func = partial(jnp.expand_dims, axis=1) self.check_shape(key_func, keys) self.check_against_reference(key_func, arr_func, keys) def test_broadcast_arrays(self): key = random.key(123) keys = random.split(key, 3) key_func = arr_func = lambda *args: jnp.broadcast_arrays(*args)[0] self.check_shape(key_func, key, keys) self.check_against_reference(key_func, arr_func, key, keys) def test_append(self): key = lambda: random.key(123) keys = lambda: random.split(random.key(123), 4) key_func = jnp.append arr_func = lambda keys, key: jnp.append(keys, key[None], axis=0) self.check_shape(key_func, keys(), key()) self.check_shape(arr_func, keys(), key()) with jax.debug_key_reuse(False): self.check_against_reference(key_func, arr_func, keys(), key()) def test_ravel(self): key = random.key(123) keys = random.split(key, 4).reshape(2, 2) key_func = jnp.ravel arr_func = partial(jnp.reshape, shape=(4, *key._impl.key_shape)) self.check_shape(key_func, keys) self.check_against_reference(key_func, arr_func, keys) def test_stack(self): key = random.key(123) keys = random.split(key, 2) key_func = arr_func = partial(jnp.stack, axis=0) self.check_shape(key_func, keys) self.check_against_reference(key_func, arr_func, keys) def test_array(self): key = random.key(123) self.assertKeysEqual(key, jnp.array(key)) self.assertKeysEqual(key, jnp.asarray(key)) self.assertKeysEqual(key, jax.jit(jnp.array)(key)) self.assertKeysEqual(key, jax.jit(jnp.asarray)(key)) def test_array_user_dtype(self): key = 
random.key(123) self.assertKeysEqual(key, jnp.array(key, dtype=key.dtype)) self.assertKeysEqual(key, jnp.asarray(key, dtype=key.dtype)) @parameterized.parameters([ (0,), (slice(1),), (np.array([0, 2]),), (np.array([False, True, True]),) ]) def test_getitem(self, idx): keys = lambda: random.split(random.key(123), 3) key_func = arr_func = lambda x: x[idx] self.check_shape(key_func, keys()) with jax.debug_key_reuse(False): self.check_against_reference(key_func, arr_func, keys()) @parameterized.parameters([ (0,), (slice(1),), (np.array([0, 2]),), (np.array([False, True, True]),) ]) def test_gather(self, idx): keys = lambda: random.split(random.key(123), 3) key_func = arr_func = lambda key: key.at[idx].get() self.check_shape(key_func, keys()) with jax.debug_key_reuse(False): self.check_against_reference(key_func, arr_func, keys()) @jax.debug_key_reuse(False) def test_equality(self): key = random.key(123) key2 = random.key(456) self.assertTrue(key == key) self.assertFalse(key == key2) self.assertTrue(key != key2) self.assertFalse(key != key) size = 5 idx = slice(2, 4) key_arr = random.split(key, size).at[idx].set(key) expected = jnp.zeros(size, dtype=bool).at[idx].set(True) self.assertArraysEqual(key == key_arr, expected) self.assertArraysEqual(key != key_arr, ~expected) @parameterized.parameters([ (0,), (slice(1),), (np.array([0, 2]),), (np.array([False, True, True]),) ]) def test_scatter(self, idx): key = lambda: random.key(123) keys = lambda: random.split(key(), 3) key_func = arr_func = lambda k1, k2: k1.at[idx].set(k2) self.check_shape(key_func, keys(), key()) self.check_against_reference(key_func, arr_func, keys(), key()) def test_errors(self): key = random.key(123) with self.assertRaisesRegex(TypeError, "add does not accept dtypes key<fry>, int.*"): jnp.add(key, 1) with self.assertRaisesRegex(TypeError, "add does not accept dtypes key<fry>, int.*"): key + 1 with self.assertRaisesRegex(TypeError, "add does not accept dtype key<fry>"): jnp.add(key, key) with 
self.assertRaisesRegex(TypeError, "add does not accept dtype key<fry>"): key + key with self.assertRaisesRegex(TypeError, "neg does not accept dtype key<fry>"): jnp.negative(key) with self.assertRaisesRegex(TypeError, "neg does not accept dtype key<fry>"): -key with self.assertRaisesRegex(ValueError, "Cannot convert_element_type from key<fry> to int(32|64)"): lax.convert_element_type(key, int) with self.assertRaisesRegex(ValueError, "Cannot convert_element_type from int32 to key<fry>"): lax.convert_element_type(np.int32(0), key.dtype) def test_eval_shape(self): key = random.key(1701) shapedtype = jax.ShapeDtypeStruct(key.shape, key.dtype) out = jax.eval_shape(lambda x: x, shapedtype) self.assertEqual(out, shapedtype) def test_result_type(self): key = random.key(123456) self.assertEqual(jnp.result_type(key), key.dtype) @parameterized.parameters([ (jnp.empty_like, ()), (jnp.zeros_like, ()), (jnp.ones_like, ()), (jnp.full_like, (100,)), ]) def test_full_like(self, func, args): keys = random.split(random.key(789543)) key_func = arr_func = lambda x: func(x, *args) self.check_shape(key_func, keys) self.check_against_reference(key_func, arr_func, keys) def test_full_like_with_key_fillvalue(self): keys = random.split(random.key(789543)) fill_value = random.key(42) self.check_shape(jnp.full_like, keys, fill_value) self.check_against_reference(jnp.full_like, jnp.full_like, keys, fill_value) @parameterized.parameters([ (jnp.empty, {}), (jnp.zeros, {}), (jnp.ones, {}), (jnp.full, {'fill_value': 100}), ]) def test_full(self, func, kwds): keys = random.split(random.key(789543)) key_func = arr_func = lambda x: func(x.shape, dtype=x.dtype, **kwds) self.check_shape(key_func, keys) self.check_against_reference(key_func, arr_func, keys) def test_full_with_key_fillvalue(self): keys = random.split(random.key(789543)) fill_value = random.key(42) func = lambda x, val: jnp.full(x.shape, val, dtype=x.dtype) self.check_shape(func, keys, fill_value) self.check_against_reference(func, func, 
keys, fill_value) def test_int_shape(self): # It's not clear if we want to accept ints as the shape argument; the point # of this test is not to check the API functionality but rather to ensure # this doesn't fail in core.py like it used to. @jax.jit def f(): jax.random.normal(jax.random.key(0), 1000) f() # don't crash if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
JnpWithKeyArrayTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 958869, "end": 961497 }
class ____(sgqlc.types.relay.Connection): """A list of results that matched against a search query. Regardless of the number of matches, a maximum of 1,000 results will be available across all types, potentially split across many pages. """ __schema__ = github_schema __field_names__ = ( "code_count", "discussion_count", "edges", "issue_count", "nodes", "page_info", "repository_count", "user_count", "wiki_count", ) code_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="codeCount") """The total number of pieces of code that matched the search query. Regardless of the total number of matches, a maximum of 1,000 results will be available across all types. """ discussion_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="discussionCount") """The total number of discussions that matched the search query. Regardless of the total number of matches, a maximum of 1,000 results will be available across all types. """ edges = sgqlc.types.Field(sgqlc.types.list_of("SearchResultItemEdge"), graphql_name="edges") """A list of edges.""" issue_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="issueCount") """The total number of issues that matched the search query. Regardless of the total number of matches, a maximum of 1,000 results will be available across all types. """ nodes = sgqlc.types.Field(sgqlc.types.list_of("SearchResultItem"), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo") """Information to aid in pagination.""" repository_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="repositoryCount") """The total number of repositories that matched the search query. Regardless of the total number of matches, a maximum of 1,000 results will be available across all types. """ user_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="userCount") """The total number of users that matched the search query. 
Regardless of the total number of matches, a maximum of 1,000 results will be available across all types. """ wiki_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="wikiCount") """The total number of wiki pages that matched the search query. Regardless of the total number of matches, a maximum of 1,000 results will be available across all types. """
SearchResultItemConnection
python
walkccc__LeetCode
solutions/2157. Groups of Strings/2157.py
{ "start": 0, "end": 530 }
class ____: def __init__(self, n: int): self.count = n self.id = list(range(n)) self.sz = [1] * n def unionBySize(self, u: int, v: int) -> None: i = self._find(u) j = self._find(v) if i == j: return if self.sz[i] < self.sz[j]: self.sz[j] += self.sz[i] self.id[i] = j else: self.sz[i] += self.sz[j] self.id[j] = i self.count -= 1 def _find(self, u: int) -> int: if self.id[u] != u: self.id[u] = self._find(self.id[u]) return self.id[u]
UnionFind
python
pytest-dev__pytest
testing/test_runner.py
{ "start": 34624, "end": 40349 }
class ____: """Test user-level API of ``TestReport`` objects.""" def getrunner(self): return lambda item: runner.runtestprotocol(item, log=False) def test_longreprtext_pass(self, pytester: Pytester) -> None: reports = pytester.runitem( """ def test_func(): pass """ ) rep = reports[1] assert rep.longreprtext == "" def test_longreprtext_skip(self, pytester: Pytester) -> None: """TestReport.longreprtext can handle non-str ``longrepr`` attributes (#7559)""" reports = pytester.runitem( """ import pytest def test_func(): pytest.skip() """ ) _, call_rep, _ = reports assert isinstance(call_rep.longrepr, tuple) assert "Skipped" in call_rep.longreprtext def test_longreprtext_collect_skip(self, pytester: Pytester) -> None: """CollectReport.longreprtext can handle non-str ``longrepr`` attributes (#7559)""" pytester.makepyfile( """ import pytest pytest.skip(allow_module_level=True) """ ) rec = pytester.inline_run() calls = rec.getcalls("pytest_collectreport") _, call, _ = calls assert isinstance(call.report.longrepr, tuple) assert "Skipped" in call.report.longreprtext def test_longreprtext_failure(self, pytester: Pytester) -> None: reports = pytester.runitem( """ def test_func(): x = 1 assert x == 4 """ ) rep = reports[1] assert "assert 1 == 4" in rep.longreprtext def test_captured_text(self, pytester: Pytester) -> None: reports = pytester.runitem( """ import pytest import sys @pytest.fixture def fix(): sys.stdout.write('setup: stdout\\n') sys.stderr.write('setup: stderr\\n') yield sys.stdout.write('teardown: stdout\\n') sys.stderr.write('teardown: stderr\\n') assert 0 def test_func(fix): sys.stdout.write('call: stdout\\n') sys.stderr.write('call: stderr\\n') assert 0 """ ) setup, call, teardown = reports assert setup.capstdout == "setup: stdout\n" assert call.capstdout == "setup: stdout\ncall: stdout\n" assert teardown.capstdout == "setup: stdout\ncall: stdout\nteardown: stdout\n" assert setup.capstderr == "setup: stderr\n" assert call.capstderr == "setup: stderr\ncall: 
stderr\n" assert teardown.capstderr == "setup: stderr\ncall: stderr\nteardown: stderr\n" def test_no_captured_text(self, pytester: Pytester) -> None: reports = pytester.runitem( """ def test_func(): pass """ ) rep = reports[1] assert rep.capstdout == "" assert rep.capstderr == "" def test_longrepr_type(self, pytester: Pytester) -> None: reports = pytester.runitem( """ import pytest def test_func(): pytest.fail(pytrace=False) """ ) rep = reports[1] assert isinstance(rep.longrepr, ExceptionChainRepr) def test_outcome_exception_bad_msg() -> None: """Check that OutcomeExceptions validate their input to prevent confusing errors (#5578)""" def func() -> None: raise NotImplementedError() expected = ( "OutcomeException expected string as 'msg' parameter, got 'function' instead.\n" "Perhaps you meant to use a mark?" ) with pytest.raises(TypeError) as excinfo: OutcomeException(func) # type: ignore assert str(excinfo.value) == expected def test_pytest_version_env_var(pytester: Pytester, monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("PYTEST_VERSION", "old version") pytester.makepyfile( """ import pytest import os def test(): assert os.environ.get("PYTEST_VERSION") == pytest.__version__ """ ) result = pytester.runpytest_inprocess() assert result.ret == ExitCode.OK assert os.environ["PYTEST_VERSION"] == "old version" def test_teardown_session_failed(pytester: Pytester) -> None: """Test that higher-scoped fixture teardowns run in the context of the last item after the test session bails early due to --maxfail. Regression test for #11706. 
""" pytester.makepyfile( """ import pytest @pytest.fixture(scope="module") def baz(): yield pytest.fail("This is a failing teardown") def test_foo(baz): pytest.fail("This is a failing test") def test_bar(): pass """ ) result = pytester.runpytest("--maxfail=1") result.assert_outcomes(failed=1, errors=1) def test_teardown_session_stopped(pytester: Pytester) -> None: """Test that higher-scoped fixture teardowns run in the context of the last item after the test session bails early due to --stepwise. Regression test for #11706. """ pytester.makepyfile( """ import pytest @pytest.fixture(scope="module") def baz(): yield pytest.fail("This is a failing teardown") def test_foo(baz): pytest.fail("This is a failing test") def test_bar(): pass """ ) result = pytester.runpytest("--stepwise") result.assert_outcomes(failed=1, errors=1)
TestReportContents
python
apache__airflow
task-sdk/src/airflow/sdk/exceptions.py
{ "start": 2607, "end": 3072 }
class ____(TypeError): """Raise when an unmappable type is pushed as a mapped downstream's dependency.""" def __init__(self, value: Any, *values: Any) -> None: super().__init__(value, *values) def __str__(self) -> str: typename = type(self.args[0]).__qualname__ for arg in self.args[1:]: typename = f"{typename}[{type(arg).__qualname__}]" return f"unmappable return type {typename!r}"
UnmappableXComTypePushed
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/filters/base.py
{ "start": 5858, "end": 6176 }
class ____(Filter): """ Never enable feature. """ def __call__(self) -> bool: return False def __and__(self, other: Filter) -> Filter: return self def __or__(self, other: Filter) -> Filter: return other def __invert__(self) -> Always: return Always()
Never
python
numpy__numpy
benchmarks/benchmarks/bench_ma.py
{ "start": 4902, "end": 6022 }
class ____(Benchmark): param_names = ['mtype', 'func', 'msize'] params = [['np', 'np.ma'], ['multiply', 'divide', 'power'], ['small', 'big']] def setup(self, mtype, func, msize): # Small arrays xs = 2.0 + np.random.uniform(-1, 1, 6).reshape(2, 3) ys = 2.0 + np.random.uniform(-1, 1, 6).reshape(2, 3) m1 = [[True, False, False], [False, False, True]] m2 = [[True, False, True], [False, False, True]] self.nmxs = np.ma.array(xs, mask=m1) self.nmys = np.ma.array(ys, mask=m2) # Big arrays xl = 2.0 + np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) yl = 2.0 + np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 2.8 masky = yl < 1.8 self.nmxl = np.ma.array(xl, mask=maskx) self.nmyl = np.ma.array(yl, mask=masky) def time_functions_2v(self, mtype, func, msize): fun = eval(f"{mtype}.{func}") if msize == 'small': fun(self.nmxs, self.nmys) elif msize == 'big': fun(self.nmxl, self.nmyl)
MAFunctions2v
python
doocs__leetcode
solution/0700-0799/0779.K-th Symbol in Grammar/Solution.py
{ "start": 0, "end": 244 }
class ____: def kthGrammar(self, n: int, k: int) -> int: if n == 1: return 0 if k <= (1 << (n - 2)): return self.kthGrammar(n - 1, k) return self.kthGrammar(n - 1, k - (1 << (n - 2))) ^ 1
Solution
python
numba__numba
numba/tests/npyufunc/test_vectorize_decor.py
{ "start": 2309, "end": 2442 }
class ____(unittest.TestCase, BaseVectorizeDecor): _numba_parallel_test_ = False target = 'parallel'
TestParallelVectorizeDecor
python
Lightning-AI__lightning
src/lightning/fabric/plugins/environments/lightning.py
{ "start": 802, "end": 4131 }
class ____(ClusterEnvironment): """The default environment used by Lightning for a single node or free cluster (not managed). There are two modes the Lightning environment can operate with: 1. The user only launches the main process by :code:`python train.py ...` with no additional environment variables set. Lightning will spawn new worker processes for distributed training in the current node. 2. The user launches all processes manually or with utilities like :code:`torch.distributed.launch`. The appropriate environment variables need to be set, and at minimum :code:`LOCAL_RANK`. If the main address and port are not provided, the default environment will choose them automatically. It is recommended to use this default environment for single-node distributed training as it provides a convenient way to launch the training script. """ def __init__(self) -> None: super().__init__() self._main_port: int = -1 self._global_rank: int = 0 self._world_size: int = 1 @property @override def creates_processes_externally(self) -> bool: """Returns whether the cluster creates the processes or not. If at least :code:`LOCAL_RANK` is available as environment variable, Lightning assumes the user acts as the process launcher/job scheduler and Lightning will not launch new processes. 
""" return "LOCAL_RANK" in os.environ @property @override def main_address(self) -> str: return os.environ.get("MASTER_ADDR", "127.0.0.1") @property @override def main_port(self) -> int: if self._main_port == -1: self._main_port = ( int(os.environ["MASTER_PORT"]) if "MASTER_PORT" in os.environ else find_free_network_port() ) return self._main_port @staticmethod @override def detect() -> bool: return True @override def world_size(self) -> int: return self._world_size @override def set_world_size(self, size: int) -> None: self._world_size = size @override def global_rank(self) -> int: return self._global_rank @override def set_global_rank(self, rank: int) -> None: self._global_rank = rank rank_zero_only.rank = rank @override def local_rank(self) -> int: return int(os.environ.get("LOCAL_RANK", 0)) @override def node_rank(self) -> int: group_rank = os.environ.get("GROUP_RANK", 0) return int(os.environ.get("NODE_RANK", group_rank)) @override def teardown(self) -> None: if "WORLD_SIZE" in os.environ: del os.environ["WORLD_SIZE"] def find_free_network_port() -> int: """Finds a free port on localhost. If the environment variable `STANDALONE_PORT` is set, its value is used as the port number. It is useful in single-node training when we don't want to connect to a real main node but have to set the `MASTER_PORT` environment variable. """ if "STANDALONE_PORT" in os.environ: _port = os.environ["STANDALONE_PORT"] return int(_port) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) port = s.getsockname()[1] s.close() return port
LightningEnvironment
python
django__django
tests/model_regress/tests.py
{ "start": 8828, "end": 9069 }
class ____(TestCase): def test_pk_validation(self): NonAutoPK.objects.create(name="one") again = NonAutoPK(name="one") with self.assertRaises(ValidationError): again.validate_unique()
ModelValidationTest
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_training.py
{ "start": 27677, "end": 32732 }
class ____(FSDPTest): @property def world_size(self) -> int: # Since these tests run with a larger transformer model, they may see # some numeric drift with >2 GPUs return min(torch.get_device_module(device_type).device_count(), 2) @skip_if_lt_x_gpu(2) @compiled_fsdp_test(compile_compute_on_module=Transformer) @xfailIf(TEST_XPU) # https://github.com/intel/torch-xpu-ops/issues/1661 def test_train_parity_with_activation_checkpointing(self): """ Tests train parity against DDP when composing with activation checkpointing. """ self.run_subtests( { "reshard_after_forward": [True, False], "checkpoint_impl": ["composable", "utils", "wrapper"], "module_grouping": ["block", "mem_eff", "mem_eff_weight_tied"], }, self._test_train_parity_with_activation_checkpointing, ) def _test_train_parity_with_activation_checkpointing( self, reshard_after_forward: Union[bool, int], checkpoint_impl: str, module_grouping: str, ): assert checkpoint_impl in ("composable", "utils", "wrapper") testing_compile = fully_shard != torch.distributed.fsdp.fully_shard if testing_compile and checkpoint_impl == "composable": return torch.manual_seed(42) vocab_size = 1024 with torch.device(device_type): model_args = ModelArgs( n_layers=3, n_heads=4, vocab_size=vocab_size, max_seq_len=64, dropout_p=0, checkpoint_activations=(checkpoint_impl == "utils"), # For the mem-efficient module grouping, we separate the # embeddings from the output projection, which does not support # weight tying weight_tying=module_grouping != "mem_eff", ) model = Transformer(model_args) ref_model = replicate( copy.deepcopy(model), device_ids=[self.rank], ) ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2) # Apply activation checkpointing prefixes_to_ignore = () if checkpoint_impl == "wrapper": prefixes_to_ignore = (_CHECKPOINT_PREFIX,) apply_activation_checkpointing( model, check_fn=lambda m: isinstance(m, TransformerBlock) ) elif checkpoint_impl == "composable": for module in model.modules(): if isinstance(module, 
TransformerBlock): checkpoint(module) # Apply FSDP fsdp_kwargs = {"reshard_after_forward": reshard_after_forward} if module_grouping == "mem_eff": assert model_args.n_layers == 3 fully_shard(model.layers[0], **fsdp_kwargs) fully_shard([model.layers[1], model.layers[2]], **fsdp_kwargs) fully_shard([model.tok_embeddings, model.pos_embeddings], **fsdp_kwargs) # Embedding weights are not needed for embedding backward model.tok_embeddings.set_unshard_in_backward(False) fully_shard([model.norm, model.output], **fsdp_kwargs) elif module_grouping == "mem_eff_weight_tied": fully_shard([model.tok_embeddings, model.output], **fsdp_kwargs) for layer in model.layers: fully_shard(layer, **fsdp_kwargs) elif module_grouping == "block": for layer in model.layers: fully_shard(layer, **fsdp_kwargs) else: raise NotImplementedError(f"Unknown module grouping: {module_grouping}") fully_shard(model, **fsdp_kwargs) optim = torch.optim.Adam(model.parameters(), lr=1e-2) torch.manual_seed(42 + self.rank) # Reuse the same input across iterations to avoid loss explosion from # trying to learn from random inputs inp = torch.randint(0, vocab_size, (3, 64), device=device_type.type) check_sharded_parity( self, ref_model, model, prefixes_to_ignore=prefixes_to_ignore ) for iter_idx in range(10): losses: list[torch.Tensor] = [] for _model in (ref_model, model): torch.manual_seed(iter_idx + 1) # for dropout determinism losses.append(_model(inp).sum()) losses[-1].backward() if not testing_compile: check_sharded_parity( self, ref_model, model, prefixes_to_ignore=prefixes_to_ignore ) self.assertEqual(losses[0], losses[1]) for _optim in (ref_optim, optim): _optim.step() _optim.zero_grad(set_to_none=(iter_idx % 2 == 0)) if not testing_compile: check_sharded_parity( self, ref_model, model, prefixes_to_ignore=prefixes_to_ignore )
TestFullyShard1DTrainingCompose
python
dagster-io__dagster
python_modules/libraries/dagster-azure/dagster_azure/blob/fake_blob_client.py
{ "start": 1281, "end": 3032 }
class ____: """Stateful mock of an Blob container client for testing.""" def __init__(self, account_name, container_name): self._container = {} self._account_name = account_name self._container_name = container_name @property def account_name(self): return self._account_name @property def container_name(self): return self._container_name @property def url(self): return f"https://{self.account_name}.blob.core.windows.net/{self.container_name}" def keys(self): return self._container.keys() def get_container_properties(self): return {"account_name": self.account_name, "container_name": self.container_name} def has_blob(self, blob_key): return bool(self._container.get(blob_key)) def get_blob_client(self, blob_key): if blob_key not in self._container: blob = self.create_blob(blob_key) else: blob = self._container[blob_key] return blob def create_blob(self, blob_key): blob = FakeBlobClient() self._container[blob_key] = blob return blob def list_blobs(self, name_starts_with=None): for k, v in self._container.items(): if name_starts_with is None or k.startswith(name_starts_with): yield FakeBlob(name=k, url=v.contents) def delete_blob(self, prefix): # Use list to avoid mutating dict as we iterate for k in list(self._container.keys()): if k.startswith(prefix): del self._container[k] def delete_blobs(self, *keys): for key in keys: if key in self._container: del self._container[key]
FakeBlobContainerClient
python
pypa__warehouse
tests/unit/admin/views/test_projects.py
{ "start": 4965, "end": 6589 }
class ____: def test_gets_release(self, db_request): project = ProjectFactory.create() release = ReleaseFactory.create(project=project) journals = sorted( JournalEntryFactory.create_batch( 3, name=project.name, version=release.version ), key=lambda x: (x.submitted_date, x.id), reverse=True, ) assert views.release_detail(release, db_request) == { "release": release, "journals": journals, "observation_kinds": ObservationKind, "observations": [], } def test_release_render(self, db_request): project = ProjectFactory.create() release = ReleaseFactory.create(project=project) db_request.matchdict["project_name"] = str(project.normalized_name) db_request.matchdict["version"] = str(release.version) db_request.route_path = pretend.call_recorder( lambda *a, **kw: "/admin/projects/" ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.user = UserFactory.create() # Mock request task handler request_task_mock = mock.Mock() db_request.task = request_task_mock views.release_render(release, db_request) request_task_mock.assert_called_with(update_release_description) assert db_request.session.flash.calls == [ pretend.call( f"Task sent to re-render description for {release}", queue="success" ) ]
TestReleaseDetail
python
spack__spack
lib/spack/spack/store.py
{ "start": 13498, "end": 13623 }
class ____(spack.error.SpackError): """Error occurring when trying to match specs in store against a constraint"""
MatchError
python
openai__openai-python
src/openai/_client.py
{ "start": 26956, "end": 31044 }
class ____: _client: OpenAI def __init__(self, client: OpenAI) -> None: self._client = client @cached_property def completions(self) -> completions.CompletionsWithRawResponse: from .resources.completions import CompletionsWithRawResponse return CompletionsWithRawResponse(self._client.completions) @cached_property def chat(self) -> chat.ChatWithRawResponse: from .resources.chat import ChatWithRawResponse return ChatWithRawResponse(self._client.chat) @cached_property def embeddings(self) -> embeddings.EmbeddingsWithRawResponse: from .resources.embeddings import EmbeddingsWithRawResponse return EmbeddingsWithRawResponse(self._client.embeddings) @cached_property def files(self) -> files.FilesWithRawResponse: from .resources.files import FilesWithRawResponse return FilesWithRawResponse(self._client.files) @cached_property def images(self) -> images.ImagesWithRawResponse: from .resources.images import ImagesWithRawResponse return ImagesWithRawResponse(self._client.images) @cached_property def audio(self) -> audio.AudioWithRawResponse: from .resources.audio import AudioWithRawResponse return AudioWithRawResponse(self._client.audio) @cached_property def moderations(self) -> moderations.ModerationsWithRawResponse: from .resources.moderations import ModerationsWithRawResponse return ModerationsWithRawResponse(self._client.moderations) @cached_property def models(self) -> models.ModelsWithRawResponse: from .resources.models import ModelsWithRawResponse return ModelsWithRawResponse(self._client.models) @cached_property def fine_tuning(self) -> fine_tuning.FineTuningWithRawResponse: from .resources.fine_tuning import FineTuningWithRawResponse return FineTuningWithRawResponse(self._client.fine_tuning) @cached_property def vector_stores(self) -> vector_stores.VectorStoresWithRawResponse: from .resources.vector_stores import VectorStoresWithRawResponse return VectorStoresWithRawResponse(self._client.vector_stores) @cached_property def beta(self) -> beta.BetaWithRawResponse: from 
.resources.beta import BetaWithRawResponse return BetaWithRawResponse(self._client.beta) @cached_property def batches(self) -> batches.BatchesWithRawResponse: from .resources.batches import BatchesWithRawResponse return BatchesWithRawResponse(self._client.batches) @cached_property def uploads(self) -> uploads.UploadsWithRawResponse: from .resources.uploads import UploadsWithRawResponse return UploadsWithRawResponse(self._client.uploads) @cached_property def responses(self) -> responses.ResponsesWithRawResponse: from .resources.responses import ResponsesWithRawResponse return ResponsesWithRawResponse(self._client.responses) @cached_property def realtime(self) -> realtime.RealtimeWithRawResponse: from .resources.realtime import RealtimeWithRawResponse return RealtimeWithRawResponse(self._client.realtime) @cached_property def conversations(self) -> conversations.ConversationsWithRawResponse: from .resources.conversations import ConversationsWithRawResponse return ConversationsWithRawResponse(self._client.conversations) @cached_property def evals(self) -> evals.EvalsWithRawResponse: from .resources.evals import EvalsWithRawResponse return EvalsWithRawResponse(self._client.evals) @cached_property def containers(self) -> containers.ContainersWithRawResponse: from .resources.containers import ContainersWithRawResponse return ContainersWithRawResponse(self._client.containers) @cached_property def videos(self) -> videos.VideosWithRawResponse: from .resources.videos import VideosWithRawResponse return VideosWithRawResponse(self._client.videos)
OpenAIWithRawResponse
python
django__django
django/contrib/auth/password_validation.py
{ "start": 5195, "end": 7577 }
class ____: """ Validate that the password is sufficiently different from the user's attributes. If no specific attributes are provided, look at a sensible list of defaults. Attributes that don't exist are ignored. Comparison is made to not only the full attribute value, but also its components, so that, for example, a password is validated against either part of an email address, as well as the full address. """ DEFAULT_USER_ATTRIBUTES = ("username", "first_name", "last_name", "email") def __init__(self, user_attributes=DEFAULT_USER_ATTRIBUTES, max_similarity=0.7): self.user_attributes = user_attributes if max_similarity < 0.1: raise ValueError("max_similarity must be at least 0.1") self.max_similarity = max_similarity def validate(self, password, user=None): if not user: return password = password.lower() for attribute_name in self.user_attributes: value = getattr(user, attribute_name, None) if not value or not isinstance(value, str): continue value_lower = value.lower() value_parts = [*re.split(r"\W+", value_lower), value_lower] for value_part in value_parts: if exceeds_maximum_length_ratio( password, self.max_similarity, value_part ): continue if ( SequenceMatcher(a=password, b=value_part).quick_ratio() >= self.max_similarity ): try: verbose_name = str( user._meta.get_field(attribute_name).verbose_name ) except FieldDoesNotExist: verbose_name = attribute_name raise ValidationError( self.get_error_message(), code="password_too_similar", params={"verbose_name": verbose_name}, ) def get_error_message(self): return _("The password is too similar to the %(verbose_name)s.") def get_help_text(self): return _( "Your password can’t be too similar to your other personal information." )
UserAttributeSimilarityValidator
python
bokeh__bokeh
src/bokeh/core/serialization.py
{ "start": 2653, "end": 2798 }
class ____(TypedDict): type: Literal["array"] entries: NotRequired[list[AnyRep]] ArrayRepLike: TypeAlias = ArrayRep | list[AnyRep]
ArrayRep
python
redis__redis-py
tests/test_cluster.py
{ "start": 40344, "end": 98183 }
class ____: """ Tests for RedisCluster unique commands """ def test_case_insensitive_command_names(self, r): assert ( r.cluster_response_callbacks["cluster slots"] == r.cluster_response_callbacks["CLUSTER SLOTS"] ) def test_get_and_set(self, r): # get and set can't be tested independently of each other assert r.get("a") is None byte_string = b"value" integer = 5 unicode_string = chr(3456) + "abcd" + chr(3421) assert r.set("byte_string", byte_string) assert r.set("integer", 5) assert r.set("unicode_string", unicode_string) assert r.get("byte_string") == byte_string assert r.get("integer") == str(integer).encode() assert r.get("unicode_string").decode("utf-8") == unicode_string @pytest.mark.parametrize( "load_balancing_strategy", [ LoadBalancingStrategy.ROUND_ROBIN, LoadBalancingStrategy.ROUND_ROBIN_REPLICAS, LoadBalancingStrategy.RANDOM_REPLICA, ], ) def test_get_and_set_with_load_balanced_client( self, request, load_balancing_strategy: LoadBalancingStrategy ) -> None: r = _get_client( cls=RedisCluster, request=request, load_balancing_strategy=load_balancing_strategy, ) # get and set can't be tested independently of each other assert r.get("a") is None byte_string = b"value" assert r.set("byte_string", byte_string) # run the get command for the same key several times # to iterate over the read nodes assert r.get("byte_string") == byte_string assert r.get("byte_string") == byte_string assert r.get("byte_string") == byte_string def test_mget_nonatomic(self, r): assert r.mget_nonatomic([]) == [] assert r.mget_nonatomic(["a", "b"]) == [None, None] r["a"] = "1" r["b"] = "2" r["c"] = "3" assert r.mget_nonatomic("a", "other", "b", "c") == [b"1", None, b"2", b"3"] def test_mset_nonatomic(self, r): d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} assert r.mset_nonatomic(d) for k, v in d.items(): assert r[k] == v def test_config_set(self, r): assert r.config_set("slowlog-log-slower-than", 0) def test_cluster_config_resetstat(self, r): r.ping(target_nodes="all") all_info = 
r.info(target_nodes="all") prior_commands_processed = -1 for node_info in all_info.values(): prior_commands_processed = node_info["total_commands_processed"] assert prior_commands_processed >= 1 r.config_resetstat(target_nodes="all") all_info = r.info(target_nodes="all") for node_info in all_info.values(): reset_commands_processed = node_info["total_commands_processed"] assert reset_commands_processed < prior_commands_processed def test_client_setname(self, r): node = r.get_random_node() r.client_setname("redis_py_test", target_nodes=node) client_name = r.client_getname(target_nodes=node) assert_resp_response(r, client_name, "redis_py_test", b"redis_py_test") def test_exists(self, r): d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} r.mset_nonatomic(d) assert r.exists(*d.keys()) == len(d) def test_delete(self, r): d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} r.mset_nonatomic(d) assert r.delete(*d.keys()) == len(d) assert r.delete(*d.keys()) == 0 def test_touch(self, r): d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} r.mset_nonatomic(d) assert r.touch(*d.keys()) == len(d) def test_unlink(self, r): d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} r.mset_nonatomic(d) assert r.unlink(*d.keys()) == len(d) # Unlink is non-blocking so we sleep before # verifying the deletion sleep(0.1) assert r.unlink(*d.keys()) == 0 def test_pubsub_channels_merge_results(self, r): nodes = r.get_nodes() channels = [] pubsub_nodes = [] i = 0 for node in nodes: channel = f"foo{i}" # We will create different pubsub clients where each one is # connected to a different node p = r.pubsub(node) pubsub_nodes.append(p) p.subscribe(channel) b_channel = channel.encode("utf-8") channels.append(b_channel) # Assert that each node returns only the channel it subscribed to sub_channels = node.redis_connection.pubsub_channels() if not sub_channels: # Try again after a short sleep sleep(0.3) sub_channels = node.redis_connection.pubsub_channels() assert sub_channels == [b_channel] i += 1 # Assert that 
the cluster's pubsub_channels function returns ALL of # the cluster's channels result = r.pubsub_channels(target_nodes="all") result.sort() assert result == channels def test_pubsub_numsub_merge_results(self, r): nodes = r.get_nodes() pubsub_nodes = [] channel = "foo" b_channel = channel.encode("utf-8") for node in nodes: # We will create different pubsub clients where each one is # connected to a different node p = r.pubsub(node) pubsub_nodes.append(p) p.subscribe(channel) # Assert that each node returns that only one client is subscribed sub_chann_num = node.redis_connection.pubsub_numsub(channel) if sub_chann_num == [(b_channel, 0)]: sleep(0.3) sub_chann_num = node.redis_connection.pubsub_numsub(channel) assert sub_chann_num == [(b_channel, 1)] # Assert that the cluster's pubsub_numsub function returns ALL clients # subscribed to this channel in the entire cluster assert r.pubsub_numsub(channel, target_nodes="all") == [(b_channel, len(nodes))] def test_pubsub_numpat_merge_results(self, r): nodes = r.get_nodes() pubsub_nodes = [] pattern = "foo*" for node in nodes: # We will create different pubsub clients where each one is # connected to a different node p = r.pubsub(node) pubsub_nodes.append(p) p.psubscribe(pattern) # Assert that each node returns that only one client is subscribed sub_num_pat = node.redis_connection.pubsub_numpat() if sub_num_pat == 0: sleep(0.3) sub_num_pat = node.redis_connection.pubsub_numpat() assert sub_num_pat == 1 # Assert that the cluster's pubsub_numsub function returns ALL clients # subscribed to this channel in the entire cluster assert r.pubsub_numpat(target_nodes="all") == len(nodes) @skip_if_server_version_lt("2.8.0") def test_cluster_pubsub_channels(self, r): p = r.pubsub() p.subscribe("foo", "bar", "baz", "quux") for i in range(4): assert wait_for_message(p, timeout=0.5)["type"] == "subscribe" expected = [b"bar", b"baz", b"foo", b"quux"] assert all( [channel in r.pubsub_channels(target_nodes="all") for channel in expected] ) 
@skip_if_server_version_lt("2.8.0") def test_cluster_pubsub_numsub(self, r): p1 = r.pubsub() p1.subscribe("foo", "bar", "baz") for i in range(3): assert wait_for_message(p1, timeout=0.5)["type"] == "subscribe" p2 = r.pubsub() p2.subscribe("bar", "baz") for i in range(2): assert wait_for_message(p2, timeout=0.5)["type"] == "subscribe" p3 = r.pubsub() p3.subscribe("baz") assert wait_for_message(p3, timeout=0.5)["type"] == "subscribe" channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)] assert r.pubsub_numsub("foo", "bar", "baz", target_nodes="all") == channels @skip_if_redis_enterprise() def test_cluster_myid(self, r): node = r.get_random_node() myid = r.cluster_myid(node) assert len(myid) == 40 @skip_if_redis_enterprise() def test_cluster_slots(self, r): mock_all_nodes_resp(r, default_cluster_slots) cluster_slots = r.cluster_slots() assert isinstance(cluster_slots, dict) assert len(default_cluster_slots) == len(cluster_slots) assert cluster_slots.get((0, 8191)) is not None assert cluster_slots.get((0, 8191)).get("primary") == ("127.0.0.1", 7000) @skip_if_server_version_lt("7.0.0") @skip_if_redis_enterprise() def test_cluster_shards(self, r): cluster_shards = r.cluster_shards() assert isinstance(cluster_shards, list) assert isinstance(cluster_shards[0], dict) attributes = [ b"id", b"endpoint", b"ip", b"hostname", b"port", b"tls-port", b"role", b"replication-offset", b"health", ] for x in cluster_shards: assert_resp_response( r, list(x.keys()), ["slots", "nodes"], [b"slots", b"nodes"] ) try: x["nodes"] key = "nodes" except KeyError: key = b"nodes" for node in x[key]: for attribute in node.keys(): assert attribute in attributes @skip_if_server_version_lt("7.2.0") @skip_if_redis_enterprise() def test_cluster_myshardid(self, r): myshardid = r.cluster_myshardid() assert isinstance(myshardid, str) assert len(myshardid) > 0 @skip_if_redis_enterprise() def test_cluster_addslots(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_addslots(node, 1, 
2, 3) is True @skip_if_server_version_lt("7.0.0") @skip_if_redis_enterprise() def test_cluster_addslotsrange(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_addslotsrange(node, 1, 5) @skip_if_redis_enterprise() def test_cluster_countkeysinslot(self, r): node = r.nodes_manager.get_node_from_slot(1) mock_node_resp(node, 2) assert r.cluster_countkeysinslot(1) == 2 def test_cluster_count_failure_report(self, r): mock_all_nodes_resp(r, 0) assert r.cluster_count_failure_report("node_0") == 0 @skip_if_redis_enterprise() def test_cluster_delslots(self): cluster_slots = [ [0, 8191, ["127.0.0.1", 7000, "node_0"]], [8192, 16383, ["127.0.0.1", 7001, "node_1"]], ] r = get_mocked_redis_client( host=default_host, port=default_port, cluster_slots=cluster_slots ) mock_all_nodes_resp(r, "OK") node0 = r.get_node(default_host, 7000) node1 = r.get_node(default_host, 7001) assert r.cluster_delslots(0, 8192) == [True, True] assert node0.redis_connection.connection.read_response.called assert node1.redis_connection.connection.read_response.called @skip_if_server_version_lt("7.0.0") @skip_if_redis_enterprise() def test_cluster_delslotsrange(self): cluster_slots = [ [ 0, 8191, ["127.0.0.1", 7000, "node_0"], ], [ 8192, 16383, ["127.0.0.1", 7001, "node_1"], ], ] r = get_mocked_redis_client( host=default_host, port=default_port, cluster_slots=cluster_slots ) mock_all_nodes_resp(r, "OK") node = r.get_random_node() r.cluster_addslots(node, 1, 2, 3, 4, 5) assert r.cluster_delslotsrange(1, 5) @skip_if_redis_enterprise() def test_cluster_failover(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_failover(node) is True assert r.cluster_failover(node, "FORCE") is True assert r.cluster_failover(node, "TAKEOVER") is True with pytest.raises(RedisError): r.cluster_failover(node, "FORCT") @skip_if_redis_enterprise() def test_cluster_info(self, r): info = r.cluster_info() assert isinstance(info, dict) assert info["cluster_state"] == "ok" 
# ---- CLUSTER node/topology parsing, server & client commands, and ----
# ---- cross-slot data-type commands.                               ----
# Multi-key data-type tests use a shared hash tag (e.g. "{foo}") so all
# keys map to the same slot, as Redis Cluster requires for multi-key ops.

@skip_if_redis_enterprise()
def test_cluster_keyslot(self, r):
    mock_all_nodes_resp(r, 12182)
    assert r.cluster_keyslot("foo") == 12182

@skip_if_redis_enterprise()
def test_cluster_meet(self, r):
    node = r.get_default_node()
    mock_node_resp(node, "OK")
    assert r.cluster_meet("127.0.0.1", 6379) is True

@skip_if_redis_enterprise()
def test_cluster_nodes(self, r):
    # The raw CLUSTER NODES text reply is parsed into a dict keyed by
    # "ip:port" with per-node fields such as node_id.
    response = (
        "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 "
        "slave aa90da731f673a99617dfe930306549a09f83a6b 0 "
        "1447836263059 5 connected\n"
        "9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 "
        "master - 0 1447836264065 0 connected\n"
        "aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 "
        "myself,master - 0 0 2 connected 5461-10922\n"
        "1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
        "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
        "1447836262556 3 connected\n"
        "4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 "
        "master - 0 1447836262555 7 connected 0-5460\n"
        "19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 "
        "master - 0 1447836263562 3 connected 10923-16383\n"
        "fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 "
        "master,fail - 1447829446956 1447829444948 1 disconnected\n"
    )
    mock_all_nodes_resp(r, response)
    nodes = r.cluster_nodes()
    assert len(nodes) == 7
    assert nodes.get("172.17.0.7:7006") is not None
    assert (
        nodes.get("172.17.0.7:7006").get("node_id")
        == "c8253bae761cb1ecb2b61857d85dfe455a0fec8b"
    )

@skip_if_redis_enterprise()
def test_cluster_nodes_importing_migrating(self, r):
    # Slot entries in "[...]" brackets ("->-" migrating / "-<-" importing)
    # must be parsed into the "migrations" list, not into "slots".
    response = (
        "488ead2fcce24d8c0f158f9172cb1f4a9e040fe5 127.0.0.1:16381@26381 "
        "master - 0 1648975557664 3 connected 10923-16383\n"
        "8ae2e70812db80776f739a72374e57fc4ae6f89d 127.0.0.1:16380@26380 "
        "master - 0 1648975555000 2 connected 1 5461-10922 ["
        "2-<-ed8007ccfa2d91a7b76f8e6fba7ba7e257034a16]\n"
        "ed8007ccfa2d91a7b76f8e6fba7ba7e257034a16 127.0.0.1:16379@26379 "
        "myself,master - 0 1648975556000 1 connected 0 2-5460 ["
        "2->-8ae2e70812db80776f739a72374e57fc4ae6f89d]\n"
    )
    mock_all_nodes_resp(r, response)
    nodes = r.cluster_nodes()
    assert len(nodes) == 3
    node_16379 = nodes.get("127.0.0.1:16379")
    node_16380 = nodes.get("127.0.0.1:16380")
    node_16381 = nodes.get("127.0.0.1:16381")
    assert node_16379.get("migrations") == [
        {
            "slot": "2",
            "node_id": "8ae2e70812db80776f739a72374e57fc4ae6f89d",
            "state": "migrating",
        }
    ]
    assert node_16379.get("slots") == [["0"], ["2", "5460"]]
    assert node_16380.get("migrations") == [
        {
            "slot": "2",
            "node_id": "ed8007ccfa2d91a7b76f8e6fba7ba7e257034a16",
            "state": "importing",
        }
    ]
    assert node_16380.get("slots") == [["1"], ["5461", "10922"]]
    assert node_16381.get("slots") == [["10923", "16383"]]
    assert node_16381.get("migrations") == []

@skip_if_redis_enterprise()
def test_cluster_replicate(self, r):
    # Single-node target returns a bool; multi-node target returns a
    # dict of per-node results.
    node = r.get_random_node()
    all_replicas = r.get_replicas()
    mock_all_nodes_resp(r, "OK")
    assert r.cluster_replicate(node, "c8253bae761cb61857d") is True
    results = r.cluster_replicate(all_replicas, "c8253bae761cb61857d")
    if isinstance(results, dict):
        for res in results.values():
            assert res is True
    else:
        assert results is True

@skip_if_redis_enterprise()
def test_cluster_reset(self, r):
    mock_all_nodes_resp(r, "OK")
    assert r.cluster_reset() is True
    assert r.cluster_reset(False) is True
    all_results = r.cluster_reset(False, target_nodes="all")
    for res in all_results.values():
        assert res is True

@skip_if_redis_enterprise()
def test_cluster_save_config(self, r):
    node = r.get_random_node()
    all_nodes = r.get_nodes()
    mock_all_nodes_resp(r, "OK")
    assert r.cluster_save_config(node) is True
    all_results = r.cluster_save_config(all_nodes)
    for res in all_results.values():
        assert res is True

@skip_if_redis_enterprise()
def test_cluster_get_keys_in_slot(self, r):
    response = ["{foo}1", "{foo}2"]
    node = r.nodes_manager.get_node_from_slot(12182)
    mock_node_resp(node, response)
    keys = r.cluster_get_keys_in_slot(12182, 4)
    assert keys == response

@skip_if_redis_enterprise()
def test_cluster_set_config_epoch(self, r):
    mock_all_nodes_resp(r, "OK")
    assert r.cluster_set_config_epoch(3) is True
    all_results = r.cluster_set_config_epoch(3, target_nodes="all")
    for res in all_results.values():
        assert res is True

@skip_if_redis_enterprise()
def test_cluster_setslot(self, r):
    node = r.get_random_node()
    mock_node_resp(node, "OK")
    assert r.cluster_setslot(node, "node_0", 1218, "IMPORTING") is True
    assert r.cluster_setslot(node, "node_0", 1218, "NODE") is True
    assert r.cluster_setslot(node, "node_0", 1218, "MIGRATING") is True
    # NOTE(review): the two raises-branches below call cluster_failover,
    # not cluster_setslot — likely a copy/paste slip; confirm intent.
    with pytest.raises(RedisError):
        r.cluster_failover(node, "STABLE")
    with pytest.raises(RedisError):
        r.cluster_failover(node, "STATE")

def test_cluster_setslot_stable(self, r):
    # SETSLOT ... STABLE is routed to the node owning the slot.
    node = r.nodes_manager.get_node_from_slot(12182)
    mock_node_resp(node, "OK")
    assert r.cluster_setslot_stable(12182) is True
    assert node.redis_connection.connection.read_response.called

@skip_if_redis_enterprise()
def test_cluster_replicas(self, r):
    # CLUSTER REPLICAS reply (list of bytes lines) is parsed into a dict
    # keyed by "ip:port".
    response = [
        b"01eca22229cf3c652b6fca0d09ff6941e0d2e3 "
        b"127.0.0.1:6377@16377 slave "
        b"52611e796814b78e90ad94be9d769a4f668f9a 0 "
        b"1634550063436 4 connected",
        b"r4xfga22229cf3c652b6fca0d09ff69f3e0d4d "
        b"127.0.0.1:6378@16378 slave "
        b"52611e796814b78e90ad94be9d769a4f668f9a 0 "
        b"1634550063436 4 connected",
    ]
    mock_all_nodes_resp(r, response)
    replicas = r.cluster_replicas("52611e796814b78e90ad94be9d769a4f668f9a")
    assert replicas.get("127.0.0.1:6377") is not None
    assert replicas.get("127.0.0.1:6378") is not None
    assert (
        replicas.get("127.0.0.1:6378").get("node_id")
        == "r4xfga22229cf3c652b6fca0d09ff69f3e0d4d"
    )

@skip_if_server_version_lt("7.0.0")
def test_cluster_links(self, r):
    # CLUSTER LINKS: each "to" link has a matching "from" link and
    # consecutive entries refer to the same peer node. RESP2 returns
    # flat lists; RESP3 returns dicts with bytes keys.
    node = r.get_random_node()
    res = r.cluster_links(node)
    if is_resp2_connection(r):
        links_to = sum(x.count(b"to") for x in res)
        links_for = sum(x.count(b"from") for x in res)
        assert links_to == links_for
        for i in range(0, len(res) - 1, 2):
            assert res[i][3] == res[i + 1][3]
    else:
        links_to = len(list(filter(lambda x: x[b"direction"] == b"to", res)))
        links_for = len(list(filter(lambda x: x[b"direction"] == b"from", res)))
        assert links_to == links_for
        for i in range(0, len(res) - 1, 2):
            assert res[i][b"node"] == res[i + 1][b"node"]

# NOTE(review): method name typo — "flshslots" should read "flushslots".
def test_cluster_flshslots_not_implemented(self, r):
    with pytest.raises(NotImplementedError):
        r.cluster_flushslots()

def test_cluster_bumpepoch_not_implemented(self, r):
    with pytest.raises(NotImplementedError):
        r.cluster_bumpepoch()

@skip_if_redis_enterprise()
def test_readonly(self):
    r = get_mocked_redis_client(host=default_host, port=default_port)
    mock_all_nodes_resp(r, "OK")
    assert r.readonly() is True
    all_replicas_results = r.readonly(target_nodes="replicas")
    for res in all_replicas_results.values():
        assert res is True
    for replica in r.get_replicas():
        assert replica.redis_connection.connection.read_response.called

@skip_if_redis_enterprise()
def test_readwrite(self):
    r = get_mocked_redis_client(host=default_host, port=default_port)
    mock_all_nodes_resp(r, "OK")
    assert r.readwrite() is True
    all_replicas_results = r.readwrite(target_nodes="replicas")
    for res in all_replicas_results.values():
        assert res is True
    for replica in r.get_replicas():
        assert replica.redis_connection.connection.read_response.called

@skip_if_redis_enterprise()
def test_bgsave(self, r):
    assert r.bgsave()
    # brief pause so the first BGSAVE can finish before forcing a second
    sleep(0.3)
    assert r.bgsave(True)

def test_info(self, r):
    # Map keys to same slot
    r.set("x{1}", 1)
    r.set("y{1}", 2)
    r.set("z{1}", 3)
    # Get node that handles the slot
    slot = r.keyslot("x{1}")
    node = r.nodes_manager.get_node_from_slot(slot)
    # Run info on that node
    info = r.info(target_nodes=node)
    assert isinstance(info, dict)
    assert info["db0"]["keys"] == 3

def _init_slowlog_test(self, r, node):
    # Force every command onto the slowlog; return the previous limit so
    # callers can restore it via _teardown_slowlog_test.
    slowlog_lim = r.config_get("slowlog-log-slower-than", target_nodes=node)
    assert r.config_set("slowlog-log-slower-than", 0, target_nodes=node) is True
    return slowlog_lim["slowlog-log-slower-than"]

def _teardown_slowlog_test(self, r, node, prev_limit):
    assert (
        r.config_set("slowlog-log-slower-than", prev_limit, target_nodes=node)
        is True
    )

def test_slowlog_get(self, r, slowlog):
    unicode_string = chr(3456) + "abcd" + chr(3421)
    node = r.get_node_from_key(unicode_string)
    slowlog_limit = self._init_slowlog_test(r, node)
    assert r.slowlog_reset(target_nodes=node)
    r.get(unicode_string)
    slowlog = r.slowlog_get(target_nodes=node)
    assert isinstance(slowlog, list)
    commands = [log["command"] for log in slowlog]
    get_command = b" ".join((b"GET", unicode_string.encode("utf-8")))
    assert get_command in commands
    assert b"SLOWLOG RESET" in commands
    # the order should be ['GET <uni string>', 'SLOWLOG RESET'],
    # but if other clients are executing commands at the same time, there
    # could be commands, before, between, or after, so just check that
    # the two we care about are in the appropriate order.
    assert commands.index(get_command) < commands.index(b"SLOWLOG RESET")
    # make sure other attributes are typed correctly
    assert isinstance(slowlog[0]["start_time"], int)
    assert isinstance(slowlog[0]["duration"], int)
    # rollback the slowlog limit to its original value
    self._teardown_slowlog_test(r, node, slowlog_limit)

def test_slowlog_get_limit(self, r, slowlog):
    assert r.slowlog_reset()
    node = r.get_node_from_key("foo")
    slowlog_limit = self._init_slowlog_test(r, node)
    r.get("foo")
    slowlog = r.slowlog_get(1, target_nodes=node)
    assert isinstance(slowlog, list)
    # only one command, based on the number we passed to slowlog_get()
    assert len(slowlog) == 1
    self._teardown_slowlog_test(r, node, slowlog_limit)

def test_slowlog_length(self, r, slowlog):
    r.get("foo")
    node = r.nodes_manager.get_node_from_slot(key_slot(b"foo"))
    slowlog_len = r.slowlog_len(target_nodes=node)
    assert isinstance(slowlog_len, int)

def test_time(self, r):
    # TIME returns (unix-seconds, microseconds) as two ints.
    t = r.time(target_nodes=r.get_primaries()[0])
    assert len(t) == 2
    assert isinstance(t[0], int)
    assert isinstance(t[1], int)

@skip_if_server_version_lt("4.0.0")
def test_memory_usage(self, r):
    r.set("foo", "bar")
    assert isinstance(r.memory_usage("foo"), int)

@skip_if_server_version_lt("4.0.0")
@skip_if_redis_enterprise()
def test_memory_malloc_stats(self, r):
    assert r.memory_malloc_stats()

@skip_if_server_version_lt("4.0.0")
@skip_if_redis_enterprise()
def test_memory_stats(self, r):
    # put a key into the current db to make sure that "db.<current-db>"
    # has data
    r.set("foo", "bar")
    node = r.nodes_manager.get_node_from_slot(key_slot(b"foo"))
    stats = r.memory_stats(target_nodes=node)
    assert isinstance(stats, dict)
    for key, value in stats.items():
        if key.startswith("db."):
            assert not isinstance(value, list)

@skip_if_server_version_lt("4.0.0")
def test_memory_help(self, r):
    with pytest.raises(NotImplementedError):
        r.memory_help()

@skip_if_server_version_lt("4.0.0")
def test_memory_doctor(self, r):
    with pytest.raises(NotImplementedError):
        r.memory_doctor()

@skip_if_redis_enterprise()
def test_lastsave(self, r):
    node = r.get_primaries()[0]
    assert isinstance(r.lastsave(target_nodes=node), datetime.datetime)

def test_cluster_echo(self, r):
    node = r.get_primaries()[0]
    assert r.echo("foo bar", target_nodes=node) == b"foo bar"

@skip_if_server_version_lt("1.0.0")
def test_debug_segfault(self, r):
    with pytest.raises(NotImplementedError):
        r.debug_segfault()

def test_config_resetstat(self, r):
    # CONFIG RESETSTAT must reset the total_commands_processed counter
    # to below its previous value.
    node = r.get_primaries()[0]
    r.ping(target_nodes=node)
    prior_commands_processed = int(
        r.info(target_nodes=node)["total_commands_processed"]
    )
    assert prior_commands_processed >= 1
    r.config_resetstat(target_nodes=node)
    reset_commands_processed = int(
        r.info(target_nodes=node)["total_commands_processed"]
    )
    assert reset_commands_processed < prior_commands_processed

@skip_if_server_version_lt("6.2.0")
def test_client_trackinginfo(self, r):
    node = r.get_primaries()[0]
    res = r.client_trackinginfo(target_nodes=node)
    assert len(res) > 2
    assert "prefixes" in res or b"prefixes" in res

@skip_if_server_version_lt("2.9.50")
def test_client_pause(self, r):
    node = r.get_primaries()[0]
    assert r.client_pause(1, target_nodes=node)
    assert r.client_pause(timeout=1, target_nodes=node)
    with pytest.raises(RedisError):
        r.client_pause(timeout="not an integer", target_nodes=node)

@skip_if_server_version_lt("6.2.0")
@skip_if_redis_enterprise()
def test_client_unpause(self, r):
    assert r.client_unpause()

@skip_if_server_version_lt("5.0.0")
def test_client_id(self, r):
    node = r.get_primaries()[0]
    assert r.client_id(target_nodes=node) > 0

@skip_if_server_version_lt("5.0.0")
def test_client_unblock(self, r):
    # Unblocking our own (non-blocked) connection always reports False.
    node = r.get_primaries()[0]
    myid = r.client_id(target_nodes=node)
    assert not r.client_unblock(myid, target_nodes=node)
    assert not r.client_unblock(myid, error=True, target_nodes=node)
    assert not r.client_unblock(myid, error=False, target_nodes=node)

@skip_if_server_version_lt("6.0.0")
def test_client_getredir(self, r):
    # -1 means client-side-caching redirection is not enabled.
    node = r.get_primaries()[0]
    assert isinstance(r.client_getredir(target_nodes=node), int)
    assert r.client_getredir(target_nodes=node) == -1

@skip_if_server_version_lt("6.2.0")
def test_client_info(self, r):
    node = r.get_primaries()[0]
    info = r.client_info(target_nodes=node)
    assert isinstance(info, dict)
    assert "addr" in info

@skip_if_server_version_lt("2.6.9")
def test_client_kill(self, r, r2):
    # Kill the second named client by address and verify only the first
    # remains in CLIENT LIST.
    node = r.get_primaries()[0]
    r.client_setname("redis-py-c1", target_nodes="all")
    r2.client_setname("redis-py-c2", target_nodes="all")
    clients = [
        client
        for client in r.client_list(target_nodes=node)
        if client.get("name") in ["redis-py-c1", "redis-py-c2"]
    ]
    assert len(clients) == 2
    clients_by_name = {client.get("name"): client for client in clients}
    client_addr = clients_by_name["redis-py-c2"].get("addr")
    assert r.client_kill(client_addr, target_nodes=node) is True
    clients = [
        client
        for client in r.client_list(target_nodes=node)
        if client.get("name") in ["redis-py-c1", "redis-py-c2"]
    ]
    assert len(clients) == 1
    assert clients[0].get("name") == "redis-py-c1"

@skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_not_empty_string(self, r):
    # BITOP NOT of an empty string deletes the destination key.
    r["{foo}a"] = ""
    r.bitop("not", "{foo}r", "{foo}a")
    assert r.get("{foo}r") is None

@skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_not(self, r):
    test_str = b"\xaa\x00\xff\x55"
    correct = ~0xAA00FF55 & 0xFFFFFFFF
    r["{foo}a"] = test_str
    r.bitop("not", "{foo}r", "{foo}a")
    assert int(binascii.hexlify(r["{foo}r"]), 16) == correct

@skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_not_in_place(self, r):
    # Destination may be the same key as the source.
    test_str = b"\xaa\x00\xff\x55"
    correct = ~0xAA00FF55 & 0xFFFFFFFF
    r["{foo}a"] = test_str
    r.bitop("not", "{foo}a", "{foo}a")
    assert int(binascii.hexlify(r["{foo}a"]), 16) == correct

@skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_single_string(self, r):
    # AND/OR/XOR with a single operand copies the string unchanged.
    test_str = b"\x01\x02\xff"
    r["{foo}a"] = test_str
    r.bitop("and", "{foo}res1", "{foo}a")
    r.bitop("or", "{foo}res2", "{foo}a")
    r.bitop("xor", "{foo}res3", "{foo}a")
    assert r["{foo}res1"] == test_str
    assert r["{foo}res2"] == test_str
    assert r["{foo}res3"] == test_str

@skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_string_operands(self, r):
    # Shorter operand is zero-padded to the longer operand's length.
    r["{foo}a"] = b"\x01\x02\xff\xff"
    r["{foo}b"] = b"\x01\x02\xff"
    r.bitop("and", "{foo}res1", "{foo}a", "{foo}b")
    r.bitop("or", "{foo}res2", "{foo}a", "{foo}b")
    r.bitop("xor", "{foo}res3", "{foo}a", "{foo}b")
    assert int(binascii.hexlify(r["{foo}res1"]), 16) == 0x0102FF00
    assert int(binascii.hexlify(r["{foo}res2"]), 16) == 0x0102FFFF
    assert int(binascii.hexlify(r["{foo}res3"]), 16) == 0x000000FF

@skip_if_server_version_lt("6.2.0")
def test_cluster_copy(self, r):
    # COPY returns 0 when the source key does not exist.
    assert r.copy("{foo}a", "{foo}b") == 0
    r.set("{foo}a", "bar")
    assert r.copy("{foo}a", "{foo}b") == 1
    assert r.get("{foo}a") == b"bar"
    assert r.get("{foo}b") == b"bar"

@skip_if_server_version_lt("6.2.0")
def test_cluster_copy_and_replace(self, r):
    # Without replace=True, copying onto an existing key fails (0).
    r.set("{foo}a", "foo1")
    r.set("{foo}b", "foo2")
    assert r.copy("{foo}a", "{foo}b") == 0
    assert r.copy("{foo}a", "{foo}b", replace=True) == 1

@skip_if_server_version_lt("6.2.0")
def test_cluster_lmove(self, r):
    r.rpush("{foo}a", "one", "two", "three", "four")
    assert r.lmove("{foo}a", "{foo}b")
    assert r.lmove("{foo}a", "{foo}b", "right", "left")

@skip_if_server_version_lt("6.2.0")
def test_cluster_blmove(self, r):
    r.rpush("{foo}a", "one", "two", "three", "four")
    assert r.blmove("{foo}a", "{foo}b", 5)
    assert r.blmove("{foo}a", "{foo}b", 1, "RIGHT", "LEFT")

def test_cluster_msetnx(self, r):
    # MSETNX is all-or-nothing: the second call fails because {foo}a
    # already exists, so {foo}d must not be created.
    d = {"{foo}a": b"1", "{foo}b": b"2", "{foo}c": b"3"}
    assert r.msetnx(d)
    d2 = {"{foo}a": b"x", "{foo}d": b"4"}
    assert not r.msetnx(d2)
    for k, v in d.items():
        assert r[k] == v
    assert r.get("{foo}d") is None

def test_cluster_rename(self, r):
    r["{foo}a"] = "1"
    assert r.rename("{foo}a", "{foo}b")
    assert r.get("{foo}a") is None
    assert r["{foo}b"] == b"1"

def test_cluster_renamenx(self, r):
    # RENAMENX refuses to overwrite an existing destination.
    r["{foo}a"] = "1"
    r["{foo}b"] = "2"
    assert not r.renamenx("{foo}a", "{foo}b")
    assert r["{foo}a"] == b"1"
    assert r["{foo}b"] == b"2"

# LIST COMMANDS
def test_cluster_blpop(self, r):
    # BLPOP honors key-list order: drains {foo}b first, then {foo}a.
    r.rpush("{foo}a", "1", "2")
    r.rpush("{foo}b", "3", "4")
    assert_resp_response(
        r,
        r.blpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"3"),
        [b"{foo}b", b"3"],
    )
    assert_resp_response(
        r,
        r.blpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"4"),
        [b"{foo}b", b"4"],
    )
    assert_resp_response(
        r,
        r.blpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"1"),
        [b"{foo}a", b"1"],
    )
    assert_resp_response(
        r,
        r.blpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"2"),
        [b"{foo}a", b"2"],
    )
    assert r.blpop(["{foo}b", "{foo}a"], timeout=1) is None
    r.rpush("{foo}c", "1")
    assert_resp_response(
        r, r.blpop("{foo}c", timeout=1), (b"{foo}c", b"1"), [b"{foo}c", b"1"]
    )

def test_cluster_brpop(self, r):
    # BRPOP pops from the right end, still honoring key-list order.
    r.rpush("{foo}a", "1", "2")
    r.rpush("{foo}b", "3", "4")
    assert_resp_response(
        r,
        r.brpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"4"),
        [b"{foo}b", b"4"],
    )
    assert_resp_response(
        r,
        r.brpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"3"),
        [b"{foo}b", b"3"],
    )
    assert_resp_response(
        r,
        r.brpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"2"),
        [b"{foo}a", b"2"],
    )
    assert_resp_response(
        r,
        r.brpop(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"1"),
        [b"{foo}a", b"1"],
    )
    assert r.brpop(["{foo}b", "{foo}a"], timeout=1) is None
    r.rpush("{foo}c", "1")
    assert_resp_response(
        r, r.brpop("{foo}c", timeout=1), (b"{foo}c", b"1"), [b"{foo}c", b"1"]
    )

def test_cluster_brpoplpush(self, r):
    r.rpush("{foo}a", "1", "2")
    r.rpush("{foo}b", "3", "4")
    assert r.brpoplpush("{foo}a", "{foo}b") == b"2"
    assert r.brpoplpush("{foo}a", "{foo}b") == b"1"
    assert r.brpoplpush("{foo}a", "{foo}b", timeout=1) is None
    assert r.lrange("{foo}a", 0, -1) == []
    assert r.lrange("{foo}b", 0, -1) == [b"1", b"2", b"3", b"4"]

def test_cluster_brpoplpush_empty_string(self, r):
    r.rpush("{foo}a", "")
    assert r.brpoplpush("{foo}a", "{foo}b") == b""

def test_cluster_rpoplpush(self, r):
    r.rpush("{foo}a", "a1", "a2", "a3")
    r.rpush("{foo}b", "b1", "b2", "b3")
    assert r.rpoplpush("{foo}a", "{foo}b") == b"a3"
    assert r.lrange("{foo}a", 0, -1) == [b"a1", b"a2"]
    assert r.lrange("{foo}b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"]

def test_cluster_sdiff(self, r):
    r.sadd("{foo}a", "1", "2", "3")
    assert r.sdiff("{foo}a", "{foo}b") == {b"1", b"2", b"3"}
    r.sadd("{foo}b", "2", "3")
    assert r.sdiff("{foo}a", "{foo}b") == {b"1"}

def test_cluster_sdiffstore(self, r):
    r.sadd("{foo}a", "1", "2", "3")
    assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 3
    assert r.smembers("{foo}c") == {b"1", b"2", b"3"}
    r.sadd("{foo}b", "2", "3")
    assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 1
    assert r.smembers("{foo}c") == {b"1"}

def test_cluster_sinter(self, r):
    r.sadd("{foo}a", "1", "2", "3")
    assert r.sinter("{foo}a", "{foo}b") == set()
    r.sadd("{foo}b", "2", "3")
    assert r.sinter("{foo}a", "{foo}b") == {b"2", b"3"}

def test_cluster_sinterstore(self, r):
    r.sadd("{foo}a", "1", "2", "3")
    assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 0
    assert r.smembers("{foo}c") == set()
    r.sadd("{foo}b", "2", "3")
    assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 2
    assert r.smembers("{foo}c") == {b"2", b"3"}

def test_cluster_smove(self, r):
    r.sadd("{foo}a", "a1", "a2")
    r.sadd("{foo}b", "b1", "b2")
    assert r.smove("{foo}a", "{foo}b", "a1")
    assert r.smembers("{foo}a") == {b"a2"}
    assert r.smembers("{foo}b") == {b"b1", b"b2", b"a1"}

def test_cluster_sunion(self, r):
    r.sadd("{foo}a", "1", "2")
    r.sadd("{foo}b", "2", "3")
    assert r.sunion("{foo}a", "{foo}b") == {b"1", b"2", b"3"}

def test_cluster_sunionstore(self, r):
    r.sadd("{foo}a", "1", "2")
    r.sadd("{foo}b", "2", "3")
    assert r.sunionstore("{foo}c", "{foo}a", "{foo}b") == 3
    assert r.smembers("{foo}c") == {b"1", b"2", b"3"}

@skip_if_server_version_lt("6.2.0")
def test_cluster_zdiff(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
    r.zadd("{foo}b", {"a1": 1, "a2": 2})
    assert r.zdiff(["{foo}a", "{foo}b"]) == [b"a3"]
    response = r.zdiff(["{foo}a", "{foo}b"], withscores=True)
    assert_resp_response(
        r,
        response,
        [b"a3", b"3"],
        [[b"a3", 3.0]],
    )

@skip_if_server_version_lt("6.2.0")
def test_cluster_zdiffstore(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
    r.zadd("{foo}b", {"a1": 1, "a2": 2})
    assert r.zdiffstore("{foo}out", ["{foo}a", "{foo}b"])
    assert r.zrange("{foo}out", 0, -1) == [b"a3"]
    response = r.zrange("{foo}out", 0, -1, withscores=True)
    assert_resp_response(r, response, [(b"a3", 3.0)], [[b"a3", 3.0]])

@skip_if_server_version_lt("6.2.0")
def test_cluster_zinter(self, r):
    # ZINTER with SUM (default), MAX, MIN aggregation and with weights.
    r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert r.zinter(["{foo}a", "{foo}b", "{foo}c"]) == [b"a3", b"a1"]
    # invalid aggregation
    with pytest.raises(DataError):
        r.zinter(["{foo}a", "{foo}b", "{foo}c"], aggregate="foo", withscores=True)
    assert_resp_response(
        r,
        r.zinter(["{foo}a", "{foo}b", "{foo}c"], withscores=True),
        [(b"a3", 8), (b"a1", 9)],
        [[b"a3", 8], [b"a1", 9]],
    )
    assert_resp_response(
        r,
        r.zinter(["{foo}a", "{foo}b", "{foo}c"], withscores=True, aggregate="MAX"),
        [(b"a3", 5), (b"a1", 6)],
        [[b"a3", 5], [b"a1", 6]],
    )
    assert_resp_response(
        r,
        r.zinter(["{foo}a", "{foo}b", "{foo}c"], withscores=True, aggregate="MIN"),
        [(b"a1", 1), (b"a3", 1)],
        [[b"a1", 1], [b"a3", 1]],
    )
    assert_resp_response(
        r,
        r.zinter({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True),
        [(b"a3", 20.0), (b"a1", 23.0)],
        [[b"a3", 20.0], [b"a1", 23.0]],
    )

def test_cluster_zinterstore_sum(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 2
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a3", 8), (b"a1", 9)],
        [[b"a3", 8.0], [b"a1", 9.0]],
    )

def test_cluster_zinterstore_max(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert (
        r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
        == 2
    )
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a3", 5), (b"a1", 6)],
        [[b"a3", 5.0], [b"a1", 6.0]],
    )

def test_cluster_zinterstore_min(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
    r.zadd("{foo}b", {"a1": 2, "a2": 3, "a3": 5})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert (
        r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
        == 2
    )
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a1", 1), (b"a3", 3)],
        [[b"a1", 1.0], [b"a3", 3.0]],
    )

def test_cluster_zinterstore_with_weight(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert r.zinterstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 2
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a3", 20), (b"a1", 23)],
        [[b"a3", 20.0], [b"a1", 23.0]],
    )

@skip_if_server_version_lt("4.9.0")
def test_cluster_bzpopmax(self, r):
    # BZPOPMAX drains highest scores first, honoring key-list order.
    r.zadd("{foo}a", {"a1": 1, "a2": 2})
    r.zadd("{foo}b", {"b1": 10, "b2": 20})
    assert_resp_response(
        r,
        r.bzpopmax(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"b2", 20),
        [b"{foo}b", b"b2", 20],
    )
    assert_resp_response(
        r,
        r.bzpopmax(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"b1", 10),
        [b"{foo}b", b"b1", 10],
    )
    assert_resp_response(
        r,
        r.bzpopmax(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"a2", 2),
        [b"{foo}a", b"a2", 2],
    )
    assert_resp_response(
        r,
        r.bzpopmax(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"a1", 1),
        [b"{foo}a", b"a1", 1],
    )
    assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) is None
    r.zadd("{foo}c", {"c1": 100})
    assert_resp_response(
        r,
        r.bzpopmax("{foo}c", timeout=1),
        (b"{foo}c", b"c1", 100),
        [b"{foo}c", b"c1", 100],
    )

@skip_if_server_version_lt("4.9.0")
def test_cluster_bzpopmin(self, r):
    # BZPOPMIN drains lowest scores first, honoring key-list order.
    r.zadd("{foo}a", {"a1": 1, "a2": 2})
    r.zadd("{foo}b", {"b1": 10, "b2": 20})
    assert_resp_response(
        r,
        r.bzpopmin(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"b1", 10),
        [b"{foo}b", b"b1", 10],
    )
    assert_resp_response(
        r,
        r.bzpopmin(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}b", b"b2", 20),
        [b"{foo}b", b"b2", 20],
    )
    assert_resp_response(
        r,
        r.bzpopmin(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"a1", 1),
        [b"{foo}a", b"a1", 1],
    )
    assert_resp_response(
        r,
        r.bzpopmin(["{foo}b", "{foo}a"], timeout=1),
        (b"{foo}a", b"a2", 2),
        [b"{foo}a", b"a2", 2],
    )
    assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) is None
    r.zadd("{foo}c", {"c1": 100})
    assert_resp_response(
        r,
        r.bzpopmin("{foo}c", timeout=1),
        (b"{foo}c", b"c1", 100),
        [b"{foo}c", b"c1", 100],
    )

@skip_if_server_version_lt("6.2.0")
def test_cluster_zrangestore(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
    assert r.zrangestore("{foo}b", "{foo}a", 0, 1)
    assert r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"]
    assert r.zrangestore("{foo}b", "{foo}a", 1, 2)
    assert r.zrange("{foo}b", 0, -1) == [b"a2", b"a3"]
    assert_resp_response(
        r,
        r.zrange("{foo}b", 0, 1, withscores=True),
        [(b"a2", 2), (b"a3", 3)],
        [[b"a2", 2.0], [b"a3", 3.0]],
    )
    # reversed order
    assert r.zrangestore("{foo}b", "{foo}a", 1, 2, desc=True)
    assert r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"]
    # by score
    assert r.zrangestore(
        "{foo}b", "{foo}a", 2, 1, byscore=True, offset=0, num=1, desc=True
    )
    assert r.zrange("{foo}b", 0, -1) == [b"a2"]
    # by lex
    assert r.zrangestore(
        "{foo}b", "{foo}a", "[a2", "(a3", bylex=True, offset=0, num=1
    )
    assert r.zrange("{foo}b", 0, -1) == [b"a2"]

@skip_if_server_version_lt("6.2.0")
def test_cluster_zunion(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    # sum
    assert r.zunion(["{foo}a", "{foo}b", "{foo}c"]) == [b"a2", b"a4", b"a3", b"a1"]
    assert_resp_response(
        r,
        r.zunion(["{foo}a", "{foo}b", "{foo}c"], withscores=True),
        [(b"a2", 3), (b"a4", 4), (b"a3", 8), (b"a1", 9)],
        [[b"a2", 3.0], [b"a4", 4.0], [b"a3", 8.0], [b"a1", 9.0]],
    )
    # max
    assert_resp_response(
        r,
        r.zunion(["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX", withscores=True),
        [(b"a2", 2), (b"a4", 4), (b"a3", 5), (b"a1", 6)],
        [[b"a2", 2.0], [b"a4", 4.0], [b"a3", 5.0], [b"a1", 6.0]],
    )
    # min
    assert_resp_response(
        r,
        r.zunion(["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True),
        [(b"a1", 1), (b"a2", 1), (b"a3", 1), (b"a4", 4)],
        [[b"a1", 1.0], [b"a2", 1.0], [b"a3", 1.0], [b"a4", 4.0]],
    )
    # with weight
    assert_resp_response(
        r,
        r.zunion({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True),
        [(b"a2", 5), (b"a4", 12), (b"a3", 20), (b"a1", 23)],
        [[b"a2", 5.0], [b"a4", 12.0], [b"a3", 20.0], [b"a1", 23.0]],
    )

def test_cluster_zunionstore_sum(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 4
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a2", 3), (b"a4", 4), (b"a3", 8), (b"a1", 9)],
        [[b"a2", 3.0], [b"a4", 4.0], [b"a3", 8.0], [b"a1", 9.0]],
    )

def test_cluster_zunionstore_max(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert (
        r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
        == 4
    )
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a2", 2), (b"a4", 4), (b"a3", 5), (b"a1", 6)],
        [[b"a2", 2.0], [b"a4", 4.0], [b"a3", 5.0], [b"a1", 6.0]],
    )

def test_cluster_zunionstore_min(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 4})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert (
        r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
        == 4
    )
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a1", 1), (b"a2", 2), (b"a3", 3), (b"a4", 4)],
        [[b"a1", 1.0], [b"a2", 2.0], [b"a3", 3.0], [b"a4", 4.0]],
    )

def test_cluster_zunionstore_with_weight(self, r):
    r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
    r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
    r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
    assert r.zunionstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 4
    assert_resp_response(
        r,
        r.zrange("{foo}d", 0, -1, withscores=True),
        [(b"a2", 5), (b"a4", 12), (b"a3", 20), (b"a1", 23)],
        [[b"a2", 5.0], [b"a4", 12.0], [b"a3", 20.0], [b"a1", 23.0]],
    )

@skip_if_server_version_lt("2.8.9")
def test_cluster_pfcount(self, r):
    members = {b"1", b"2", b"3"}
    r.pfadd("{foo}a", *members)
    assert r.pfcount("{foo}a") == len(members)
    members_b = {b"2", b"3", b"4"}
    r.pfadd("{foo}b", *members_b)
    assert r.pfcount("{foo}b") == len(members_b)
    assert r.pfcount("{foo}a", "{foo}b") == len(members_b.union(members))

@skip_if_server_version_lt("2.8.9")
def test_cluster_pfmerge(self, r):
    mema = {b"1", b"2", b"3"}
    memb = {b"2", b"3", b"4"}
    memc = {b"5", b"6", b"7"}
    r.pfadd("{foo}a", *mema)
    r.pfadd("{foo}b", *memb)
    r.pfadd("{foo}c", *memc)
    r.pfmerge("{foo}d", "{foo}c", "{foo}a")
    assert r.pfcount("{foo}d") == 6
    r.pfmerge("{foo}d", "{foo}b")
    assert r.pfcount("{foo}d") == 7

def test_cluster_sort_store(self, r):
    r.rpush("{foo}a", "2", "3", "1")
    assert r.sort("{foo}a", store="{foo}sorted_values") == 3
    assert r.lrange("{foo}sorted_values", 0, -1) == [b"1", b"2", b"3"]

# GEO COMMANDS
@skip_if_server_version_lt("6.2.0")
def test_cluster_geosearchstore(self, r):
    values = (2.1909389952632, 41.433791470673, "place1") + (
        2.1873744593677,
        41.406342043777,
        "place2",
    )
    r.geoadd("{foo}barcelona", values)
    r.geosearchstore(
        "{foo}places_barcelona",
        "{foo}barcelona",
        longitude=2.191,
        latitude=41.433,
        radius=1000,
    )
    assert r.zrange("{foo}places_barcelona", 0, -1) == [b"place1"]

@skip_unless_arch_bits(64)
@skip_if_server_version_lt("6.2.0")
def test_geosearchstore_dist(self, r):
    values = (2.1909389952632, 41.433791470673, "place1") + (
        2.1873744593677,
        41.406342043777,
        "place2",
    )
    r.geoadd("{foo}barcelona", values)
    r.geosearchstore(
        "{foo}places_barcelona",
        "{foo}barcelona",
        longitude=2.191,
        latitude=41.433,
        radius=1000,
        storedist=True,
    )
    # instead of save the geo score, the distance is saved.
    assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301

@skip_if_server_version_lt("3.2.0")
def test_cluster_georadius_store(self, r):
    values = (2.1909389952632, 41.433791470673, "place1") + (
        2.1873744593677,
        41.406342043777,
        "place2",
    )
    r.geoadd("{foo}barcelona", values)
    r.georadius(
        "{foo}barcelona", 2.191, 41.433, 1000, store="{foo}places_barcelona"
    )
    assert r.zrange("{foo}places_barcelona", 0, -1) == [b"place1"]

@skip_unless_arch_bits(64)
@skip_if_server_version_lt("3.2.0")
def test_cluster_georadius_store_dist(self, r):
    values = (2.1909389952632, 41.433791470673, "place1") + (
        2.1873744593677,
        41.406342043777,
        "place2",
    )
    r.geoadd("{foo}barcelona", values)
    r.georadius(
        "{foo}barcelona", 2.191, 41.433, 1000, store_dist="{foo}places_barcelona"
    )
    # instead of save the geo score, the distance is saved.
    assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301

def test_cluster_dbsize(self, r):
    # DBSIZE against all primaries sums key counts across the cluster.
    d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
    assert r.mset_nonatomic(d)
    assert r.dbsize(target_nodes="primaries") == len(d)

def test_cluster_keys(self, r):
    assert r.keys() == []
    keys_with_underscores = {b"test_a", b"test_b"}
    keys = keys_with_underscores.union({b"testc"})
    for key in keys:
        r[key] = 1
    assert (
        set(r.keys(pattern="test_*", target_nodes="primaries"))
        == keys_with_underscores
    )
    assert set(r.keys(pattern="test*", target_nodes="primaries")) == keys

# SCAN COMMANDS
@skip_if_server_version_lt("2.8.0")
def test_cluster_scan(self, r):
    # Cluster SCAN returns a per-node cursor dict plus merged keys;
    # a cursor of 0 on every node means the scan is complete.
    r.set("a", 1)
    r.set("b", 2)
    r.set("c", 3)

    for target_nodes, nodes in zip(
        ["primaries", "replicas"], [r.get_primaries(), r.get_replicas()]
    ):
        cursors, keys = r.scan(target_nodes=target_nodes)
        assert sorted(keys) == [b"a", b"b", b"c"]
        assert sorted(cursors.keys()) == sorted(node.name for node in nodes)
        assert all(cursor == 0 for cursor in cursors.values())

        cursors, keys = r.scan(match="a*", target_nodes=target_nodes)
        assert sorted(keys) == [b"a"]
        assert sorted(cursors.keys()) == sorted(node.name for node in nodes)
        assert all(cursor == 0 for cursor in cursors.values())

@skip_if_server_version_lt("6.0.0")
def test_cluster_scan_type(self, r):
    # SCAN with TYPE filter, on primaries and replicas.
    r.sadd("a-set", 1)
    r.sadd("b-set", 1)
    r.sadd("c-set", 1)
    r.hset("a-hash", "foo", 2)
    r.lpush("a-list", "aux", 3)

    for target_nodes, nodes in zip(
        ["primaries", "replicas"], [r.get_primaries(), r.get_replicas()]
    ):
        cursors, keys = r.scan(_type="SET", target_nodes=target_nodes)
        assert sorted(keys) == [b"a-set", b"b-set", b"c-set"]
        assert sorted(cursors.keys()) == sorted(node.name for node in nodes)
        assert all(cursor == 0 for cursor in cursors.values())

        cursors, keys = r.scan(_type="SET", match="a*", target_nodes=target_nodes)
        assert sorted(keys) == [b"a-set"]
        assert sorted(cursors.keys()) == sorted(node.name for node in nodes)
        assert all(cursor == 0 for cursor in cursors.values())
@skip_if_server_version_lt("2.8.0") def test_cluster_scan_iter(self, r): keys_all = [] keys_1 = [] for i in range(100): s = str(i) r.set(s, 1) keys_all.append(s.encode("utf-8")) if s.startswith("1"): keys_1.append(s.encode("utf-8")) keys_all.sort() keys_1.sort() for target_nodes in ["primaries", "replicas"]: keys = r.scan_iter(target_nodes=target_nodes) assert sorted(keys) == keys_all keys = r.scan_iter(match="1*", target_nodes=target_nodes) assert sorted(keys) == keys_1 def test_cluster_randomkey(self, r): node = r.get_node_from_key("{foo}") assert r.randomkey(target_nodes=node) is None for key in ("{foo}a", "{foo}b", "{foo}c"): r[key] = 1 assert r.randomkey(target_nodes=node) in (b"{foo}a", b"{foo}b", b"{foo}c") @skip_if_server_version_lt("6.0.0") @skip_if_redis_enterprise() def test_acl_log(self, r, request): key = "{cache}:" node = r.get_node_from_key(key) username = "redis-py-user" def teardown(): r.acl_deluser(username, target_nodes="primaries") request.addfinalizer(teardown) r.acl_setuser( username, enabled=True, reset=True, commands=["+get", "+set", "+select", "+cluster", "+command", "+info"], keys=["{cache}:*"], nopass=True, target_nodes="primaries", ) r.acl_log_reset(target_nodes=node) user_client = _get_client( RedisCluster, request, flushdb=False, username=username ) # Valid operation and key assert user_client.set("{cache}:0", 1) assert user_client.get("{cache}:0") == b"1" # Invalid key with pytest.raises(NoPermissionError): user_client.get("{cache}violated_cache:0") # Invalid operation with pytest.raises(NoPermissionError): user_client.hset("{cache}:0", "hkey", "hval") assert isinstance(r.acl_log(target_nodes=node), list) assert len(r.acl_log(target_nodes=node)) == 3 assert len(r.acl_log(count=1, target_nodes=node)) == 1 assert isinstance(r.acl_log(target_nodes=node)[0], dict) assert "client-info" in r.acl_log(count=1, target_nodes=node)[0] assert r.acl_log_reset(target_nodes=node) def generate_lib_code(self, lib_name): return f"""#!js api_version=1.0 
name={lib_name}\n redis.registerFunction('foo', ()=>{{return 'bar'}})""" # noqa def try_delete_libs(self, r, *lib_names): for lib_name in lib_names: try: r.tfunction_delete(lib_name) except Exception: pass @pytest.mark.onlycluster
TestClusterRedisCommands
python
gevent__gevent
src/gevent/tests/test__greenlet.py
{ "start": 31221, "end": 31618 }
class ____(greentest.TestCase): # pragma: no cover (we only do coverage on pure-Python) def test_c_extension(self): self.assertEqual(greenlet.Greenlet.__module__, 'gevent._gevent_cgreenlet') self.assertEqual(greenlet.SpawnedLink.__module__, 'gevent._gevent_cgreenlet') @greentest.skipWithCExtensions("Needs pure python")
TestCExt
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 788059, "end": 792425 }
class ____(sgqlc.types.Type, Node): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "color", "created_at", "description", "is_default", "issues", "name", "pull_requests", "repository", "resource_path", "updated_at", "url", ) color = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="color") created_at = sgqlc.types.Field(DateTime, graphql_name="createdAt") description = sgqlc.types.Field(String, graphql_name="description") is_default = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="isDefault" ) issues = sgqlc.types.Field( sgqlc.types.non_null(IssueConnection), graphql_name="issues", args=sgqlc.types.ArgDict( ( ( "order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None), ), ( "labels", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None, ), ), ( "states", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(IssueState)), graphql_name="states", default=None, ), ), ( "filter_by", sgqlc.types.Arg( IssueFilters, graphql_name="filterBy", default=None ), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") pull_requests = sgqlc.types.Field( sgqlc.types.non_null(PullRequestConnection), graphql_name="pullRequests", args=sgqlc.types.ArgDict( ( ( "states", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None, ), ), ( "labels", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None, ), ), ( "head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None), ), ( "base_ref_name", 
sgqlc.types.Arg(String, graphql_name="baseRefName", default=None), ), ( "order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) repository = sgqlc.types.Field( sgqlc.types.non_null("Repository"), graphql_name="repository" ) resource_path = sgqlc.types.Field( sgqlc.types.non_null(URI), graphql_name="resourcePath" ) updated_at = sgqlc.types.Field(DateTime, graphql_name="updatedAt") url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
Label
python
getsentry__sentry
src/sentry/integrations/github/client.py
{ "start": 10564, "end": 30461 }
class ____( GithubProxyClient, RepositoryClient, CommitContextClient, RepoTreesClient, StatusCheckClient ): allow_redirects = True base_url = "https://api.github.com" integration_name = IntegrationProviderSlug.GITHUB.value # Github gives us links to navigate, however, let's be safe in case we're fed garbage page_number_limit = 50 # With a default of 100 per page -> 5,000 items def get_last_commits(self, repo: str, end_sha: str) -> Sequence[Any]: """ Return API request that fetches last ~30 commits see https://docs.github.com/en/rest/commits/commits#list-commits-on-a-repository using end_sha as parameter. """ return self.get_cached(f"/repos/{repo}/commits", params={"sha": end_sha}) def compare_commits(self, repo: str, start_sha: str, end_sha: str) -> Any: """ See https://docs.github.com/en/rest/commits/commits#compare-two-commits where start sha is oldest and end is most recent. """ return self.get_cached(f"/repos/{repo}/compare/{start_sha}...{end_sha}") def repo_hooks(self, repo: str) -> Sequence[Any]: """ https://docs.github.com/en/rest/webhooks/repos#list-repository-webhooks """ return self.get(f"/repos/{repo}/hooks") def get_commits(self, repo: str) -> Sequence[Any]: """ https://docs.github.com/en/rest/commits/commits#list-commits """ return self.get(f"/repos/{repo}/commits") def get_commit(self, repo: str, sha: str) -> Any: """ https://docs.github.com/en/rest/commits/commits#get-a-commit """ return self.get_cached(f"/repos/{repo}/commits/{sha}") def get_installation_info(self, installation_id: int | str) -> Any: """ https://docs.github.com/en/rest/apps/apps?apiVersion=2022-11-28#get-an-installation-for-the-authenticated-app """ return self.get(f"/app/installations/{installation_id}") def get_merge_commit_sha_from_commit(self, repo: Repository, sha: str) -> str | None: """ Get the merge commit sha from a commit sha. 
""" response = self.get_pullrequest_from_commit(repo.name, sha) if not response or (isinstance(response, list) and len(response) != 1): # the response should return a single merged PR, return if multiple return None (pull_request,) = response if pull_request["state"] == "open": metrics.incr( "github_pr_comment.queue_comment_check.open_pr", sample_rate=1.0, ) return None return pull_request.get("merge_commit_sha") def get_pullrequest_from_commit(self, repo: str, sha: str) -> Any: """ https://docs.github.com/en/rest/commits/commits#list-pull-requests-associated-with-a-commit Returns the merged pull request that introduced the commit to the repository. If the commit is not present in the default branch, will only return open pull requests associated with the commit. """ return self.get(f"/repos/{repo}/commits/{sha}/pulls") def get_pullrequest_files(self, repo: str, pull_number: str) -> Any: """ https://docs.github.com/en/rest/pulls/pulls#list-pull-requests-files Returns up to 30 files associated with a pull request. Responses are paginated. 
""" return self.get(f"/repos/{repo}/pulls/{pull_number}/files") def get_repo(self, repo: str) -> Any: """ https://docs.github.com/en/rest/repos/repos#get-a-repository """ return self.get(f"/repos/{repo}") # https://docs.github.com/en/rest/rate-limit?apiVersion=2022-11-28 def get_rate_limit(self, specific_resource: str = "core") -> GithubRateLimitInfo: """This gives information of the current rate limit""" # There's more but this is good enough assert specific_resource in ("core", "search", "graphql") return GithubRateLimitInfo(self.get("/rate_limit")["resources"][specific_resource]) # This method is used by RepoTreesIntegration def get_remaining_api_requests(self) -> int: """This gives information of the current rate limit""" return self.get_rate_limit().remaining # This method is used by RepoTreesIntegration # https://docs.github.com/en/rest/git/trees#get-a-tree def get_tree(self, repo_full_name: str, tree_sha: str) -> list[dict[str, Any]]: # We do not cache this call since it is a rather large object contents: dict[str, Any] = self.get( f"/repos/{repo_full_name}/git/trees/{tree_sha}", # Will cause all objects or subtrees referenced by the tree specified in :tree_sha params={"recursive": 1}, ) # If truncated is true in the response then the number of items in the tree array exceeded our maximum limit. # If you need to fetch more items, use the non-recursive method of fetching trees, and fetch one sub-tree at a time. # Note: The limit for the tree array is 100,000 entries with a maximum size of 7 MB when using the recursive parameter. # XXX: We will need to improve this by iterating through trees without using the recursive parameter if contents.get("truncated"): # e.g. getsentry/DataForThePeople logger.warning( "The tree for %s has been truncated. 
Use different a approach for retrieving contents of tree.", repo_full_name, ) return contents["tree"] # Used by RepoTreesIntegration def should_count_api_error(self, error: ApiError, extra: dict[str, str]) -> bool: """ Returns a boolean indicating whether the error should count towards the connection errors tally. """ should_count_error = False error_message = error.json.get("message") if error.json else error.text if error_message in ( "Git Repository is empty.", "Not Found.", # The app does not have access to the repo "Repository access blocked", # GitHub has blocked the repository "Bad credentials", # No permission granted for this repo ): logger.warning(error_message, extra=extra) elif error_message in ( "Server Error", # Github failed to respond "Connection reset by peer", # Connection reset by GitHub "Connection broken: invalid chunk length", # Connection broken by chunk with invalid length "Unable to reach host:", # Unable to reach host at the moment ): should_count_error = True elif error_message and error_message.startswith( "Due to U.S. trade controls law restrictions, this GitHub" ): # Raising the error will stop execution and let the task handle it raise error else: # We do not raise the exception so we can keep iterating through the repos. # Nevertheless, investigate the error to determine if we should abort the processing logger.warning("Continuing execution. Investigate: %s", error_message, extra=extra) return should_count_error def get_repos(self, page_number_limit: int | None = None) -> list[dict[str, Any]]: """ This fetches all repositories accessible to the Github App https://docs.github.com/en/rest/apps/installations#list-repositories-accessible-to-the-app-installation It uses page_size from the base class to specify how many items per page. The upper bound of requests is controlled with self.page_number_limit to prevent infinite requests. 
""" return self._get_with_pagination( "/installation/repositories", response_key="repositories", page_number_limit=page_number_limit, ) def search_repositories(self, query: bytes) -> Mapping[str, Sequence[Any]]: """ Find repositories matching a query. https://docs.github.com/en/rest/search/search?apiVersion=2022-11-28#search-repositories NOTE: All search APIs (except code search) share a rate limit of 30 requests/minute """ return self.get("/search/repositories", params={"q": query}) def get_assignees(self, repo: str) -> Sequence[Any]: """ https://docs.github.com/en/rest/issues/assignees#list-assignees """ return self._get_with_pagination(f"/repos/{repo}/assignees") def _get_with_pagination( self, path: str, response_key: str | None = None, page_number_limit: int | None = None ) -> list[Any]: """ Github uses the Link header to provide pagination links. Github recommends using the provided link relations and not constructing our own URL. https://docs.github.com/en/rest/guides/traversing-with-pagination Use response_key when the API stores the results within a key. For instance, the repositories API returns the list of repos under the "repositories" key """ if page_number_limit is None or page_number_limit > self.page_number_limit: page_number_limit = self.page_number_limit with sentry_sdk.start_span( op=f"{self.integration_type}.http.pagination", name=f"{self.integration_type}.http_response.pagination.{self.name}", ): output: list[dict[str, Any]] = [] page_number = 1 resp = self.get(path, params={"per_page": self.page_size}) output.extend(resp) if not response_key else output.extend(resp[response_key]) next_link = get_next_link(resp) # XXX: In order to speed up this function we will need to parallelize this # Use ThreadPoolExecutor; see src/sentry/utils/snuba.py#L358 while next_link and page_number < page_number_limit: # If a per_page is specified, GitHub preserves the per_page value # in the response headers. 
resp = self.get(next_link) output.extend(resp) if not response_key else output.extend(resp[response_key]) next_link = get_next_link(resp) page_number += 1 return output def search_issues(self, query: str) -> Mapping[str, Sequence[Mapping[str, Any]]]: """ https://docs.github.com/en/rest/search?apiVersion=2022-11-28#search-issues-and-pull-requests NOTE: All search APIs (except code search) share a rate limit of 30 requests/minute """ return self.get("/search/issues", params={"q": query}) def get_issue(self, repo: str, number: str) -> Any: """ https://docs.github.com/en/rest/issues/issues#get-an-issue """ return self.get(f"/repos/{repo}/issues/{number}") def get_issue_comments(self, repo: str, issue_number: str) -> Any: """ https://docs.github.com/en/rest/issues/comments#list-issue-comments """ return self.get(f"/repos/{repo}/issues/{issue_number}/comments") def get_pullrequest_comments(self, repo: str, pull_number: str) -> Any: """ https://docs.github.com/en/rest/pulls/comments#list-review-comments-on-a-pull-request """ return self.get(f"/repos/{repo}/pulls/{pull_number}/comments") def create_issue(self, repo: str, data: Mapping[str, Any]) -> Any: """ https://docs.github.com/en/rest/issues/issues#create-an-issue """ endpoint = f"/repos/{repo}/issues" return self.post(endpoint, data=data) def update_issue_assignees(self, repo: str, issue_number: str, assignees: list[str]) -> Any: """ https://docs.github.com/en/rest/issues/issues#update-an-issue """ endpoint = f"/repos/{repo}/issues/{issue_number}" return self.patch(endpoint, data={"assignees": assignees}) def update_issue_status(self, repo: str, issue_number: str, status: str) -> Any: """ https://docs.github.com/en/rest/issues/issues#update-an-issue """ endpoint = f"/repos/{repo}/issues/{issue_number}" return self.patch(endpoint, data={"state": status}) def create_comment(self, repo: str, issue_id: str, data: dict[str, Any]) -> Any: """ https://docs.github.com/en/rest/issues/comments#create-an-issue-comment """ 
endpoint = f"/repos/{repo}/issues/{issue_id}/comments" return self.post(endpoint, data=data) def update_comment( self, repo: str, issue_id: str, comment_id: str, data: dict[str, Any] ) -> Any: endpoint = f"/repos/{repo}/issues/comments/{comment_id}" return self.patch(endpoint, data=data) def create_pr_comment(self, repo: Repository, pr: PullRequest, data: dict[str, Any]) -> Any: return self.create_comment(repo.name, pr.key, data) def update_pr_comment( self, repo: Repository, pr: PullRequest, pr_comment: PullRequestComment, data: dict[str, Any], ) -> Any: return self.update_comment(repo.name, pr.key, pr_comment.external_id, data) def get_comment_reactions(self, repo: str, comment_id: str) -> Any: """ https://docs.github.com/en/rest/issues/comments?#get-an-issue-comment """ endpoint = f"/repos/{repo}/issues/comments/{comment_id}" response = self.get(endpoint) reactions = response.get("reactions", {}) reactions.pop("url", None) return reactions def get_user(self, gh_username: str) -> Any: """ https://docs.github.com/en/rest/users/users#get-a-user """ return self.get(f"/users/{gh_username}") def get_labels(self, owner: str, repo: str) -> list[Any]: """ Fetches all labels for a repository. 
https://docs.github.com/en/rest/issues/labels#list-labels-for-a-repository """ return self._get_with_pagination(f"/repos/{owner}/{repo}/labels") def check_file(self, repo: Repository, path: str, version: str | None) -> object | None: return self.head_cached(path=f"/repos/{repo.name}/contents/{path}", params={"ref": version}) def get_file( self, repo: Repository, path: str, ref: str | None, codeowners: bool = False ) -> str: """Get the contents of a file See https://docs.github.com/en/rest/reference/repos#get-repository-content """ from base64 import b64decode headers = {"Content-Type": "application/raw; charset=utf-8"} if codeowners else {} contents = self.get( path=f"/repos/{repo.name}/contents/{path}", params={"ref": ref}, raw_response=True if codeowners else False, headers=headers, ) result = ( contents.content.decode("utf-8") if codeowners else b64decode(contents["content"]).decode("utf-8") ) return result def get_blame_for_files( self, files: Sequence[SourceLineInfo], extra: dict[str, Any] ) -> list[FileBlameInfo]: log_info = { **extra, "provider": IntegrationProviderSlug.GITHUB, "organization_integration_id": self.org_integration_id, } metrics.incr("integrations.github.get_blame_for_files") try: rate_limit = self.get_rate_limit(specific_resource="graphql") except ApiError: # Some GitHub instances don't enforce rate limiting and will respond with a 404 pass else: if rate_limit.remaining < MINIMUM_REQUESTS: metrics.incr( "integrations.github.get_blame_for_files.not_enough_requests_remaining" ) logger.error( "sentry.integrations.github.get_blame_for_files.rate_limit", extra={ "provider": IntegrationProviderSlug.GITHUB, "specific_resource": "graphql", "remaining": rate_limit.remaining, "next_window": rate_limit.next_window(), "organization_integration_id": self.org_integration_id, }, ) raise ApiRateLimitedError("Not enough requests remaining for GitHub") file_path_mapping = generate_file_path_mapping(files) query, variables = create_blame_query(file_path_mapping, 
extra=log_info) data = {"query": query, "variables": variables} cache_key = self.get_cache_key("/graphql", "", orjson.dumps(data).decode()) response = self.check_cache(cache_key) if response: metrics.incr("integrations.github.get_blame_for_files.got_cached") logger.info( "sentry.integrations.github.get_blame_for_files.got_cached", extra=log_info, ) else: try: response = self.post( path="/graphql", data=data, allow_text=False, ) except ValueError as e: logger.exception(str(e), log_info) return [] else: self.set_cache(cache_key, response, 60) if not is_graphql_response(response): raise ApiError("Response is not JSON") errors = response.get("errors", []) if len(errors) > 0: if any([error for error in errors if error.get("type") == "RATE_LIMITED"]): raise ApiRateLimitedError("GitHub rate limit exceeded") # When data is present, it means that the query was at least partially successful, # usually a missing repo/branch/file which is expected with wrong configurations. # If data is not present, the query may be formed incorrectly, so raise an error. 
if not response.get("data"): err_message = "" for error in response.get("errors", []): err = error.get("message", "") err_message += err + "\n" if err and "something went wrong" in err.lower(): raise UnknownHostError(err) raise ApiError(err_message) detail = str(response.get("detail", "")) if detail and "internal error" in detail.lower(): errorId = response.get("errorId", "") logger.info( "github.get_blame_for_files.host_error", extra={**log_info, "errorId": errorId} ) raise UnknownHostError("Something went wrong when communicating with GitHub") return extract_commits_from_blame_response( response=response, file_path_mapping=file_path_mapping, files=files, extra={ **extra, "provider": IntegrationProviderSlug.GITHUB, "organization_integration_id": self.org_integration_id, }, ) def create_check_run(self, repo: str, data: dict[str, Any]) -> Any: """ https://docs.github.com/en/rest/checks/runs#create-a-check-run The repo must be in the format of "owner/repo". """ endpoint = f"/repos/{repo}/check-runs" return self.post(endpoint, data=data) def get_check_runs(self, repo: str, sha: str) -> Any: """ https://docs.github.com/en/rest/checks/runs#list-check-runs-for-a-git-reference The repo must be in the format of "owner/repo". SHA can be any reference. """ endpoint = f"/repos/{repo}/commits/{sha}/check-runs" return self.get(endpoint)
GitHubBaseClient
python
google__jax
jax/experimental/mosaic/gpu/profiler.py
{ "start": 5373, "end": 10065 }
class ____: ENTER = 0 EXIT = 1 << 31 def __init__(self, entries_per_warpgroup: int, dump_path: str = "sponge"): self.entries_per_warpgroup = entries_per_warpgroup self.interned_names: dict[str, int] = {} if dump_path == "sponge": self.dump_path = os.getenv( "TEST_UNDECLARED_OUTPUTS_DIR", tempfile.gettempdir() ) else: self.dump_path = dump_path def _num_warpgroups( self, grid: tuple[int, ...], block: tuple[int, ...] ) -> int: if math.prod(block) % WARPGROUP_SIZE: raise ValueError("Block size is not a multiple of warpgroup size") return math.prod(grid) * math.prod(block) // WARPGROUP_SIZE def mlir_buffer_type( self, grid: tuple[int, ...], block: tuple[int, ...] ) -> ir.Type: return ir.MemRefType.get( (self._num_warpgroups(grid, block) * self.entries_per_warpgroup,), ir.IntegerType.get_signless(32), ) def jax_buffer_type( self, grid: tuple[int, ...], block: tuple[int, ...] ) -> ir.Type: return jax.ShapeDtypeStruct( (self._num_warpgroups(grid, block) * self.entries_per_warpgroup,), jnp.uint32, ) def smem_i32_elements(self, block: tuple[int, ...]): num_warpgroups = self._num_warpgroups((), block) return int(num_warpgroups * self.entries_per_warpgroup) def smem_bytes(self, block: tuple[int, ...]): bytes_per_entry = 4 return self.smem_i32_elements(block) * bytes_per_entry def intern_name(self, name: str) -> int: if (name_id := self.interned_names.get(name, None)) is not None: return name_id name_id = self.interned_names[name] = len(self.interned_names) if name_id & self.EXIT: raise RuntimeError("Allocated too many names") return name_id def dump(self, buffer, f, grid: tuple[int, ...], block: tuple[int, ...]): buffer = np.asarray(buffer) num_blocks = math.prod(grid) warpgroups_per_block = self._num_warpgroups((), block) entries = buffer.reshape( num_blocks, warpgroups_per_block, self.entries_per_warpgroup ) start_times = entries[..., 0] sm_ids = entries[..., 1] traces_used = entries[..., 2] entries_used = traces_used + 3 if np.any(entries_used > 
self.entries_per_warpgroup): raise RuntimeError("Insufficient space to capture a full trace") traces = entries[..., 3:] # Estimate the overhead of profiling. time_events = traces[:, :, 1::2] valid_times_mask = np.arange(traces.shape[-1])[1::2] < traces_used[..., None] # 12 cycles is a ballpark estimate for H100 profiling_overhead = (time_events[:, :, 1:] - time_events[:, :, :-1]).min( where=valid_times_mask[:, :, 1:], initial=12 ) profiling_overhead = max(0, profiling_overhead - 1) unintern = {v: k for k, v in self.interned_names.items()} events = [] for block_idx, wg_idx in np.ndindex(num_blocks, warpgroups_per_block): valid_entries = traces_used[block_idx, wg_idx] local_clock_offset = None assert valid_entries % 2 == 0, valid_entries start_time = start_times[block_idx, wg_idx] block_events = [] last_time = float("-inf") for i in range(0, valid_entries, 2): tag = traces[block_idx, wg_idx, i] time = traces[block_idx, wg_idx, i + 1] if local_clock_offset is None: local_clock_offset = time time -= local_clock_offset time -= (i // 2) * profiling_overhead # Account for the overhead of profiling. 
if time < 0: break # Detect a timer wraparound name_id = tag begin = True if name_id & ProfilerSpec.EXIT: name_id = name_id ^ ProfilerSpec.EXIT begin = False name = unintern[name_id] if last_time >= time: if last_time - time > 10: warnings.warn( "Profiler clock went significantly backwards for event" f" {'start' if begin else 'end'} `{name}`: {last_time} ->" f" {time}" ) time = last_time + 1 last_time = time block_events.append({ "name": name, "ph": "B" if begin else "E", "ts": float(start_time + time) / 1e3, "pid": 1 + int(sm_ids[block_idx, wg_idx]), "tid": 1 + wg_idx + warpgroups_per_block * block_idx, }) else: # If we didn't break if block_events: events.append(block_events) events = sorted(events, key=lambda x: x[0]["ts"]) flat_events = list(itertools.chain.from_iterable(events)) return json.dump({"displayTimeUnit": "ns", "traceEvents": flat_events}, f) @dataclasses.dataclass(frozen=True)
ProfilerSpec
python
milvus-io__pymilvus
pymilvus/bulk_writer/constants.py
{ "start": 3711, "end": 3802 }
class ____(Enum): AUTO = "AUTO" INTERNAL = "INTERNAL" PUBLIC = "PUBLIC"
ConnectType
python
python-openxml__python-docx
src/docx/opc/phys_pkg.py
{ "start": 900, "end": 1075 }
class ____: """Factory for physical package writer objects.""" def __new__(cls, pkg_file): return super(PhysPkgWriter, cls).__new__(_ZipPkgWriter)
PhysPkgWriter
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/pickleable.py
{ "start": 1804, "end": 2062 }
class ____: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): return ( other.__class__ is self.__class__ and other.x == self.x and other.y == self.y )
OldSchool
python
getsentry__sentry
tests/sentry/api/endpoints/test_event_attachments.py
{ "start": 245, "end": 4437 }
class ____(APITestCase): def test_simple(self) -> None: self.login_as(user=self.user) min_ago = before_now(minutes=1).isoformat() event1 = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=self.project.id ) event2 = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=self.project.id ) attachment1 = EventAttachment.objects.create( project_id=event1.project_id, event_id=event1.event_id, type="event.attachment", name="hello.png", content_type="image/png", size=18, sha1="d3f299af02d6abbe92dd8368bab781824a9702ed", blob_path=":File contents here", ) attachment2 = EventAttachment.objects.create( project_id=event2.project_id, event_id=event2.event_id, type="event.attachment", name="hello.png", content_type="image/png", size=1234, sha1="1234", ) path = f"/api/0/projects/{event1.project.organization.slug}/{event1.project.slug}/events/{event1.event_id}/attachments/" with self.feature("organizations:event-attachments"): response = self.client.get(path) assert response.status_code == 200, response.content assert len(response.data) == 1 assert response.data[0]["id"] == str(attachment1.id) assert response.data[0]["event_id"] == attachment1.event_id assert response.data[0]["type"] == "event.attachment" assert response.data[0]["name"] == "hello.png" assert response.data[0]["mimetype"] == "image/png" assert response.data[0]["size"] == 18 assert response.data[0]["sha1"] == "d3f299af02d6abbe92dd8368bab781824a9702ed" assert response.data[0]["headers"] == {"Content-Type": "image/png"} path = f"/api/0/projects/{event2.project.organization.slug}/{event2.project.slug}/events/{event2.event_id}/attachments/" with self.feature("organizations:event-attachments"): response = self.client.get(path) assert response.status_code == 200, response.content assert len(response.data) == 1 assert response.data[0]["id"] == str(attachment2.id) assert response.data[0]["event_id"] == attachment2.event_id assert response.data[0]["type"] == 
"event.attachment" assert response.data[0]["name"] == "hello.png" assert response.data[0]["mimetype"] == "image/png" assert response.data[0]["size"] == 1234 assert response.data[0]["sha1"] == "1234" assert response.data[0]["headers"] == {"Content-Type": "image/png"} def test_is_screenshot(self) -> None: self.login_as(user=self.user) min_ago = before_now(minutes=1).isoformat() event1 = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=self.project.id ) EventAttachment.objects.create( event_id=event1.event_id, project_id=event1.project_id, name="screenshot.png", content_type="image/png", ) EventAttachment.objects.create( event_id=event1.event_id, project_id=event1.project_id, name="crash_screenshot.png", ) EventAttachment.objects.create( event_id=event1.event_id, project_id=event1.project_id, name="foo.png", ) path = f"/api/0/projects/{event1.project.organization.slug}/{event1.project.slug}/events/{event1.event_id}/attachments/" with self.feature("organizations:event-attachments"): response = self.client.get(f"{path}?query=is:screenshot") assert response.status_code == 200, response.content assert len(response.data) == 2 for attachment in response.data: assert attachment["event_id"] == event1.event_id # foo.png will not be included assert attachment["name"] in ["screenshot.png", "crash_screenshot.png"]
EventAttachmentsTest
python
huggingface__transformers
src/transformers/utils/generic.py
{ "start": 26168, "end": 35614 }
class ____: """ Configuration for recording outputs from a model via hooks. Attributes: target_class (Type): The class (e.g., nn.Module) to which the hook will be attached. index (Optional[int]): If the output is a tuple/list, optionally record only at a specific index. layer_name (Optional[str]): Name of the submodule to target (if needed), e.g., "transformer.layer.3.attn". class_name (Optional[str]): Name of the class to which the hook will be attached. Could be the suffix of class name in some cases. """ target_class: "type[torch.nn.Module]" index: int = 0 layer_name: str | None = None class_name: str | None = None def check_model_inputs(tie_last_hidden_states=True): """ Decorator to intercept specific layer outputs without using hooks. Compatible with torch.compile (Dynamo tracing). Args: tie_last_hidden_states (`bool`, *optional*, defaults to `True`): Whether to overwrite `out.hidden_states[-1]` with the `out.last_hidden_state`. This is true for all language models and should be toggled off only if `out.hidden_states[-1]` has to be the hidden state before last layer norm, which is needed for some vision models (e.g. CLIP, SigLIP) """ def wrapped_fn(func): @wraps(func) def wrapper(self, *args, **kwargs): use_cache_arg_index = None if "use_cache" in func.__code__.co_varnames: use_cache_arg_index = func.__code__.co_varnames.index("use_cache") - 1 # -1 for self if ( use_cache_arg_index is not None and len(args) > use_cache_arg_index and args[use_cache_arg_index] is not None ): use_cache = args[use_cache_arg_index] elif kwargs.get("use_cache") is not None: use_cache = kwargs["use_cache"] else: use_cache = getattr(self.config, "use_cache", None) if use_cache is not None: if getattr(self, "gradient_checkpointing", False) and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if use_cache_arg_index is not None and len(args) > use_cache_arg_index: args = list(args) args[use_cache_arg_index] = use_cache args = tuple(args) else: kwargs["use_cache"] = use_cache return_dict = kwargs.pop("return_dict", None) if return_dict is None: return_dict = getattr(self.config, "return_dict", True) all_args = kwargs.copy() if "kwargs" in all_args: for k, v in all_args["kwargs"].items(): all_args[k] = v # _can_record_outputs is None by default capture_flags = _CAN_RECORD_REGISTRY.get(str(self.__class__)) or {} # there is a weak ref for executorch recordable_keys = { f"output_{k}": all_args.get( f"output_{k}", getattr( self.config, f"output_{k}", all_args.get("output_attentions", getattr(self.config, "output_attentions", False)), ), ) for k in capture_flags } # We let cross attentions to be saved separately because some models add `cross-attn` layer # when certain condtions are met. Let's output cross attention if attentions are requested (for BC) if "output_attentions" in recordable_keys: recordable_keys["output_cross_attentions"] = recordable_keys["output_attentions"] collected_outputs = defaultdict(tuple) monkey_patched_layers = [] def make_capture_wrapper(module, orig_forward, key, index): @wraps(orig_forward) def wrapped_forward(*args, **kwargs): if key == "hidden_states" and len(collected_outputs[key]) == 0: collected_outputs[key] += (args[0],) output = orig_forward(*args, **kwargs) if not isinstance(output, tuple): collected_outputs[key] += (output,) elif output[index] is not None: if key not in collected_outputs: collected_outputs[key] = (output[index],) else: collected_outputs[key] += (output[index],) return output return wrapped_forward if any(recordable_keys.values()): capture_tasks = [] for key, layer_specs in capture_flags.items(): if not recordable_keys.get(f"output_{key}", False): continue if not isinstance(layer_specs, list): layer_specs = [layer_specs] for specs in layer_specs: if not isinstance(specs, OutputRecorder): 
index = 0 if "hidden_states" in key else 1 class_name = None if not isinstance(specs, str) else specs target_class = specs if not isinstance(specs, str) else None specs = OutputRecorder(target_class=target_class, index=index, class_name=class_name) capture_tasks.append((key, specs)) for name, module in self.named_modules(): for key, specs in capture_tasks: # The second check is for multimodals where only backbone layer suffix is available if (specs.target_class is not None and isinstance(module, specs.target_class)) or ( specs.class_name is not None and name.endswith(specs.class_name) ): if specs.layer_name is not None and specs.layer_name not in name: continue # Monkey patch forward original_forward = module.forward module.forward = make_capture_wrapper(module, original_forward, key, specs.index) monkey_patched_layers.append((module, original_forward)) try: if kwargs.get("debug_io", False): with model_addition_debugger_context( self, kwargs.get("debug_io_dir", "model_debug"), kwargs.get("prune_layers") ): outputs = func(self, *args, **kwargs) else: outputs = func(self, *args, **kwargs) except TypeError as original_exception: # If we get a TypeError, it's possible that the model is not receiving the recordable kwargs correctly. 
# Get a TypeError even after removing the recordable kwargs -> re-raise the original exception # Otherwise -> we're probably missing `**kwargs` in the decorated function kwargs_without_recordable = {k: v for k, v in kwargs.items() if k not in recordable_keys} try: outputs = func(self, *args, **kwargs_without_recordable) except TypeError: raise original_exception raise TypeError( "Missing `**kwargs` in the signature of the `@check_model_inputs`-decorated function " f"({func.__qualname__})" ) # Restore original forward methods for module, original_forward in monkey_patched_layers: module.forward = original_forward # Inject collected outputs into model output for key in collected_outputs: if key == "hidden_states": if not tie_last_hidden_states: pass elif hasattr(outputs, "vision_hidden_states"): collected_outputs[key] = collected_outputs[key][:-1] collected_outputs[key] += (outputs.vision_hidden_states,) elif hasattr(outputs, "last_hidden_state"): collected_outputs[key] = collected_outputs[key][:-1] collected_outputs[key] += (outputs.last_hidden_state,) outputs[key] = collected_outputs[key] elif key == "attentions": if isinstance(capture_flags[key], list) and len(capture_flags[key]) == 2: outputs[key] = collected_outputs[key][0::2] outputs["cross_" + key] = collected_outputs[key][1::2] else: outputs[key] = collected_outputs[key] else: outputs[key] = collected_outputs[key] if return_dict is False: outputs = outputs.to_tuple() return outputs return wrapper return wrapped_fn
OutputRecorder
python
ijl__orjson
test/test_memory.py
{ "start": 975, "end": 8251 }
class ____: @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_loads(self): """ loads() memory leak """ proc = psutil.Process() gc.collect() val = orjson.loads(FIXTURE) assert val mem = proc.memory_info().rss for _ in range(10000): val = orjson.loads(FIXTURE) assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_loads_memoryview(self): """ loads() memory leak using memoryview """ proc = psutil.Process() gc.collect() fixture = FIXTURE.encode("utf-8") val = orjson.loads(fixture) assert val mem = proc.memory_info().rss for _ in range(10000): val = orjson.loads(memoryview(fixture)) assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_dumps(self): """ dumps() memory leak """ proc = psutil.Process() gc.collect() fixture = orjson.loads(FIXTURE) val = orjson.dumps(fixture) assert val mem = proc.memory_info().rss for _ in range(10000): val = orjson.dumps(fixture) assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_loads_exc(self): """ loads() memory leak exception without a GC pause """ proc = psutil.Process() gc.disable() mem = proc.memory_info().rss n = 10000 i = 0 for _ in range(n): try: orjson.loads("") except orjson.JSONDecodeError: i += 1 assert n == i assert proc.memory_info().rss <= mem + MAX_INCREASE gc.enable() @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_dumps_exc(self): """ dumps() memory leak exception without a GC pause """ proc = psutil.Process() gc.disable() data = Unsupported() mem = proc.memory_info().rss n = 10000 i = 0 for _ in range(n): try: orjson.dumps(data) except orjson.JSONEncodeError: i += 1 assert n == i assert 
proc.memory_info().rss <= mem + MAX_INCREASE gc.enable() @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_dumps_default(self): """ dumps() default memory leak """ proc = psutil.Process() gc.collect() fixture = orjson.loads(FIXTURE) class Custom: def __init__(self, name): self.name = name def __str__(self): return f"{self.__class__.__name__}({self.name})" fixture["custom"] = Custom("orjson") val = orjson.dumps(fixture, default=default) mem = proc.memory_info().rss for _ in range(10000): val = orjson.dumps(fixture, default=default) assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_dumps_dataclass(self): """ dumps() dataclass memory leak """ proc = psutil.Process() gc.collect() val = orjson.dumps(DATACLASS_FIXTURE) assert val mem = proc.memory_info().rss for _ in range(100): val = orjson.dumps(DATACLASS_FIXTURE) assert val assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif( psutil is None or pytz is None, reason="psutil not installed", ) def test_memory_dumps_pytz_tzinfo(self): """ dumps() pytz tzinfo memory leak """ proc = psutil.Process() gc.collect() dt = datetime.datetime.now() val = orjson.dumps(pytz.UTC.localize(dt)) assert val mem = proc.memory_info().rss for _ in range(50000): val = orjson.dumps(pytz.UTC.localize(dt)) assert val assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_loads_keys(self): """ loads() memory leak with number of keys causing cache eviction """ proc = psutil.Process() gc.collect() fixture = {f"key_{idx}": "value" for idx in range(1024)} assert len(fixture) == 1024 val = orjson.dumps(fixture) loaded = orjson.loads(val) assert loaded mem = proc.memory_info().rss for _ in range(100): loaded = orjson.loads(val) assert loaded gc.collect() assert 
proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") @pytest.mark.skipif(numpy is None, reason="numpy is not installed") def test_memory_dumps_numpy(self): """ dumps() numpy memory leak """ proc = psutil.Process() gc.collect() fixture = numpy.random.rand(4, 4, 4) # type: ignore val = orjson.dumps(fixture, option=orjson.OPT_SERIALIZE_NUMPY) assert val mem = proc.memory_info().rss for _ in range(100): val = orjson.dumps(fixture, option=orjson.OPT_SERIALIZE_NUMPY) assert val assert val gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") @pytest.mark.skipif(pandas is None, reason="pandas is not installed") def test_memory_dumps_pandas(self): """ dumps() pandas memory leak """ proc = psutil.Process() gc.collect() numpy.random.rand(4, 4, 4) # type: ignore df = pandas.Series(numpy.random.rand(4, 4, 4).tolist()) # type: ignore val = df.map(orjson.dumps) assert not val.empty mem = proc.memory_info().rss for _ in range(100): val = df.map(orjson.dumps) assert not val.empty gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE @pytest.mark.skipif(psutil is None, reason="psutil not installed") def test_memory_dumps_fragment(self): """ dumps() Fragment memory leak """ proc = psutil.Process() gc.collect() orjson.dumps(orjson.Fragment(str(0))) mem = proc.memory_info().rss for i in range(10000): orjson.dumps(orjson.Fragment(str(i))) gc.collect() assert proc.memory_info().rss <= mem + MAX_INCREASE
TestMemory
python
prabhupant__python-ds
data_structures/linked_list/linked_list.py
{ "start": 94, "end": 1505 }
class ____(): def __init__(self): self.head = None def print_list(self): curr = self.head while curr: print(curr.val) curr = curr.next def insert_front(self, new_data): new_node = Node(new_data) new_node.next = self.head self.head = new_node def insert_after(self, prev_node, new_data): if prev_node is None: print("Previous node is absent!") return new_node = Node(new_data) new_node.next = prev_node.next prev_node.next = new_node def insert_end(self, new_data): if self.head is None: self.head = Node(new_data) return curr = self.head while curr.next: curr = curr.next curr.next = Node(new_data) def reverse(self): if self.head is None: return None curr = self.head prev = None while curr: next = curr.next curr.next = prev prev = curr curr = next self.head = prev if __name__ == '__main__': llist = LinkedList() llist.insert_end(1) llist.insert_end(2) llist.insert_end(3) llist.insert_after(llist.head.next.next, 4) llist.insert_end(5) llist.insert_front(0) llist.print_list() llist.reverse() llist.print_list()
LinkedList
python
numba__numba
numba/core/typing/mathdecl.py
{ "start": 2039, "end": 2254 }
class ____(ConcreteTemplate): cases = [ signature(types.float32, types.float32, types.float32), signature(types.float64, types.float64, types.float64), ] @infer_global(math.hypot)
Math_copysign
python
dask__distributed
distributed/_concurrent_futures_thread.py
{ "start": 1504, "end": 2864 }
class ____: def __init__(self, future, fn, args, kwargs): self.future = future self.fn = fn self.args = args self.kwargs = kwargs def run(self): if not self.future.set_running_or_notify_cancel(): # pragma: no cover return try: result = self.fn(*self.args, **self.kwargs) except BaseException as e: self.future.set_exception(e) else: self.future.set_result(result) def _worker(executor_reference, work_queue): try: while True: work_item = work_queue.get(block=True) if work_item is not None: work_item.run() # Delete references to object. See issue16284 del work_item continue executor = executor_reference() # Exit if: # - The interpreter is shutting down OR # - The executor that owns the worker has been collected OR # - The executor that owns the worker has been shutdown. if _shutdown or executor is None or executor._shutdown: # Notice other workers work_queue.put(None) return del executor except BaseException: _base.LOGGER.critical("Exception in worker", exc_info=True)
_WorkItem
python
airbytehq__airbyte
airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/response_builder/pagination.py
{ "start": 165, "end": 451 }
class ____(PaginationStrategy): NEXT_PAGE_TOKEN = {"after": "256"} def update(self, response: Dict[str, Any]) -> None: response["paging"] = {"next": {"link": "link_to_the_next_page", **self.NEXT_PAGE_TOKEN}, "prev": {"before": None, "link": None}}
HubspotPaginationStrategy
python
spack__spack
lib/spack/spack/cmd/clean.py
{ "start": 537, "end": 4100 }
class ____(argparse.Action): """Activates flags -s -d -f -m -p and -b simultaneously""" def __call__(self, parser, namespace, values, option_string=None): parser.parse_args(["-sdfmpb"], namespace=namespace) def setup_parser(subparser: argparse.ArgumentParser) -> None: subparser.add_argument( "-s", "--stage", action="store_true", help="remove all temporary build stages (default)" ) subparser.add_argument( "-d", "--downloads", action="store_true", help="remove cached downloads" ) subparser.add_argument( "-f", "--failures", action="store_true", help="force removal of all install failure tracking markers", ) subparser.add_argument( "-m", "--misc-cache", action="store_true", help="remove long-lived caches, like the virtual package index", ) subparser.add_argument( "-p", "--python-cache", action="store_true", help="remove .pyc, .pyo files and __pycache__ folders", ) subparser.add_argument( "-b", "--bootstrap", action="store_true", help="remove software and configuration needed to bootstrap Spack", ) subparser.add_argument( "-a", "--all", action=AllClean, help="equivalent to ``-sdfmpb``", nargs=0 ) arguments.add_common_arguments(subparser, ["specs"]) def remove_python_cache(): for directory in [lib_path, var_path]: for root, dirs, files in os.walk(directory): for f in files: if f.endswith(".pyc") or f.endswith(".pyo"): fname = os.path.join(root, f) tty.debug("Removing {0}".format(fname)) os.remove(fname) for d in dirs: if d == "__pycache__": dname = os.path.join(root, d) tty.debug("Removing {0}".format(dname)) shutil.rmtree(dname) def clean(parser, args): # If nothing was set, activate the default if not any( [ args.specs, args.stage, args.downloads, args.failures, args.misc_cache, args.python_cache, args.bootstrap, ] ): args.stage = True # Then do the cleaning falling through the cases if args.specs: specs = spack.cmd.parse_specs(args.specs, concretize=False) specs = spack.cmd.matching_specs_from_env(specs) for spec in specs: msg = "Cleaning build stage [{0}]" 
tty.msg(msg.format(spec.short_spec)) spec.package.do_clean() if args.stage: tty.msg("Removing all temporary build stages") spack.stage.purge() if args.downloads: tty.msg("Removing cached downloads") spack.caches.FETCH_CACHE.destroy() if args.failures: tty.msg("Removing install failure marks") spack.store.STORE.failure_tracker.clear_all() if args.misc_cache: tty.msg("Removing cached information on repositories") spack.caches.MISC_CACHE.destroy() if args.python_cache: tty.msg("Removing python cache files") remove_python_cache() if args.bootstrap: bootstrap_prefix = spack.util.path.canonicalize_path(spack.config.get("bootstrap:root")) msg = 'Removing bootstrapped software and configuration in "{0}"' tty.msg(msg.format(bootstrap_prefix)) spack.llnl.util.filesystem.remove_directory_contents(bootstrap_prefix)
AllClean
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/class_interval.py
{ "start": 3037, "end": 3092 }
class ____(B6): def m0(self): super().m0()
D6
python
wandb__wandb
wandb/vendor/pygments/lexers/php.py
{ "start": 556, "end": 3135 }
class ____(RegexLexer): """ For `Zephir language <http://zephir-lang.com/>`_ source code. Zephir is a compiled high level language aimed to the creation of C-extensions for PHP. .. versionadded:: 2.0 """ name = 'Zephir' aliases = ['zephir'] filenames = ['*.zep'] zephir_keywords = ['fetch', 'echo', 'isset', 'empty'] zephir_type = ['bit', 'bits', 'string'] flags = re.DOTALL | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), default('#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|' r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|' r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|' r'empty)\b', Keyword, 'slashstartsregex'), (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|' r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|' r'float|unsigned|private|protected|public|short|static|self|throws|reverse|' r'transient|volatile)\b', Keyword.Reserved), (r'(true|false|null|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|' r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|' r'window)\b', Name.Builtin), (r'[$a-zA-Z_][\w\\]*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), 
(r"'(\\\\|\\'|[^'])*'", String.Single), ] }
ZephirLexer
python
Pylons__pyramid
src/pyramid/registry.py
{ "start": 7476, "end": 9291 }
class ____(dict): order = 0 # mutated by introspector.add action_info = None # mutated by self.register def __init__(self, category_name, discriminator, title, type_name): self.category_name = category_name self.discriminator = discriminator self.title = title self.type_name = type_name self._relations = [] def relate(self, category_name, discriminator): self._relations.append((True, category_name, discriminator)) def unrelate(self, category_name, discriminator): self._relations.append((False, category_name, discriminator)) def _assert_resolved(self): assert undefer(self.discriminator) is self.discriminator @property def discriminator_hash(self): self._assert_resolved() return hash(self.discriminator) def __hash__(self): self._assert_resolved() return hash((self.category_name,) + (self.discriminator,)) def __repr__(self): self._assert_resolved() return '<{} category {!r}, discriminator {!r}>'.format( self.__class__.__name__, self.category_name, self.discriminator, ) def __bool__(self): return True def register(self, introspector, action_info): self.discriminator = undefer(self.discriminator) self.action_info = action_info introspector.add(self) for relate, category_name, discriminator in self._relations: discriminator = undefer(discriminator) if relate: method = introspector.relate else: method = introspector.unrelate method( (self.category_name, self.discriminator), (category_name, discriminator), )
Introspectable
python
pytorch__pytorch
torch/testing/_internal/common_fsdp.py
{ "start": 34032, "end": 40606 }
class ____(nn.Module): """ This can be used for returning multiple outputs from a module (``use_second_linear=True``) or for having an unused module (``False``). """ def __init__(self, dim: int, use_second_linear: bool = True): super().__init__() self.lin1 = nn.Linear(dim, dim) self.lin2 = nn.Linear(dim, dim) self.relu = nn.ReLU() self.use_second_linear = use_second_linear def forward( self, x: torch.Tensor ) -> Union[tuple[torch.Tensor, torch.Tensor], torch.Tensor]: if self.use_second_linear: return self.relu(self.lin1(x)), self.relu(self.lin2(x)) return self.relu(self.lin1(x)) # NOTE: For these patch methods, if we want safety under multi-threading (e.g. # when using multi-threaded process group), then we want: # (1) a barrier immediately after reading the original value to ensure that all # threads see the same original value # (2) a barrier immediately before restoring the original value to ensure that # all threads use the patched value inside the context @contextlib.contextmanager def patch_all_gather(new_all_gather_into_tensor: Callable): orig_all_gather = dist.all_gather_into_tensor dist.barrier() dist.all_gather_into_tensor = new_all_gather_into_tensor try: yield finally: dist.barrier() dist.all_gather_into_tensor = orig_all_gather @contextlib.contextmanager def patch_foreach_all_gather(new_foreach_all_gather: Callable): orig_foreach_all_gather = ( torch.distributed.fsdp._fully_shard._fsdp_param_group.foreach_all_gather ) dist.barrier() torch.distributed.fsdp._fully_shard._fsdp_param_group.foreach_all_gather = ( new_foreach_all_gather ) try: yield finally: dist.barrier() torch.distributed.fsdp._fully_shard._fsdp_param_group.foreach_all_gather = ( orig_foreach_all_gather ) @contextlib.contextmanager def patch_foreach_reduce(new_foreach_reduce: Callable): orig_foreach_foreach_reduce = ( torch.distributed.fsdp._fully_shard._fsdp_param_group.foreach_reduce ) dist.barrier() torch.distributed.fsdp._fully_shard._fsdp_param_group.foreach_reduce = ( 
new_foreach_reduce ) try: yield finally: dist.barrier() torch.distributed.fsdp._fully_shard._fsdp_param_group.foreach_reduce = ( orig_foreach_foreach_reduce ) @contextlib.contextmanager def patch_reduce_scatter(new_reduce_scatter_tensor: Callable): orig_reduce_scatter = dist.reduce_scatter_tensor dist.barrier() dist.reduce_scatter_tensor = new_reduce_scatter_tensor try: yield finally: dist.barrier() dist.reduce_scatter_tensor = orig_reduce_scatter @contextlib.contextmanager def patch_all_reduce(new_all_reduce: Callable): orig_all_reduce = dist.all_reduce dist.barrier() dist.all_reduce = new_all_reduce try: yield finally: dist.barrier() dist.all_reduce = orig_all_reduce @no_type_check @contextlib.contextmanager def patch_unshard(new_unshard: Callable): orig_unshard = FSDPParamGroup.unshard dist.barrier() FSDPParamGroup.unshard = new_unshard try: yield finally: dist.barrier() FSDPParamGroup.unshard = orig_unshard @no_type_check @contextlib.contextmanager def patch_reshard(new_reshard: Callable): orig_reshard = FSDPParamGroup.reshard dist.barrier() FSDPParamGroup.reshard = new_reshard try: yield finally: dist.barrier() FSDPParamGroup.reshard = orig_reshard @no_type_check @contextlib.contextmanager def patch_post_backward(new_post_backward: Callable): orig_post_backward = FSDPParamGroup.post_backward dist.barrier() FSDPParamGroup.post_backward = new_post_backward try: yield finally: dist.barrier() FSDPParamGroup.post_backward = orig_post_backward @no_type_check @contextlib.contextmanager def patch_register_post_backward_hook_backward(new_backward: Callable): orig_backward = RegisterPostBackwardFunction.backward dist.barrier() RegisterPostBackwardFunction.backward = new_backward try: yield finally: dist.barrier() RegisterPostBackwardFunction.backward = orig_backward def reduce_scatter_with_assert( cls, orig_reduce_scatter: Callable, assert_fn: Callable, # `assert_fn(output: Tensor)` *args: Any, **kwargs: Any, ): if len(args) > 0: output = args[0] elif "output" in 
kwargs: output = kwargs["output"] else: raise AssertionError( f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}" ) assert_fn(output) return orig_reduce_scatter(*args, **kwargs) def check_sharded_parity( cls, # unit test class replicated_module: nn.Module, sharded_module: nn.Module, prefixes_to_ignore: tuple[str, ...] = (), ): for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip( replicated_module.named_parameters(), sharded_module.named_parameters(), strict=True, ): clean_sharded_name = sharded_name for prefix in prefixes_to_ignore: clean_sharded_name = clean_sharded_name.replace(prefix, "") cls.assertEqual(replicated_name, clean_sharded_name) cls.assertIsInstance(sharded_param, DTensor) assert isinstance(sharded_param, DTensor) # mypy mesh, placements = sharded_param.device_mesh, sharded_param.placements if tuple(placements) == (Shard(0), Shard(0)): raise AssertionError( "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), " "so we cannot check for equality using it" ) sharded_ref_param = distribute_tensor(replicated_param, mesh, placements) cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local()) if replicated_param.grad is None: cls.assertIsNone(sharded_param.grad) continue cls.assertIsNotNone(sharded_param.grad) sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements) cls.assertIsInstance(sharded_param.grad, DTensor) assert isinstance(sharded_param.grad, DTensor) # mypy cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local()) @unittest.skipIf(TEST_XPU, "not-support-multithread")
DoubleLinear
python
pdm-project__pdm
src/pdm/cli/commands/list.py
{ "start": 12217, "end": 17028 }
class ____: """Wrapper makes sorting and exporting information about a Distribution easier. It also retrieves license information from dist-info metadata. https://packaging.python.org/en/latest/specifications/core-metadata """ # Fields that users are allowed to sort on. KEYS = frozenset(["name", "groups", "version", "homepage", "licenses", "location"]) def __init__(self, dist: im.Distribution, groups: set[str]): self.dist = dist self.name = dist.metadata.get("Name") self.groups = "|".join(groups) self.version = dist.metadata.get("Version") self.version = None if self.version == "UNKNOWN" else self.version self.homepage = dist.metadata.get("Home-Page") self.homepage = None if self.homepage == "UNKNOWN" else self.homepage # If the License metadata field is empty or UNKNOWN then try to # find the license in the Trove classifiers. There may be more than one # so generate a pipe separated list (to avoid complexity with CSV export). self.licenses = dist.metadata.get("License") self.licenses = None if self.licenses == "UNKNOWN" else self.licenses # Sometimes package metadata contains the full license text. # e.g. license = { file="LICENSE" } in pyproject.toml # To identify this, check for newlines or very long strings. # 50 chars is picked because the longest OSI license (WTFPL) full name is 43 characters. is_full_text = (self.licenses and "\n" in self.licenses) or len(self.licenses or "") > 50 # If that is the case, look at the classifiers instead. if not self.licenses or is_full_text: classifier_licenses = [v for v in dist.metadata.get_all("Classifier", []) if v.startswith("License")] alternatives = [parts.split("::") for parts in classifier_licenses] alternatives = [part[-1].strip() for part in alternatives if part] self.licenses = "|".join(alternatives) @property def location(self) -> str: return get_dist_location(self.dist) def license_files(self) -> list[im.PackagePath]: """Path to files inside the package that may contain license information or other legal notices. 
The implementation is a "best effort" and may contain errors, select incorrect information, or otherwise be error-prone. It is not a substitute for a lawyer. """ if not self.dist.files: return [] # Inconsistency between packages means that we check in several locations # for license files. There may be 0 or more of these. There may be false # positives & negatives. locations = ("**/LICENSE*", "**/LICENCE*", "**/COPYING*", "**/NOTICE*") # Compile a list of all file paths in the distribution that look like # they might contain a license file. paths = [] for path in self.dist.files: paths += [path for loc in locations if path.match(loc)] return paths def __getitem__(self, field: str) -> str: if field not in Listable.KEYS: raise PdmUsageError(f"list field `{field}` not in: {Listable.KEYS}") return getattr(self, field) def json(self, fields: Sequence[str]) -> dict: return {f: self[f] for f in fields} def rich(self, fields: Sequence[str]) -> Sequence[str]: output = [] for field in fields: data = f"{self[field]}" data = data if field != "name" else f"[req]{data}[/]" data = data if field != "version" else f"[warning]{data}[/]" data = data if field != "groups" else f"[error]{data}[/]" output.append(data) return output def markdown(self, fields: Sequence[str]) -> str: nl = "\n" section = "" # Heading section += f"## {self.name}{nl}" section += f"{nl}" # Table section += f"| Name | {self.name} |{nl}" section += f"|----|----|{nl}" for field in fields: if field == "name": continue section += f"| {field.capitalize()} | {self[field]} |{nl}" section += f"{nl}" # Files for path in self.license_files(): section += f"{path}{nl}" section += f"{nl}{nl}" section += f"````{nl}" try: section += path.read_text("utf-8") except UnicodeDecodeError: section += "Problem decoding file as UTF-8" except Exception as err: section += f"Problem finding license text: {err}" section += f"{nl}" section += f"````{nl}" section += f"{nl}" return section
Listable
python
kamyu104__LeetCode-Solutions
Python/minimize-string-length.py
{ "start": 42, "end": 198 }
class ____(object): def minimizedStringLength(self, s): """ :type s: str :rtype: int """ return len(set(s))
Solution
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_default_format01.py
{ "start": 315, "end": 1076 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_format01.xlsx") def test_create_file(self): """Test the creation of a file with user defined default format""" workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Calibri", "font_size": 11}, "default_row_height": 20, "default_column_width": 64, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
falconry__falcon
tests/test_http_method_routing.py
{ "start": 1416, "end": 2347 }
class ____: def __init__(self): self.called = False # Test non-callable attribute self.on_patch = {} # Field names ordered differently than in uri template def on_get(self, req, resp, sid, id): self.called = True self.req, self.resp = req, resp resp.status = falcon.HTTP_204 # Field names ordered the same as in uri template def on_head(self, req, resp, id, sid): self.called = True self.req, self.resp = req, resp resp.status = falcon.HTTP_204 def on_put(self, req, resp, id, sid): self.called = True self.req, self.resp = req, resp resp.status = falcon.HTTP_201 def on_report(self, req, resp, id, sid): self.called = True self.req, self.resp = req, resp resp.status = falcon.HTTP_204 def on_websocket(self, req, resp, id, sid): self.called = True
ThingsResource
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-ipex-llm/llama_index/embeddings/ipex_llm/base.py
{ "start": 930, "end": 5338 }
class ____(BaseEmbedding): max_length: int = Field( default=DEFAULT_HUGGINGFACE_LENGTH, description="Maximum length of input.", gt=0 ) normalize: bool = Field(default=True, description="Normalize embeddings or not.") query_instruction: Optional[str] = Field( description="Instruction to prepend to query text." ) text_instruction: Optional[str] = Field( description="Instruction to prepend to text." ) cache_folder: Optional[str] = Field( description="Cache folder for Hugging Face files." ) _model: Any = PrivateAttr() _device: str = PrivateAttr() def __init__( self, model_name: str = DEFAULT_HUGGINGFACE_EMBEDDING_MODEL, max_length: Optional[int] = None, query_instruction: Optional[str] = None, text_instruction: Optional[str] = None, normalize: bool = True, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, cache_folder: Optional[str] = None, trust_remote_code: bool = False, device: str = "cpu", callback_manager: Optional[CallbackManager] = None, **model_kwargs, ): if device not in ["cpu", "xpu"] and not device.startswith("xpu:"): raise ValueError( "IpexLLMEmbedding currently only supports device to be 'cpu', 'xpu', " f"or 'xpu:<device_id>', but you have: {device}." 
) device = device cache_folder = cache_folder or get_cache_dir() if model_name is None: raise ValueError("The `model_name` argument must be provided.") if not is_listed_model(model_name, BGE_MODELS): bge_model_list_str = ", ".join(BGE_MODELS) logger.warning( "IpexLLMEmbedding currently only provides optimization for " f"Hugging Face BGE models, which are: {bge_model_list_str}" ) model = SentenceTransformer( model_name, device=device, cache_folder=cache_folder, trust_remote_code=trust_remote_code, prompts={ "query": query_instruction or get_query_instruct_for_model_name(model_name), "text": text_instruction or get_text_instruct_for_model_name(model_name), }, **model_kwargs, ) # Apply ipex-llm optimizations model = _optimize_pre(self._model) model = _optimize_post(self._model) if device == "xpu": # TODO: apply `ipex_llm.optimize_model` model = model.half().to(device) if max_length: model.max_seq_length = max_length else: max_length = model.max_seq_length super().__init__( embed_batch_size=embed_batch_size, callback_manager=callback_manager, model_name=model_name, max_length=max_length, normalize=normalize, query_instruction=query_instruction, text_instruction=text_instruction, ) self._model = model self._device = device @classmethod def class_name(cls) -> str: return "IpexLLMEmbedding" def _embed( self, sentences: List[str], prompt_name: Optional[str] = None, ) -> List[List[float]]: """Embed sentences.""" return self._model.encode( sentences, batch_size=self.embed_batch_size, prompt_name=prompt_name, normalize_embeddings=self.normalize, ).tolist() def _get_query_embedding(self, query: str) -> List[float]: """Get query embedding.""" return self._embed(query, prompt_name="query") async def _aget_query_embedding(self, query: str) -> List[float]: """Get query embedding async.""" return self._get_query_embedding(query) async def _aget_text_embedding(self, text: str) -> List[float]: """Get text embedding async.""" return self._get_text_embedding(text) def 
_get_text_embedding(self, text: str) -> List[float]: """Get text embedding.""" return self._embed(text, prompt_name="text") def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: """Get text embeddings.""" return self._embed(texts, prompt_name="text")
IpexLLMEmbedding
python
huggingface__transformers
tests/models/pix2struct/test_modeling_pix2struct.py
{ "start": 7677, "end": 11116 }
class ____: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=12, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.d_kv = hidden_size // num_attention_heads self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Pix2StructTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, 
initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, d_kv=self.d_kv, ) def create_and_check_model(self, config, input_ids, input_mask): model = Pix2StructTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch
Pix2StructTextModelTester
python
huggingface__transformers
src/transformers/models/mpt/modeling_mpt.py
{ "start": 19835, "end": 25377 }
class ____(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = MptModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def set_output_embeddings(self, new_embeddings: torch.Tensor): self.score = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring
MptForSequenceClassification
python
ray-project__ray
python/ray/util/client/common.py
{ "start": 18246, "end": 20174 }
class ____(ClientStub): """A stub for a method on a remote actor. Can be annotated with execution options. Args: actor_handle: A reference to the ClientActorHandle that generated this method and will have this method called upon it. method_name: The name of this method """ def __init__( self, actor_handle: ClientActorHandle, method_name: str, num_returns: int, signature: inspect.Signature, ): self._actor_handle = actor_handle self._method_name = method_name self._method_num_returns = num_returns self._signature = signature def __call__(self, *args, **kwargs): raise TypeError( "Actor methods cannot be called directly. Instead " f"of running 'object.{self._method_name}()', try " f"'object.{self._method_name}.remote()'." ) def remote(self, *args, **kwargs): self._signature.bind(*args, **kwargs) return return_refs(ray.call_remote(self, *args, **kwargs)) def __repr__(self): return "ClientRemoteMethod(%s, %s, %s)" % ( self._method_name, self._actor_handle, self._method_num_returns, ) def options(self, **kwargs): return OptionWrapper(self, kwargs) def _remote(self, args=None, kwargs=None, **option_args): if args is None: args = [] if kwargs is None: kwargs = {} return self.options(**option_args).remote(*args, **kwargs) def _prepare_client_task(self) -> ray_client_pb2.ClientTask: task = ray_client_pb2.ClientTask() task.type = ray_client_pb2.ClientTask.METHOD task.name = self._method_name task.payload_id = self._actor_handle.actor_ref.id return task def _num_returns(self) -> int: return self._method_num_returns
ClientRemoteMethod
python
rq__rq
rq/group.py
{ "start": 374, "end": 4845 }
class ____: """A Group is a container for tracking multiple jobs with a single identifier.""" REDIS_GROUP_NAME_PREFIX = 'rq:group:' REDIS_GROUP_KEY = 'rq:groups' def __init__(self, connection: Redis, name: Optional[str] = None): self.name = name if name else str(uuid4().hex) self.connection = connection self.key = f'{self.REDIS_GROUP_NAME_PREFIX}{self.name}' def __repr__(self): return f'Group(id={self.name})' def _add_jobs(self, jobs: Iterable[Job], pipeline: Pipeline): """Add jobs to the group""" pipeline.sadd(self.key, *[job.id for job in jobs]) pipeline.sadd(self.REDIS_GROUP_KEY, self.name) pipeline.execute() def cleanup(self): """Delete jobs from the group's job registry that have been deleted or expired from Redis. We assume while running this that alive jobs have all been fetched from Redis in fetch_jobs method""" with self.connection.pipeline() as pipe: # Use a new pipeline job_ids = [as_text(job) for job in list(self.connection.smembers(self.key))] if not job_ids: return expired_job_ids = [] for job in job_ids: pipe.exists(Job.key_for(job)) results = pipe.execute() for i, key_exists in enumerate(results): if not key_exists: expired_job_ids.append(job_ids[i]) if expired_job_ids: pipe.srem(self.key, *expired_job_ids) pipe.execute() def enqueue_many(self, queue: Queue, job_datas: Iterable['EnqueueData'], pipeline: Optional['Pipeline'] = None): pipe = pipeline if pipeline else self.connection.pipeline() jobs = queue.enqueue_many(job_datas, group_id=self.name, pipeline=pipe) self._add_jobs(jobs, pipeline=pipe) if pipeline is None: pipe.execute() return jobs def get_jobs(self) -> list: """Retrieve list of job IDs from the group key in Redis""" self.cleanup() job_ids = [as_text(job) for job in self.connection.smembers(self.key)] return [job for job in Job.fetch_many(job_ids, self.connection) if job is not None] def delete_job(self, job_id: str, pipeline: Optional['Pipeline'] = None): pipe = pipeline if pipeline else self.connection.pipeline() pipe.srem(self.key, 
job_id) if pipeline is None: pipe.execute() @classmethod def create(cls, connection: Redis, name: Optional[str] = None): return cls(name=name, connection=connection) @classmethod def fetch(cls, name: str, connection: Redis): """Fetch an existing group from Redis""" group = cls(name=name, connection=connection) if not connection.exists(Group.get_key(group.name)): raise NoSuchGroupError return group @classmethod def all(cls, connection: 'Redis') -> list['Group']: "Returns an iterable of all Groups." group_keys = [as_text(key) for key in connection.smembers(cls.REDIS_GROUP_KEY)] groups = [] for key in group_keys: try: groups.append(cls.fetch(key, connection=connection)) except NoSuchGroupError: connection.srem(cls.REDIS_GROUP_KEY, key) return groups @classmethod def get_key(cls, name: str) -> str: """Return the Redis key of the set containing a group's jobs""" return cls.REDIS_GROUP_NAME_PREFIX + name @classmethod def clean_registries(cls, connection: 'Redis'): """Loop through groups and delete those that have been deleted. If group still has jobs in its registry, delete those that have expired""" groups = Group.all(connection=connection) with connection.pipeline() as p: # Remove expired jobs from groups for group in groups: group.cleanup() p.execute() # Remove empty groups from group registry for group in groups: p.exists(group.key) results = p.execute() expired_group_ids = [] for i, key_exists in enumerate(results): if not key_exists: expired_group_ids.append(groups[i].name) if expired_group_ids: p.srem(cls.REDIS_GROUP_KEY, *expired_group_ids) p.execute()
Group
python
django__django
django/contrib/gis/db/models/functions.py
{ "start": 8597, "end": 8668 }
class ____(GeoFunc): output_field = BinaryField() arity = 1
AsWKB
python
getsentry__sentry
src/sentry/sentry_apps/models/sentry_app_avatar.py
{ "start": 472, "end": 552 }
class ____(StrEnum): ICON = "icon" LOGO = "logo"
SentryAppAvatarPhotoTypes
python
django__django
tests/fixtures_regress/models.py
{ "start": 6917, "end": 7015 }
class ____(BaseNKModel): a_set = models.ManyToManyField("M2MSimpleCircularA")
M2MSimpleCircularB
python
PyCQA__pylint
pylint/config/callback_actions.py
{ "start": 2591, "end": 3828 }
class ____(_CallbackAction): """Display the help message of a message.""" def __init__( self, option_strings: Sequence[str], dest: str, nargs: None = None, const: None = None, default: None = None, type: None = None, choices: None = None, required: bool = False, help: str = "", metavar: str = "", **kwargs: Run, ) -> None: self.run = kwargs["Run"] super().__init__( option_strings, dest, "+", const, default, type, choices, required, help, metavar, ) def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: str | Sequence[str] | None, option_string: str | None = "--help-msg", ) -> None: assert isinstance(values, (list, tuple)) values_to_print: list[str] = [] for msg in values: assert isinstance(msg, str) values_to_print += utils._check_csv(msg) self.run.linter.msgs_store.help_message(values_to_print) sys.exit(0)
_MessageHelpAction
python
openai__openai-python
src/openai/_response.py
{ "start": 19309, "end": 19559 }
class ____(TypeError): def __init__(self) -> None: super().__init__( "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", )
MissingStreamClassError
python
fluentpython__example-code-2e
20-executors/getflags/slow_server.py
{ "start": 640, "end": 3986 }
class ____(SimpleHTTPRequestHandler): """SlowHTTPRequestHandler adds delays and errors to test HTTP clients. The optional error_rate argument determines how often GET requests receive a 418 status code, "I'm a teapot". If error_rate is .15, there's a 15% probability of each GET request getting that error. When the server believes it is a teapot, it refuses requests to serve files. See: https://tools.ietf.org/html/rfc2324#section-2.3.2 """ def __init__(self, *args, error_rate=0.0, **kwargs): self.error_rate = error_rate super().__init__(*args, **kwargs) def do_GET(self): """Serve a GET request.""" delay = uniform(MIN_DELAY, MAX_DELAY) cc = self.path[-6:-4].upper() print(f'{cc} delay: {delay:0.2}s') time.sleep(delay) if random() < self.error_rate: # HTTPStatus.IM_A_TEAPOT requires Python >= 3.9 try: self.send_error(HTTPStatus.IM_A_TEAPOT, "I'm a Teapot") except BrokenPipeError as exc: print(f'{cc} *** BrokenPipeError: client closed') else: f = self.send_head() if f: try: self.copyfile(f, self.wfile) except BrokenPipeError as exc: print(f'{cc} *** BrokenPipeError: client closed') finally: f.close() # The code in the `if` block below, including comments, was copied # and adapted from the `http.server` module of Python 3.9 # https://github.com/python/cpython/blob/master/Lib/http/server.py if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--bind', '-b', metavar='ADDRESS', help='Specify alternate bind address ' '[default: all interfaces]') parser.add_argument('--directory', '-d', default=os.getcwd(), help='Specify alternative directory ' '[default:current directory]') parser.add_argument('--error-rate', '-e', metavar='PROBABILITY', default=0.0, type=float, help='Error rate; e.g. 
use .25 for 25%% probability ' '[default:0.0]') parser.add_argument('port', action='store', default=8001, type=int, nargs='?', help='Specify alternate port [default: 8001]') args = parser.parse_args() handler_class = partial(SlowHTTPRequestHandler, directory=args.directory, error_rate=args.error_rate) # ensure dual-stack is not disabled; ref #38907 class DualStackServer(ThreadingHTTPServer): def server_bind(self): # suppress exception when protocol is IPv4 with contextlib.suppress(Exception): self.socket.setsockopt( socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) return super().server_bind() # test is a top-level function in http.server omitted from __all__ server.test( # type: ignore HandlerClass=handler_class, ServerClass=DualStackServer, port=args.port, bind=args.bind, )
SlowHTTPRequestHandler
python
numba__numba
numba/tests/test_parallel_backend.py
{ "start": 19225, "end": 23921 }
class ____(ThreadLayerTestHelper): """ Checks fixes for the issues with threading backends implementation """ _DEBUG = False @skip_no_omp def test_omp_stack_overflow(self): """ Tests that OMP does not overflow stack """ runme = """if 1: from numba import vectorize, threading_layer import numpy as np @vectorize(['f4(f4,f4,f4,f4,f4,f4,f4,f4)'], target='parallel') def foo(a, b, c, d, e, f, g, h): return a+b+c+d+e+f+g+h x = np.ones(2**20, np.float32) foo(*([x]*8)) assert threading_layer() == "omp", "omp not found" """ cmdline = [sys.executable, '-c', runme] env = os.environ.copy() env['NUMBA_THREADING_LAYER'] = "omp" env['OMP_STACKSIZE'] = "100K" self.run_cmd(cmdline, env=env) @skip_no_tbb def test_single_thread_tbb(self): """ Tests that TBB works well with single thread https://github.com/numba/numba/issues/3440 """ runme = """if 1: from numba import njit, prange, threading_layer @njit(parallel=True) def foo(n): acc = 0 for i in prange(n): acc += i return acc foo(100) assert threading_layer() == "tbb", "tbb not found" """ cmdline = [sys.executable, '-c', runme] env = os.environ.copy() env['NUMBA_THREADING_LAYER'] = "tbb" env['NUMBA_NUM_THREADS'] = "1" self.run_cmd(cmdline, env=env) def test_workqueue_aborts_on_nested_parallelism(self): """ Tests workqueue raises sigabrt if a nested parallel call is performed """ runme = """if 1: from numba import njit, prange import numpy as np @njit(parallel=True) def nested(x): for i in prange(len(x)): x[i] += 1 @njit(parallel=True) def main(): Z = np.zeros((5, 10)) for i in prange(Z.shape[0]): nested(Z[i]) return Z main() """ cmdline = [sys.executable, '-c', runme] env = os.environ.copy() env['NUMBA_THREADING_LAYER'] = "workqueue" env['NUMBA_NUM_THREADS'] = "4" try: out, err = self.run_cmd(cmdline, env=env) except AssertionError as e: if self._DEBUG: print(out, err) e_msg = str(e) self.assertIn("failed with code", e_msg) # raised a SIGABRT, but the value is platform specific so just check # the error message expected = ("Numba 
workqueue threading layer is terminating: " "Concurrent access has been detected.") self.assertIn(expected, e_msg) @unittest.skipUnless(_HAVE_OS_FORK, "Test needs fork(2)") def test_workqueue_handles_fork_from_non_main_thread(self): # For context see #7872, but essentially the multiprocessing pool # implementation has a number of Python threads for handling the worker # processes, one of which calls fork(2), this results in a fork from a # non-main thread. runme = """if 1: from numba import njit, prange, threading_layer import numpy as np import multiprocessing if __name__ == "__main__": # Need for force fork context (OSX default is "spawn") multiprocessing.set_start_method('fork') @njit(parallel=True) def func(x): return 10. * x arr = np.arange(2.) # run in single process to start Numba's thread pool np.testing.assert_allclose(func(arr), func.py_func(arr)) # now run in a multiprocessing pool to get a fork from a # non-main thread with multiprocessing.Pool(10) as p: result = p.map(func, [arr]) np.testing.assert_allclose(result, func.py_func(np.expand_dims(arr, 0))) assert threading_layer() == "workqueue" """ cmdline = [sys.executable, '-c', runme] env = os.environ.copy() env['NUMBA_THREADING_LAYER'] = "workqueue" env['NUMBA_NUM_THREADS'] = "4" self.run_cmd(cmdline, env=env) # 32bit or windows py27 (not that this runs on windows) @skip_parfors_unsupported @skip_unless_gnu_omp
TestMiscBackendIssues
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_dash_address.py
{ "start": 892, "end": 1896 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_dash_address" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_valid_dash_address(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidDashAddress
python
getsentry__sentry
src/sentry/workflow_engine/types.py
{ "start": 9062, "end": 9647 }
class ____: config_schema: ClassVar[dict[str, Any]] data_schema: ClassVar[dict[str, Any]] class Group(StrEnum): NOTIFICATION = "notification" TICKET_CREATION = "ticket_creation" OTHER = "other" group: ClassVar[Group] @classmethod def get_config_transformer(cls) -> ConfigTransformer | None: return None @staticmethod def execute(event_data: WorkflowEventData, action: Action, detector: Detector) -> None: # TODO - do we need to pass all of this data to an action? raise NotImplementedError
ActionHandler
python
pypa__warehouse
tests/unit/test_predicates.py
{ "start": 640, "end": 1442 }
class ____: @pytest.mark.parametrize( ("value", "expected"), [(None, "domain = None"), ("pypi.io", "domain = {!r}".format("pypi.io"))], ) def test_text(self, value, expected): predicate = DomainPredicate(value, None) assert predicate.text() == expected assert predicate.phash() == expected def test_when_not_set(self): predicate = DomainPredicate(None, None) assert predicate(None, None) def test_valid_value(self): predicate = DomainPredicate("upload.pypi.io", None) assert predicate(None, pretend.stub(domain="upload.pypi.io")) def test_invalid_value(self): predicate = DomainPredicate("upload.pyp.io", None) assert not predicate(None, pretend.stub(domain="pypi.io"))
TestDomainPredicate
python
getsentry__sentry
src/sentry/dynamic_sampling/rules/biases/recalibration_bias.py
{ "start": 387, "end": 1865 }
class ____(Bias): """ Correction bias that tries to bring the overall sampling rate for the organization to the desired sampling rate. Various biases boost and shrink different transactions in order to obtain an appropriate number of samples from all areas of the application, doing this changes the overall sampling rate from the desired sampling rate, this bias tries to rectify the overall organization sampling rate and bring it to the desired sampling rate,it uses the previous interval rate to figure out how this should be done. """ def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]: if is_project_mode_sampling(project.organization): adjusted_factor = get_adjusted_project_factor(project.id) else: adjusted_factor = get_adjusted_factor(project.organization.id) # We don't want to generate any rule in case the factor is 1.0 since we should multiply the factor and 1.0 # is the identity of the multiplication. if adjusted_factor == 1.0: return [] return [ { "samplingValue": {"type": "factor", "value": adjusted_factor}, "type": "trace", "condition": { "op": "and", "inner": [], }, "id": RESERVED_IDS[RuleType.RECALIBRATION_RULE], } ]
RecalibrationBias
python
networkx__networkx
networkx/algorithms/tests/test_threshold.py
{ "start": 421, "end": 9733 }
class ____: def test_threshold_sequence_graph_test(self): G = nx.star_graph(10) assert nxt.is_threshold_graph(G) assert nxt.is_threshold_sequence([d for n, d in G.degree()]) G = nx.complete_graph(10) assert nxt.is_threshold_graph(G) assert nxt.is_threshold_sequence([d for n, d in G.degree()]) deg = [3, 2, 2, 1, 1, 1] assert not nxt.is_threshold_sequence(deg) deg = [3, 2, 2, 1] assert nxt.is_threshold_sequence(deg) G = nx.generators.havel_hakimi_graph(deg) assert nxt.is_threshold_graph(G) def test_creation_sequences(self): deg = [3, 2, 2, 1] G = nx.generators.havel_hakimi_graph(deg) with pytest.raises(ValueError): nxt.creation_sequence(deg, with_labels=True, compact=True) cs0 = nxt.creation_sequence(deg) H0 = nxt.threshold_graph(cs0) assert "".join(cs0) == "ddid" cs1 = nxt.creation_sequence(deg, with_labels=True) H1 = nxt.threshold_graph(cs1) assert cs1 == [(1, "d"), (2, "d"), (3, "i"), (0, "d")] cs2 = nxt.creation_sequence(deg, compact=True) H2 = nxt.threshold_graph(cs2) assert cs2 == [2, 1, 1] assert "".join(nxt.uncompact(cs2)) == "ddid" assert nx.could_be_isomorphic(H0, G) assert nx.could_be_isomorphic(H0, H1) assert nx.could_be_isomorphic(H0, H2) def test_make_compact(self): assert nxt.make_compact(["d", "d", "d", "i", "d", "d"]) == [3, 1, 2] assert nxt.make_compact([3, 1, 2]) == [3, 1, 2] pytest.raises(TypeError, nxt.make_compact, [3.0, 1.0, 2.0]) def test_uncompact(self): assert nxt.uncompact([3, 1, 2]) == ["d", "d", "d", "i", "d", "d"] assert nxt.uncompact(["d", "d", "i", "d"]) == ["d", "d", "i", "d"] assert nxt.uncompact( nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) ) == nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) pytest.raises(TypeError, nxt.uncompact, [3.0, 1.0, 2.0]) def test_creation_sequence_to_weights(self): assert nxt.creation_sequence_to_weights([3, 1, 2]) == [ 0.5, 0.5, 0.5, 0.25, 0.75, 0.75, ] pytest.raises(TypeError, nxt.creation_sequence_to_weights, [3.0, 1.0, 2.0]) def test_weights_to_creation_sequence(self): deg = [3, 2, 
2, 1] with pytest.raises(ValueError): nxt.weights_to_creation_sequence(deg, with_labels=True, compact=True) assert nxt.weights_to_creation_sequence(deg, with_labels=True) == [ (3, "d"), (1, "d"), (2, "d"), (0, "d"), ] assert nxt.weights_to_creation_sequence(deg, compact=True) == [4] def test_find_alternating_4_cycle(self): G = nx.Graph() G.add_edge(1, 2) assert not nxt.find_alternating_4_cycle(G) def test_shortest_path(self): deg = [3, 2, 2, 1] G = nx.generators.havel_hakimi_graph(deg) cs1 = nxt.creation_sequence(deg, with_labels=True) for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3), (3, 1), (1, 2), (2, 3)]: assert nxt.shortest_path(cs1, n, m) == nx.shortest_path(G, n, m) spl = nxt.shortest_path_length(cs1, 3) spl2 = nxt.shortest_path_length([t for v, t in cs1], 2) assert spl == spl2 spld = {} for j, pl in enumerate(spl): n = cs1[j][0] spld[n] = pl assert spld == nx.single_source_shortest_path_length(G, 3) assert nxt.shortest_path(["d", "d", "d", "i", "d", "d"], 1, 2) == [1, 2] assert nxt.shortest_path([3, 1, 2], 1, 2) == [1, 2] pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1, 2) pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], "a", 2) pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], 1, "b") assert nxt.shortest_path([3, 1, 2], 1, 1) == [1] def test_shortest_path_length(self): assert nxt.shortest_path_length([3, 1, 2], 1) == [1, 0, 1, 2, 1, 1] assert nxt.shortest_path_length(["d", "d", "d", "i", "d", "d"], 1) == [ 1, 0, 1, 2, 1, 1, ] assert nxt.shortest_path_length(("d", "d", "d", "i", "d", "d"), 1) == [ 1, 0, 1, 2, 1, 1, ] pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1) def test_random_threshold_sequence(self): assert len(nxt.random_threshold_sequence(10, 0.5)) == 10 assert nxt.random_threshold_sequence(10, 0.5, seed=42) == [ "d", "i", "d", "d", "d", "i", "i", "i", "d", "d", ] pytest.raises(ValueError, nxt.random_threshold_sequence, 10, 1.5) def test_right_d_threshold_sequence(self): assert 
nxt.right_d_threshold_sequence(3, 2) == ["d", "i", "d"] pytest.raises(ValueError, nxt.right_d_threshold_sequence, 2, 3) def test_left_d_threshold_sequence(self): assert nxt.left_d_threshold_sequence(3, 2) == ["d", "i", "d"] pytest.raises(ValueError, nxt.left_d_threshold_sequence, 2, 3) def test_weights_thresholds(self): wseq = [3, 4, 3, 3, 5, 6, 5, 4, 5, 6] cs = nxt.weights_to_creation_sequence(wseq, threshold=10) wseq = nxt.creation_sequence_to_weights(cs) cs2 = nxt.weights_to_creation_sequence(wseq) assert cs == cs2 wseq = nxt.creation_sequence_to_weights(nxt.uncompact([3, 1, 2, 3, 3, 2, 3])) assert wseq == [ s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] ] wseq = nxt.creation_sequence_to_weights([3, 1, 2, 3, 3, 2, 3]) assert wseq == [ s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] ] wseq = nxt.creation_sequence_to_weights(list(enumerate("ddidiiidididi"))) assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] wseq = nxt.creation_sequence_to_weights("ddidiiidididi") assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] wseq = nxt.creation_sequence_to_weights("ddidiiidididid") ws = [s / 12 for s in [6, 6, 5, 7, 4, 4, 4, 8, 3, 9, 2, 10, 1, 11]] assert sum(abs(c - d) for c, d in zip(wseq, ws)) < 1e-14 def test_finding_routines(self): G = nx.Graph({1: [2], 2: [3], 3: [4], 4: [5], 5: [6]}) G.add_edge(2, 4) G.add_edge(2, 5) G.add_edge(2, 7) G.add_edge(3, 6) G.add_edge(4, 6) # Alternating 4 cycle assert nxt.find_alternating_4_cycle(G) == [1, 2, 3, 6] # Threshold graph TG = nxt.find_threshold_graph(G) assert nxt.is_threshold_graph(TG) assert sorted(TG.nodes()) == [1, 2, 3, 4, 5, 7] cs = nxt.creation_sequence(dict(TG.degree()), with_labels=True) assert nxt.find_creation_sequence(G) == cs def test_fast_versions_properties_threshold_graphs(self): cs = "ddiiddid" G = nxt.threshold_graph(cs) assert nxt.density("ddiiddid") == nx.density(G) assert sorted(nxt.degree_sequence(cs)) == sorted(d 
for n, d in G.degree()) ts = nxt.triangle_sequence(cs) assert ts == list(nx.triangles(G).values()) assert sum(ts) // 3 == nxt.triangles(cs) c1 = nxt.cluster_sequence(cs) c2 = list(nx.clustering(G).values()) assert sum(abs(c - d) for c, d in zip(c1, c2)) == pytest.approx(0, abs=1e-7) b1 = nx.betweenness_centrality(G).values() b2 = nxt.betweenness_sequence(cs) assert sum(abs(c - d) for c, d in zip(b1, b2)) < 1e-7 assert nxt.eigenvalues(cs) == [0, 1, 3, 3, 5, 7, 7, 8] # Degree Correlation assert abs(nxt.degree_correlation(cs) + 0.593038821954) < 1e-12 assert nxt.degree_correlation("diiiddi") == -0.8 assert nxt.degree_correlation("did") == -1.0 assert nxt.degree_correlation("ddd") == 1.0 assert nxt.eigenvalues("dddiii") == [0, 0, 0, 0, 3, 3] assert nxt.eigenvalues("dddiiid") == [0, 1, 1, 1, 4, 4, 7] def test_tg_creation_routines(self): s = nxt.left_d_threshold_sequence(5, 7) s = nxt.right_d_threshold_sequence(5, 7) def test_eigenvectors(self): np = pytest.importorskip("numpy") eigenval = np.linalg.eigvals pytest.importorskip("scipy") cs = "ddiiddid" G = nxt.threshold_graph(cs) (tgeval, tgevec) = nxt.eigenvectors(cs) np.testing.assert_allclose([np.dot(lv, lv) for lv in tgevec], 1.0, rtol=1e-9) lapl = nx.laplacian_matrix(G) def test_create_using(self): cs = "ddiiddid" G = nxt.threshold_graph(cs) pytest.raises( nx.exception.NetworkXError, nxt.threshold_graph, cs, create_using=nx.DiGraph(), ) MG = nxt.threshold_graph(cs, create_using=nx.MultiGraph()) assert sorted(MG.edges()) == sorted(G.edges())
TestGeneratorThreshold
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 58047, "end": 62582 }
class ____(GemmMaxAutotuneTemplateConfigHeuristics): """ Mixin class that converts config lists to template kwargs. This handles the logic that was previously in choices.get_mm_configs. This mixin expects to be used with BaseConfigHeuristic or its subclasses. """ # Type annotations to ensure the mixin works with BaseConfigHeuristic get_mm_configs: Callable[[], partial[Generator[TritonConfig, None, None]]] get_exhaustive_mm_configs: Callable[ [], partial[Generator[TritonConfig, None, None]] ] _filter_configs: Callable[[list[BaseConfig]], list[BaseConfig]] def get_extra_kwargs( self, kernel_inputs: KernelInputs, op_name: str, ) -> dict[str, Any]: assert isinstance(kernel_inputs, MMKernelInputs) m, n, k = kernel_inputs.mnk_symbolic() # Calculate allow_tf32 allow_tf32 = torch.backends.cuda.matmul.allow_tf32 and ( not inductor_config.force_same_precision or ((m % 16) == 0 and (n % 16) == 0 and (k % 8) == 0) ) return { "ALLOW_TF32": allow_tf32, } def _valid(self, kernel_inputs: KernelInputs) -> bool: return True def _get_config_generator( self, ) -> partial[Generator[TritonConfig, None, None]]: """ Get the appropriate config generator based on search space. Can be overridden by subclasses for template-specific behavior. """ # Handle exhaustive search case if config.max_autotune_gemm_search_space == "EXHAUSTIVE": return self.get_exhaustive_mm_configs() else: return self.get_mm_configs() def _get_template_configs_impl( self, kernel_inputs: KernelInputs, op_name: str, ) -> Generator[dict[str, Any], None, None]: """ Convert config lists to template kwargs. This replaces the logic from choices.get_mm_configs and inlines mm_options. 
""" assert isinstance(kernel_inputs, MMKernelInputs), ( f"{self.__class__.__name__} requires MMKernelInputs" ) input_nodes = kernel_inputs.nodes() if len(input_nodes) < 2: raise ValueError(f"Need at least 2 input tensors, got {len(input_nodes)}") if not self._valid(kernel_inputs): return # Extract M, N, K from kernel_inputs m, n, k = kernel_inputs.mnk_symbolic() # Extract dtype and device_type from kernel_inputs dtype = kernel_inputs.dtype() # Get the appropriate config generator configs = self._get_config_generator() # Generate and process configs for c in configs(m, n, k, dtype_size=dtype.itemsize, op_name=op_name): template_kwargs = self._convert_config_to_template_kwargs( c, m, n, k, kernel_inputs.out_dtype(), ) yield template_kwargs def _convert_config_to_template_kwargs( self, triton_config: TritonConfig, m: sympy.Integer, n: sympy.Integer, k: sympy.Integer, out_dtype: torch.dtype, ) -> dict[str, Any]: """ Convert triton config to template kwargs. Moved from mm_common.mm_options. """ # Calculate EVEN_K symbolic even_k_symbolic = ( # it isn't worth guarding on this sympy.gcd(k, triton_config.kwargs["BLOCK_K"]) == triton_config.kwargs["BLOCK_K"] ) # Build options dict options_dict = dict( EVEN_K=even_k_symbolic, USE_FAST_ACCUM=False, # Option for _scaled_mm ACC_TYPE=self._get_acc_type(out_dtype), num_stages=triton_config.num_stages, num_warps=triton_config.num_warps, **triton_config.kwargs, ) # If GROUP_M not specified then default to 8 if "GROUP_M" not in triton_config.kwargs: group_m = triton_config.kwargs.get("GROUP_M", 8) options_dict["GROUP_M"] = group_m return options_dict def _get_acc_type(self, dtype: torch.dtype) -> str: """ Get accumulator type for the given dtype. Moved from mm_common.acc_type. """ if dtype in (torch.float16, torch.bfloat16): return "tl.float32" return f"tl.{dtype}".replace("torch.", "") # INT8 specific mixin to filter correctly
MMTemplateConfigMixin
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-motherduck/destination_motherduck/processors/motherduck.py
{ "start": 998, "end": 2726 }
class ____(DuckDBConfig): """Configuration for the MotherDuck cache.""" database: str = Field() api_key: SecretString = Field() db_path: str = Field(default="md:") custom_user_agent: str = Field(default="airbyte") @overrides def get_sql_alchemy_url(self) -> SecretString: """Return the SQLAlchemy URL to use.""" # Suppress warnings from DuckDB about reflection on indices. # https://github.com/Mause/duckdb_engine/issues/905 warnings.filterwarnings( "ignore", message="duckdb-engine doesn't yet support reflection on indices", category=DuckDBEngineWarning, ) # We defer adding schema name and API token until `create_engine()` call. return SecretString(f"duckdb:///md:{self.database}?custom_user_agent={self.custom_user_agent}") @overrides def get_database_name(self) -> str: """Return the name of the database.""" return self.database @overrides def get_sql_engine(self) -> Engine: """ Return a new SQL engine to use. This method is overridden to: - ensure that the database parent directory is created if it doesn't exist. - pass the DuckDB query parameters (such as motherduck_token) via the config """ return create_engine( url=self.get_sql_alchemy_url(), echo=DEBUG_MODE, execution_options={ "schema_translate_map": {None: self.schema_name}, }, future=True, connect_args={ "config": { "motherduck_token": self.api_key, }, }, )
MotherDuckConfig
python
pytorch__pytorch
test/dynamo/test_guard_serialization.py
{ "start": 1078, "end": 1186 }
class ____: f_locals: dict f_globals: dict f_code: types.CodeType f_builtins: dict
_FrameState
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/event.py
{ "start": 1318, "end": 1636 }
class ____(BaseEvent): """Event dispatched when a file is successfully processed.""" file_path: str file_type: str file_size: Optional[int] = None document: Optional[Document] = None @classmethod def class_name(cls) -> str: return "GitHubFileProcessedEvent"
GitHubFileProcessedEvent
python
viewflow__viewflow
viewflow/workflow/nodes/func.py
{ "start": 268, "end": 1057 }
class ____(mixins.NextNodeActivationMixin, Activation): """ Handle Activation. Executes a callback immediately. """ @Activation.status.super() def activate(self): """Perform the callback within current exception propagation strategy.""" with transaction.atomic(savepoint=True), self.exception_guard(): self.task.started = now() task_started.send( sender=self.flow_class, process=self.process, task=self.task ) self.flow_task._func(self) @Activation.status.transition( source=[STATUS.ERROR], target=STATUS.CANCELED, permission=has_manage_permission, ) def cancel(self): self.task.finished = now() self.task.save()
FunctionActivation
python
sympy__sympy
sympy/physics/vector/frame.py
{ "start": 398, "end": 2414 }
class ____(Symbol): """ A coordinate symbol/base scalar associated wrt a Reference Frame. Ideally, users should not instantiate this class. Instances of this class must only be accessed through the corresponding frame as 'frame[index]'. CoordinateSyms having the same frame and index parameters are equal (even though they may be instantiated separately). Parameters ========== name : string The display name of the CoordinateSym frame : ReferenceFrame The reference frame this base scalar belongs to index : 0, 1 or 2 The index of the dimension denoted by this coordinate variable Examples ======== >>> from sympy.physics.vector import ReferenceFrame, CoordinateSym >>> A = ReferenceFrame('A') >>> A[1] A_y >>> type(A[0]) <class 'sympy.physics.vector.frame.CoordinateSym'> >>> a_y = CoordinateSym('a_y', A, 1) >>> a_y == A[1] True """ def __new__(cls, name, frame, index): # We can't use the cached Symbol.__new__ because this class depends on # frame and index, which are not passed to Symbol.__xnew__. assumptions = {} super()._sanitize(assumptions, cls) obj = super().__xnew__(cls, name, **assumptions) _check_frame(frame) if index not in range(0, 3): raise ValueError("Invalid index specified") obj._id = (frame, index) return obj def __getnewargs_ex__(self): return (self.name, *self._id), {} @property def frame(self): return self._id[0] def __eq__(self, other): # Check if the other object is a CoordinateSym of the same frame and # same index if isinstance(other, CoordinateSym): if other._id == self._id: return True return False def __ne__(self, other): return not self == other def __hash__(self): return (self._id[0].__hash__(), self._id[1]).__hash__()
CoordinateSym
python
joke2k__faker
faker/providers/address/ja_JP/__init__.py
{ "start": 45, "end": 11159 }
class ____(AddressProvider): address_formats = ( "{{prefecture}}{{city}}{{town}}{{chome}}{{ban}}{{gou}}", "{{prefecture}}{{city}}{{town}}{{chome}}{{ban}}{{gou}} {{town}}{{building_name}}{{building_number}}", "{{prefecture}}{{city}}{{town}}{{chome}}{{ban}}{{gou}} {{building_name}}{{town}}{{building_number}}", ) building_number_formats = ("###",) countries = ( "アフガニスタン", "アルバニア", "アルジェリア", "アメリカ領サモア", "アンドラ", "アンゴラ", "アンギラ", "南極大陸", "アンティグアバーブーダ", "アルゼンチン", "アルメニア", "アルバ", "オーストラリア", "オーストリア", "アゼルバイジャン", "バハマ", "バーレーン", "バングラデシュ", "バルバドス", "ベラルーシ", "ベルギー", "ベリーズ", "ベナン", "バミューダ島", "ブータン", "ボリビア", "ボスニア・ヘルツェゴビナ", "ボツワナ", "ブーベ島", "ブラジル", "イギリス領インド洋地域", "イギリス領ヴァージン諸島", "ブルネイ", "ブルガリア", "ブルキナファソ", "ブルンジ", "カンボジア", "カメルーン", "カナダ", "カーボベルデ", "ケイマン諸島", "中央アフリカ共和国", "チャド", "チリ", "中国", "クリスマス島", "ココス諸島", "コロンビア", "コモロ", "コンゴ共和国", "クック諸島", "コスタリカ", "コートジボワール", "クロアチア", "キューバ", "キプロス共和国", "チェコ共和国", "デンマーク", "ジブチ共和国", "ドミニカ国", "ドミニカ共和国", "エクアドル", "エジプト", "エルサルバドル", "赤道ギニア共和国", "エリトリア", "エストニア", "エチオピア", "フェロー諸島", "フォークランド諸島", "フィジー共和国", "フィンランド", "フランス", "フランス領ギアナ", "フランス領ポリネシア", "フランス領極南諸島", "ガボン", "ガンビア", "グルジア", "ドイツ", "ガーナ", "ジブラルタル", "ギリシャ", "グリーンランド", "グレナダ", "グアドループ", "グアム", "グアテマラ", "ガーンジー", "ギニア", "ギニアビサウ", "ガイアナ", "ハイチ", "ハード島とマクドナルド諸島", "バチカン市国", "ホンジュラス", "香港", "ハンガリー", "アイスランド", "インド", "インドネシア", "イラン", "イラク", "アイルランド共和国", "マン島", "イスラエル", "イタリア", "ジャマイカ", "日本", "ジャージー島", "ヨルダン", "カザフスタン", "ケニア", "キリバス", "朝鮮", "韓国", "クウェート", "キルギス共和国", "ラオス人民民主共和国", "ラトビア", "レバノン", "レソト", "リベリア", "リビア国", "リヒテンシュタイン", "リトアニア", "ルクセンブルク", "マカオ", "北マケドニア共和国", "マダガスカル", "マラウィ", "マレーシア", "モルディブ", "マリ", "マルタ共和国", "マーシャル諸島", "マルティニーク", "モーリタニア・イスラム共和国", "モーリシャス", "マヨット", "メキシコ", "ミクロネシア連邦", "モルドバ共和国", "モナコ公国", "モンゴル", "モンテネグロ共和国", "モントセラト", "モロッコ", "モザンビーク", "ミャンマー", "ナミビア", "ナウル", "ネパール", "オランダ領アンティル", "オランダ", "ニューカレドニア", "ニュージーランド", "ニカラグア", "ニジェール", "ナイジェリア", "ニース", "ノーフォーク島", "北マリアナ諸島", "ノルウェー", "オマーン", "パキスタン", "パラオ", "パレスチナ自治区", "パナマ", "パプアニューギニア", "パラグアイ", "ペルー", "フィリピン", "ピトケアン諸島", 
"ポーランド", "ポルトガル", "プエルトリコ", "カタール", "レユニオン", "ルーマニア", "ロシア", "ルワンダ", "サン・バルテルミー島", "セントヘレナ", "セントクリストファー・ネイビス連邦", "セントルシア", "セント・マーチン島", "サンピエール島・ミクロン島", "セントビンセント・グレナディーン", "サモア", "サンマリノ", "サントメプリンシペ", "サウジアラビア", "セネガル", "セルビア", "セイシェル", "シエラレオネ", "シンガポール", "スロバキア", "スロベニア", "ソロモン諸島", "ソマリア", "南アフリカ共和国", "サウスジョージア・サウスサンドウィッチ諸島", "スペイン", "スリランカ", "スーダン", "スリナム", "スヴァールバル諸島およびヤンマイエン島", "スワジランド王国", "スウェーデン", "スイス", "シリア", "台湾", "タジキスタン共和国", "タンザニア", "タイ", "東ティモール", "トーゴ", "トケラウ", "トンガ", "トリニダード・トバゴ", "チュニジア", "トルコ", "トルクメニスタン", "タークス・カイコス諸島", "ツバル", "ウガンダ", "ウクライナ", "アラブ首長国連邦", "イギリス", "アメリカ合衆国", "合衆国領有小離島", "アメリカ領ヴァージン諸島", "ウルグアイ", "ウズベキスタン", "バヌアツ", "ベネズエラ", "ベトナム", "ウォリス・フツナ", "西サハラ", "イエメン", "ザンビア", "ジンバブエ", ) prefectures = ( "北海道", "青森県", "岩手県", "宮城県", "秋田県", "山形県", "福島県", "茨城県", "栃木県", "群馬県", "埼玉県", "千葉県", "東京都", "神奈川県", "新潟県", "富山県", "石川県", "福井県", "山梨県", "長野県", "岐阜県", "静岡県", "愛知県", "三重県", "滋賀県", "京都府", "大阪府", "兵庫県", "奈良県", "和歌山県", "鳥取県", "島根県", "岡山県", "広島県", "山口県", "徳島県", "香川県", "愛媛県", "高知県", "福岡県", "佐賀県", "長崎県", "熊本県", "大分県", "宮崎県", "鹿児島県", "沖縄県", ) cities = ( "八千代市", "我孫子市", "鴨川市", "鎌ケ谷市", "君津市", "富津市", "浦安市", "四街道市", "袖ケ浦市", "八街市", "印西市", "白井市", "富里市", "南房総市", "匝瑳市", "香取市", "山武市", "いすみ市", "大網白里市", "印旛郡酒々井町", "印旛郡印旛村", "印旛郡本埜村", "印旛郡栄町", "香取郡神崎町", "香取郡多古町", "香取郡東庄町", "山武郡九十九里町", "山武郡芝山町", "山武郡横芝光町", "長生郡一宮町", "長生郡睦沢町", "長生郡長生村", "長生郡白子町", "長生郡長柄町", "長生郡長南町", "夷隅郡大多喜町", "夷隅郡御宿町", "安房郡鋸南町", "千代田区", "中央区", "港区", "新宿区", "文京区", "台東区", "墨田区", "江東区", "品川区", "目黒区", "大田区", "世田谷区", "渋谷区", "中野区", "杉並区", "豊島区", "北区", "荒川区", "板橋区", "練馬区", "足立区", "葛飾区", "江戸川区", "八王子市", "立川市", "武蔵野市", "三鷹市", "青梅市", "府中市", "昭島市", "調布市", "町田市", "小金井市", "小平市", "日野市", "東村山市", "国分寺市", "国立市", "福生市", "狛江市", "東大和市", "清瀬市", "東久留米市", "武蔵村山市", "多摩市", "稲城市", "羽村市", "あきる野市", "西東京市", "西多摩郡瑞穂町", "西多摩郡日の出町", "西多摩郡檜原村", "西多摩郡奥多摩町", "大島町", "利島村", "新島村", "神津島村", "三宅島三宅村", "御蔵島村", "八丈島八丈町", "青ヶ島村", "小笠原村", "横浜市鶴見区", "横浜市神奈川区", "横浜市西区", "横浜市中区", "横浜市南区", "横浜市保土ケ谷区", "横浜市磯子区", "横浜市金沢区", "横浜市港北区", "横浜市戸塚区", 
"横浜市港南区", "横浜市旭区", "横浜市緑区", "横浜市瀬谷区", "横浜市栄区", "横浜市泉区", "横浜市青葉区", "横浜市都筑区", "川崎市川崎区", "川崎市幸区", "川崎市中原区", "川崎市高津区", "川崎市多摩区", "川崎市宮前区", ) towns = ( "丹勢", "中宮祠", "手岡", "東和町", "所野", "土沢", "独鈷沢", "轟", "土呂部", "中小来川", "長畑", "中鉢石町", "中三依", "西小来川", "西川", "日光", "東三島", "東大和町", "蟇沼", "二つ室", "方京", "細竹", "前弥六", "前弥六南町", "松浦町", "南赤田", "南郷屋", "美原町", "無栗屋", "睦", "百村", "箭坪", "山中新田", "油井", "湯宮", "豊町", "湯本塩原", "横林", "四区町", "渡辺", "氏家", "氏家新田", "卯の里", "小入", "大中", "押上", "柿木沢", "柿木沢新田", "鍛冶ケ沢", "上高野", "上吉羽", "木立", "権現堂", "幸手", "下宇和田", "下吉羽", "神明内", "外国府間", "千塚", "天神島", "戸島", "中川崎", "長間", "西関宿", "花島", "平須賀", "細野", "松石", "太田ヶ谷", "上広谷", "五味ヶ谷", "脚折", "脚折町", "鶴ヶ丘", "羽折町", "藤金", "九段南", "皇居外苑", "麹町", "猿楽町", "外神田", "西神田", "隼町", "東神田", "一ツ橋", "日比谷公園", "平河町", "丸の内", "丸の内JPタワー", "四番町", "六番町", "明石町", "勝どき", "京橋", "月島", "北青山", "港南", "芝浦", "芝公園", "芝大門", "白金", "白金台", "台場", "高輪", "虎ノ門", "虎ノ門虎ノ門ヒルズ森タワー", "大京町", "高田馬場", "箪笥町", "津久戸町", "筑土八幡町", "戸塚町", "富久町", "戸山", "秋葉原", "浅草", "浅草橋", "池之端", "今戸", "入谷", "上野公園", "上野桜木", "雷門", "北上野", "蔵前", "千束", "台東", "鳥越", "西浅草", "日本堤", "橋場", "花川戸", "東浅草", "東上野", "松が谷", "三筋", "三ノ輪", "元浅草", "竜泉", "吾妻橋", ) building_names = ( "パレス", "ハイツ", "コーポ", "アーバン", "クレスト", "パーク", "シティ", "シャルム", "コート", ) def administrative_unit(self) -> str: """ :example: '東京都' """ return self.random_element(self.prefectures) prefecture = administrative_unit def city(self) -> str: """ :example: '台東区' """ return self.random_element(self.cities) def town(self) -> str: """ :example: '浅草' """ return self.random_element(self.towns) def chome(self) -> str: """ :example: '1丁目' """ return "%d丁目" % self.generator.random.randint(1, 42) def ban(self) -> str: """ :example: '3番' """ return "%d番" % self.generator.random.randint(1, 27) def gou(self) -> str: """ :example: '10号' """ return "%d号" % self.generator.random.randint(1, 20) def building_name(self) -> str: """ :example: 'コーポ芝浦' """ return self.random_element(self.building_names) def postcode(self) -> str: """ :example: '101-1212' """ return "%03d-%04d" % ( 
self.generator.random.randint(0, 999), self.generator.random.randint(0, 9999), ) def zipcode(self) -> str: """ :example: '101-1212' """ return self.postcode()
Provider
python
getsentry__sentry
tests/sentry/incidents/endpoints/validators/test_validators.py
{ "start": 1833, "end": 4801 }
class ____(BaseValidatorTest): def setUp(self) -> None: super().setUp() self.valid_data = { "type": Condition.GREATER, "comparison": 100, "conditionResult": DetectorPriorityLevel.HIGH, "conditionGroupId": self.data_condition_group.id, } def test(self) -> None: validator = MetricIssueComparisonConditionValidator(data=self.valid_data) assert validator.is_valid() assert validator.validated_data == { "comparison": 100.0, "condition_result": DetectorPriorityLevel.HIGH, "type": Condition.GREATER, "condition_group_id": self.data_condition_group.id, } def test_invalid_condition(self) -> None: unsupported_condition = Condition.EQUAL data = { **self.valid_data, "type": unsupported_condition, } validator = MetricIssueComparisonConditionValidator(data=data) assert not validator.is_valid() assert validator.errors.get("type") == [ ErrorDetail(string=f"Unsupported type {unsupported_condition}", code="invalid") ] def test_unregistered_condition(self) -> None: validator = MetricIssueComparisonConditionValidator( data={**self.valid_data, "type": "invalid"} ) assert not validator.is_valid() assert validator.errors.get("type") == [ ErrorDetail(string='"invalid" is not a valid choice.', code="invalid_choice") ] def test_invalid_comparison(self) -> None: validator = MetricIssueComparisonConditionValidator( data={ **self.valid_data, "comparison": "not_a_number", } ) assert not validator.is_valid() assert validator.errors.get("comparison") == [ ErrorDetail(string="A valid number or dict is required.", code="invalid") ] def test_invalid_comparison_dict(self) -> None: comparison = {"foo": "bar"} validator = MetricIssueComparisonConditionValidator( data={ **self.valid_data, "comparison": comparison, } ) assert not validator.is_valid() assert validator.errors.get("comparison") == [ ErrorDetail( string=f"Invalid json primitive value: {comparison}. 
Must be a string, number, or boolean.", code="invalid", ) ] def test_invalid_result(self) -> None: validator = MetricIssueComparisonConditionValidator( data={ **self.valid_data, "conditionResult": 25, } ) assert not validator.is_valid() assert validator.errors.get("conditionResult") == [ ErrorDetail(string="Unsupported condition result", code="invalid") ]
MetricIssueComparisonConditionValidatorTest
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
{ "start": 5812, "end": 27514 }
class ____(FunctionCallingLLM): """ MistralAI LLM. Examples: `pip install llama-index-llms-mistralai` ```python from llama_index.llms.mistralai import MistralAI # To customize your API key, do this # otherwise it will lookup MISTRAL_API_KEY from your env variable # llm = MistralAI(api_key="<api_key>") # You can specify a custom endpoint by passing the `endpoint` variable or setting # MISTRAL_ENDPOINT in your environment # llm = MistralAI(endpoint="<endpoint>") llm = MistralAI() resp = llm.complete("Paul Graham is ") print(resp) ``` """ model: str = Field( default=DEFAULT_MISTRALAI_MODEL, description="The mistralai model to use." ) temperature: float = Field( default=DEFAULT_TEMPERATURE, description="The temperature to use for sampling.", ge=0.0, le=1.0, ) max_tokens: int = Field( default=DEFAULT_MISTRALAI_MAX_TOKENS, description="The maximum number of tokens to generate.", gt=0, ) timeout: float = Field( default=120, description="The timeout to use in seconds.", ge=0 ) max_retries: int = Field( default=5, description="The maximum number of API retries.", ge=0 ) random_seed: Optional[int] = Field( default=None, description="The random seed to use for sampling." ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the MistralAI API." ) show_thinking: bool = Field( default=False, description="Whether to show thinking in the final response. 
Only available for reasoning models.", ) _client: Mistral = PrivateAttr() def __init__( self, model: str = DEFAULT_MISTRALAI_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: int = DEFAULT_MISTRALAI_MAX_TOKENS, timeout: int = 120, max_retries: int = 5, safe_mode: bool = False, random_seed: Optional[int] = None, api_key: Optional[str] = None, additional_kwargs: Optional[Dict[str, Any]] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, endpoint: Optional[str] = None, show_thinking: bool = False, ) -> None: additional_kwargs = additional_kwargs or {} callback_manager = callback_manager or CallbackManager([]) api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "") if not api_key: raise ValueError( "You must provide an API key to use mistralai. " "You can either pass it in as an argument or set it `MISTRAL_API_KEY`." 
) # Use the custom endpoint if provided, otherwise default to DEFAULT_MISTRALAI_ENDPOINT endpoint = get_from_param_or_env( "endpoint", endpoint, "MISTRAL_ENDPOINT", DEFAULT_MISTRALAI_ENDPOINT ) super().__init__( temperature=temperature, max_tokens=max_tokens, additional_kwargs=additional_kwargs, timeout=timeout, max_retries=max_retries, safe_mode=safe_mode, random_seed=random_seed, model=model, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, show_thinking=show_thinking, ) self._client = Mistral( api_key=api_key, server_url=endpoint, ) @classmethod def class_name(cls) -> str: return "MistralAI_LLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=mistralai_modelname_to_contextsize(self.model), num_output=self.max_tokens, is_chat_model=True, model_name=self.model, random_seed=self.random_seed, is_function_calling_model=is_mistralai_function_calling_model(self.model), ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "model": self.model, "temperature": self.temperature, "max_tokens": self.max_tokens, "random_seed": self.random_seed, "retries": self.max_retries, "timeout_ms": self.timeout * 1000, } return { **base_kwargs, **self.additional_kwargs, } def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } def _separate_thinking( self, response: Union[str, List[ContentChunk]] ) -> Tuple[str, str]: """Separate the thinking from the response.""" content = "" if isinstance(response, str): content = response else: for chunk in response: if isinstance(chunk, ThinkChunk): for c in chunk.thinking: if isinstance(c, TextChunk): content += c.text + "\n" match = THINKING_REGEX.search(content) if match: return match.group(1), content.replace(match.group(0), "") match = THINKING_START_REGEX.search(content) if match: 
return match.group(0), "" return "", content @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: # convert messages to mistral ChatMessage messages = to_mistral_chatmessage(messages) all_kwargs = self._get_all_kwargs(**kwargs) response = self._client.chat.complete(messages=messages, **all_kwargs) blocks: List[TextBlock | ThinkingBlock | ToolCallBlock] = [] if self.model in MISTRAL_AI_REASONING_MODELS: thinking_txt, response_txt = self._separate_thinking( response.choices[0].message.content or [] ) if thinking_txt: blocks.append(ThinkingBlock(content=thinking_txt)) response_txt_think_show = "" if response.choices[0].message.content: if isinstance(response.choices[0].message.content, str): response_txt_think_show = response.choices[0].message.content else: for chunk in response.choices[0].message.content: if isinstance(chunk, TextBlock): response_txt_think_show += chunk.text + "\n" if isinstance(chunk, ThinkChunk): for c in chunk.thinking: if isinstance(c, TextChunk): response_txt_think_show += c.text + "\n" response_txt = ( response_txt if not self.show_thinking else response_txt_think_show ) else: response_txt = response.choices[0].message.content blocks.append(TextBlock(text=response_txt)) tool_calls = response.choices[0].message.tool_calls if tool_calls is not None: for tool_call in tool_calls: if isinstance(tool_call, ToolCall): blocks.append( ToolCallBlock( tool_call_id=tool_call.id, tool_kwargs=tool_call.function.arguments, tool_name=tool_call.function.name, ) ) return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, blocks=blocks, ), raw=dict(response), ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: complete_fn = chat_to_completion_decorator(self.chat) return complete_fn(prompt, **kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: # convert messages 
to mistral ChatMessage messages = to_mistral_chatmessage(messages) all_kwargs = self._get_all_kwargs(**kwargs) response = self._client.chat.stream(messages=messages, **all_kwargs) def gen() -> ChatResponseGen: content = "" blocks: List[TextBlock | ThinkingBlock | ToolCallBlock] = [] for chunk in response: delta = chunk.data.choices[0].delta role = delta.role or MessageRole.ASSISTANT # NOTE: Unlike openAI, we are directly injecting the tool calls if delta.tool_calls: for tool_call in delta.tool_calls: if isinstance(tool_call, ToolCall): blocks.append( ToolCallBlock( tool_call_id=tool_call.id, tool_name=tool_call.function.name, tool_kwargs=tool_call.function.arguments, ) ) content_delta = delta.content or "" content_delta_str = "" if isinstance(content_delta, str): content_delta_str = content_delta else: for chunk in content_delta: if isinstance(chunk, TextChunk): content_delta_str += chunk.text + "\n" elif isinstance(chunk, ThinkChunk): for c in chunk.thinking: if isinstance(c, TextChunk): content_delta_str += c.text + "\n" else: continue content += content_delta_str # decide whether to include thinking in deltas/responses if self.model in MISTRAL_AI_REASONING_MODELS: thinking_txt, response_txt = self._separate_thinking(content) if thinking_txt: blocks.append(ThinkingBlock(content=thinking_txt)) content = response_txt if not self.show_thinking else content # If thinking hasn't ended, don't include it in the delta if thinking_txt is None and not self.show_thinking: content_delta = "" blocks.append(TextBlock(text=content)) yield ChatResponse( message=ChatMessage( role=role, blocks=blocks, ), delta=content_delta_str, raw=chunk, ) return gen() @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat) return stream_complete_fn(prompt, **kwargs) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], 
**kwargs: Any ) -> ChatResponse: # convert messages to mistral ChatMessage messages = to_mistral_chatmessage(messages) all_kwargs = self._get_all_kwargs(**kwargs) response = await self._client.chat.complete_async( messages=messages, **all_kwargs ) blocks: List[TextBlock | ThinkingBlock | ToolCallBlock] = [] additional_kwargs = {} if self.model in MISTRAL_AI_REASONING_MODELS: thinking_txt, response_txt = self._separate_thinking( response.choices[0].message.content or [] ) if thinking_txt: blocks.append(ThinkingBlock(content=thinking_txt)) response_txt_think_show = "" if response.choices[0].message.content: if isinstance(response.choices[0].message.content, str): response_txt_think_show = response.choices[0].message.content else: for chunk in response.choices[0].message.content: if isinstance(chunk, TextBlock): response_txt_think_show += chunk.text + "\n" if isinstance(chunk, ThinkChunk): for c in chunk.thinking: if isinstance(c, TextChunk): response_txt_think_show += c.text + "\n" response_txt = ( response_txt if not self.show_thinking else response_txt_think_show ) else: response_txt = response.choices[0].message.content blocks.append(TextBlock(text=response_txt)) tool_calls = response.choices[0].message.tool_calls if tool_calls is not None: for tool_call in tool_calls: if isinstance(tool_call, ToolCall): blocks.append( ToolCallBlock( tool_call_id=tool_call.id, tool_kwargs=tool_call.function.arguments, tool_name=tool_call.function.name, ) ) else: if isinstance(tool_call[1], (str, dict)): blocks.append( ToolCallBlock( tool_kwargs=tool_call[1], tool_name=tool_call[0] ) ) additional_kwargs["tool_calls"] = ( tool_calls # keep this to avoid tool calls loss if tool call does not fall within the validation scenarios above ) return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, blocks=blocks, additional_kwargs=additional_kwargs, ), raw=dict(response), ) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any 
) -> CompletionResponse: acomplete_fn = achat_to_completion_decorator(self.achat) return await acomplete_fn(prompt, **kwargs) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: # convert messages to mistral ChatMessage messages = to_mistral_chatmessage(messages) all_kwargs = self._get_all_kwargs(**kwargs) response = await self._client.chat.stream_async(messages=messages, **all_kwargs) async def gen() -> ChatResponseAsyncGen: content = "" blocks: List[ThinkingBlock | TextBlock | ToolCallBlock] = [] async for chunk in response: delta = chunk.data.choices[0].delta role = delta.role or MessageRole.ASSISTANT # NOTE: Unlike openAI, we are directly injecting the tool calls if delta.tool_calls: for tool_call in delta.tool_calls: if isinstance(tool_call, ToolCall): blocks.append( ToolCallBlock( tool_call_id=tool_call.id, tool_name=tool_call.function.name, tool_kwargs=tool_call.function.arguments, ) ) content_delta = delta.content or "" content_delta_str = "" if isinstance(content_delta, str): content_delta_str = content_delta else: for chunk in content_delta: if isinstance(chunk, TextChunk): content_delta_str += chunk.text + "\n" elif isinstance(chunk, ThinkChunk): for c in chunk.thinking: if isinstance(c, TextChunk): content_delta_str += c.text + "\n" else: continue content += content_delta_str # decide whether to include thinking in deltas/responses if self.model in MISTRAL_AI_REASONING_MODELS: thinking_txt, response_txt = self._separate_thinking(content) if thinking_txt: blocks.append(ThinkingBlock(content=thinking_txt)) content = response_txt if not self.show_thinking else content # If thinking hasn't ended, don't include it in the delta if thinking_txt is None and not self.show_thinking: content_delta = "" blocks.append(TextBlock(text=content)) yield ChatResponse( message=ChatMessage( role=role, blocks=blocks, ), delta=content_delta_str, raw=chunk, ) return gen() @llm_completion_callback() async 
def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat) return await astream_complete_fn(prompt, **kwargs) def _prepare_chat_with_tools( self, tools: List["BaseTool"], user_msg: Optional[Union[str, ChatMessage]] = None, chat_history: Optional[List[ChatMessage]] = None, verbose: bool = False, allow_parallel_tool_calls: bool = False, tool_required: bool = False, **kwargs: Any, ) -> Dict[str, Any]: """Prepare the chat with tools.""" # misralai uses the same openai tool format tool_specs = [ tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools ] if isinstance(user_msg, str): user_msg = ChatMessage(role=MessageRole.USER, content=user_msg) messages = chat_history or [] if user_msg: messages.append(user_msg) return { "messages": messages, "tools": tool_specs or None, "tool_choice": "required" if tool_required else "auto", **kwargs, } def _validate_chat_with_tools_response( self, response: ChatResponse, tools: List["BaseTool"], allow_parallel_tool_calls: bool = False, **kwargs: Any, ) -> ChatResponse: """Validate the response from chat_with_tools.""" if not allow_parallel_tool_calls: force_single_tool_call(response) return response def get_tool_calls_from_response( self, response: "ChatResponse", error_on_no_tool_call: bool = True, ) -> List[ToolSelection]: """Predict and call the tool.""" tool_calls = [ block for block in response.message.blocks if isinstance(block, ToolCallBlock) ] if len(tool_calls) < 1: if error_on_no_tool_call: raise ValueError( f"Expected at least one tool call, but got {len(tool_calls)} tool calls." 
) else: return [] tool_selections = [] for tool_call in tool_calls: if isinstance(tool_call.tool_kwargs, str): argument_dict = json.loads(tool_call.tool_kwargs) else: argument_dict = tool_call.tool_kwargs tool_selections.append( ToolSelection( tool_id=tool_call.tool_call_id or "", tool_name=tool_call.tool_name, tool_kwargs=argument_dict, ) ) return tool_selections def fill_in_middle( self, prompt: str, suffix: str, stop: Optional[List[str]] = None ) -> CompletionResponse: if not is_mistralai_code_model(self.model): raise ValueError( "Please provide code model from MistralAI. Currently supported code model is 'codestral-latest'." ) if stop: response = self._client.fim.complete( model=self.model, prompt=prompt, suffix=suffix, stop=stop ) else: response = self._client.fim.complete( model=self.model, prompt=prompt, suffix=suffix ) return CompletionResponse( text=response.choices[0].message.content, raw=dict(response) )
MistralAI
python
simonw__datasette
datasette/database.py
{ "start": 24815, "end": 25191 }
class ____: __slots__ = ("fn", "task_id", "reply_queue", "isolated_connection", "transaction") def __init__(self, fn, task_id, reply_queue, isolated_connection, transaction): self.fn = fn self.task_id = task_id self.reply_queue = reply_queue self.isolated_connection = isolated_connection self.transaction = transaction
WriteTask
python
bottlepy__bottle
test/test_mount.py
{ "start": 3920, "end": 4404 }
class ____(ServerTestBase): def setUp(self): ServerTestBase.setUp(self) self.subapp = bottle.Bottle() @self.subapp.route('/') @self.subapp.route('/test/<test>') def test(test='foo'): return test def test_merge(self): self.app.merge(self.subapp) self.assertStatus(200, '/') self.assertBody('foo', '/') self.assertStatus(200, '/test/bar') self.assertBody('bar', '/test/bar')
TestAppMerging
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/strings_ops/string_strip_op_test.py
{ "start": 814, "end": 1893 }
class ____(test.TestCase): """ Test cases for tf.strings.strip.""" def test_string_strip(self): strings = ["pigs on the wing", "animals"] with self.cached_session() as sess: output = string_ops.string_strip(strings) output = self.evaluate(output) self.assertAllEqual(output, [b"pigs on the wing", b"animals"]) def test_string_strip_2d(self): strings = [["pigs on the wing", "animals"], [" hello ", "\n\tworld \r \n"]] with self.cached_session() as sess: output = string_ops.string_strip(strings) output = self.evaluate(output) self.assertAllEqual(output, [[b"pigs on the wing", b"animals"], [b"hello", b"world"]]) def test_string_strip_with_empty_strings(self): strings = [" hello ", "", "world ", " \t \r \n "] with self.cached_session() as sess: output = string_ops.string_strip(strings) output = self.evaluate(output) self.assertAllEqual(output, [b"hello", b"", b"world", b""]) if __name__ == "__main__": test.main()
StringStripOpTest
python
yaml__pyyaml
tests/legacy_tests/conftest.py
{ "start": 1425, "end": 4710 }
class ____(pytest.Collector): def __init__(self, name, parent=None, function=None, **kwargs): self._function = function self.fspath = parent.fspath.__class__(function.__code__.co_filename) self.lineno = function.__code__.co_firstlineno # avoid fspath deprecation warnings on pytest < 7 if hasattr(self, 'path') and 'fspath' in kwargs: del kwargs['fspath'] super().__init__(name=name, parent=parent, **kwargs) def collect(self): items = [] unittest = getattr(self._function, 'unittest', None) if unittest is True: # no filenames items.append(PyYAMLItem.from_parent(parent=self, function=self._function, filenames=None)) else: for base, exts in _test_filenames: filenames = [] for ext in unittest: if ext not in exts: break filenames.append(os.path.join(DATA, base + ext)) else: skip_exts = getattr(self._function, 'skip', []) for skip_ext in skip_exts: if skip_ext in exts: break else: items.append(PyYAMLItem.from_parent(parent=self, function=self._function, filenames=filenames)) return items or None def reportinfo(self): return self.fspath, self.lineno, '' @classmethod def from_parent(cls, parent, fspath, **kwargs): return super().from_parent(parent=parent, fspath=fspath, **kwargs) @pytest.hookimpl(hookwrapper=True, trylast=True) def pytest_pycollect_makeitem(collector, name: str, obj: object): outcome = yield outcome.get_result() if not callable(obj): outcome.force_result(None) return unittest = getattr(obj, 'unittest', None) if not unittest: outcome.force_result(None) return if unittest is True: # no file list to run against, just return a test item instead of a collector outcome.force_result(PyYAMLItem.from_parent(name=name, parent=collector, fspath=collector.fspath, function=obj)) return # there's a file list; return a collector to create individual items for each outcome.force_result(PyYAMLCollector.from_parent(name=name, parent=collector, fspath=collector.fspath, function=obj)) return def pytest_collection_modifyitems(session, config, items): pass def 
pytest_ignore_collect(collection_path: pathlib.Path): basename = collection_path.name # ignore all Python files in this subtree for normal pytest collection if basename not in ['test_yaml.py', 'test_yaml_ext.py']: return True # ignore extension tests (depending on config) if basename == 'test_yaml_ext.py': require_libyaml = os.environ.get('PYYAML_FORCE_LIBYAML', None) if require_libyaml == '1' and not HAS_LIBYAML_EXT: raise RuntimeError('PYYAML_FORCE_LIBYAML envvar is set, but libyaml extension is not available') if require_libyaml == '0': return True if not HAS_LIBYAML_EXT: warnings.warn('libyaml extension is not available, skipping libyaml tests') return True
PyYAMLCollector
python
PyCQA__pylint
tests/functional/d/dataclass/dataclass_with_default_factory.py
{ "start": 1118, "end": 1283 }
class ____: """Test dataclass that puts call to field() in another function call""" attribute: int = cast(int, field(default_factory=dict)) @dc.dataclass
TEST3
python
numpy__numpy
benchmarks/benchmarks/bench_io.py
{ "start": 6153, "end": 6774 }
class ____(Benchmark): # benchmark selective column reading from CSV files # using np.loadtxt params = [2, [1, 3], [1, 3, 5, 7]] param_names = ['usecols'] def setup(self, usecols): num_lines = 5000 data = ['0, 1, 2, 3, 4, 5, 6, 7, 8, 9'] * num_lines self.csv_data = StringIO('\n'.join(data)) def time_loadtxt_usecols_csv(self, usecols): # must rewind StringIO because of state # dependence of file reading np.loadtxt(self.csv_data, delimiter=',', usecols=usecols) self.csv_data.seek(0)
LoadtxtUseColsCSV
python
Textualize__rich
rich/markdown.py
{ "start": 12756, "end": 14066 }
class ____(TextElement): """Renders a placeholder for an image.""" new_line = False @classmethod def create(cls, markdown: Markdown, token: Token) -> MarkdownElement: """Factory to create markdown element, Args: markdown (Markdown): The parent Markdown object. token (Any): A token from markdown-it. Returns: MarkdownElement: A new markdown element """ return cls(str(token.attrs.get("src", "")), markdown.hyperlinks) def __init__(self, destination: str, hyperlinks: bool) -> None: self.destination = destination self.hyperlinks = hyperlinks self.link: str | None = None super().__init__() def on_enter(self, context: MarkdownContext) -> None: self.link = context.current_style.link self.text = Text(justify="left") super().on_enter(context) def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult: link_style = Style(link=self.link or self.destination or None) title = self.text or Text(self.destination.strip("/").rsplit("/", 1)[-1]) if self.hyperlinks: title.stylize(link_style) text = Text.assemble("🌆 ", title, " ", end="") yield text
ImageItem
python
realpython__materials
python-unittest/test_membership.py
{ "start": 18, "end": 348 }
class ____(unittest.TestCase): def test_value_in_collection(self): a = 1 b = [1, 2, 3, 4, 5] self.assertIn(a, b) def test_value_not_in_collection(self): a = 10 b = [1, 2, 3, 4, 5] self.assertNotIn(a, b) if __name__ == "__main__": unittest.main(verbosity=2)
TestMembership
python
walkccc__LeetCode
solutions/1470. Shuffle the Array/1470.py
{ "start": 0, "end": 184 }
class ____: def shuffle(self, nums: list[int], n: int) -> list[int]: ans = [] for a, b in zip(nums[:n], nums[n:]): ans.append(a) ans.append(b) return ans
Solution
python
joke2k__faker
tests/providers/test_enum.py
{ "start": 208, "end": 272 }
class ____(Enum): A = auto B = auto C = auto
_TestEnum
python
PyCQA__pylint
tests/functional/s/slots_checks.py
{ "start": 1729, "end": 1899 }
class ____: # [invalid-slots] """One valid & one invalid __slots__ value""" x = 1 if x: __slots__ = ("y",) else: __slots__ = None
TwelfthBad
python
kamyu104__LeetCode-Solutions
Python/lfu-cache.py
{ "start": 113, "end": 1908 }
class ____(object): def __init__(self, capacity): """ :type capacity: int """ self.__capa = capacity self.__size = 0 self.__min_freq = float("inf") self.__freq_to_nodes = collections.defaultdict(collections.OrderedDict) self.__key_to_freq = {} def get(self, key): """ :type key: int :rtype: int """ if key not in self.__key_to_freq: return -1 value = self.__freq_to_nodes[self.__key_to_freq[key]][key] self.__update(key, value) return value def put(self, key, value): """ :type key: int :type value: int :rtype: void """ if self.__capa <= 0: return if key not in self.__key_to_freq and self.__size == self.__capa: del self.__key_to_freq[self.__freq_to_nodes[self.__min_freq].popitem(last=False)[0]] if not self.__freq_to_nodes[self.__min_freq]: del self.__freq_to_nodes[self.__min_freq] self.__size -= 1 self.__update(key, value) def __update(self, key, value): freq = 0 if key in self.__key_to_freq: freq = self.__key_to_freq[key] del self.__freq_to_nodes[freq][key] if not self.__freq_to_nodes[freq]: del self.__freq_to_nodes[freq] if self.__min_freq == freq: self.__min_freq += 1 self.__size -= 1 freq += 1 self.__min_freq = min(self.__min_freq, freq) self.__key_to_freq[key] = freq self.__freq_to_nodes[freq][key] = value self.__size += 1 # Time: O(1), per operation # Space: O(k), k is the capacity of cache import collections
LFUCache
python
airbytehq__airbyte
airbyte-integrations/connectors/source-marketo/source_marketo/source.py
{ "start": 12202, "end": 12758 }
class ____(MarketoStream): """ Provides functionality to start Marketo export. Return list with dict, example: [ { "exportId": "1689f995-1397-48b2-b88a-5eed1397299b", "format": "CSV", "status": "Queued", "createdAt": "2021-09-01T10:00:50Z", "queuedAt": "2021-09-01T10:01:07Z" } ] """ http_method = "POST" def path(self, **kwargs) -> str: return f"bulk/v1/{self.stream_name}/export/{self.export_id}/enqueue.json"
MarketoExportStart
python
sphinx-doc__sphinx
sphinx/builders/latex/theming.py
{ "start": 1245, "end": 2021 }
class ____(Theme): """A built-in LaTeX theme.""" def __init__(self, name: str, config: Config) -> None: super().__init__(name) if name == 'howto': self.docclass = config.latex_docclass.get('howto', 'article') else: self.docclass = config.latex_docclass.get('manual', 'report') if name in {'manual', 'howto'}: self.wrapperclass = 'sphinx' + name else: self.wrapperclass = name # we assume LaTeX class provides \chapter command except in case # of non-Japanese 'howto' case if name == 'howto' and not self.docclass.startswith('j'): self.toplevel_sectioning = 'section' else: self.toplevel_sectioning = 'chapter'
BuiltInTheme
python
dask__dask
dask/dataframe/dask_expr/tests/test_core.py
{ "start": 373, "end": 630 }
class ____(Expr): def _simplify_down(self): return ExprB() def _operands_for_repr(self): return [] def test_endless_simplify(): expr = ExprA() with pytest.raises(RuntimeError, match="converge"): expr.simplify()
ExprA
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 641451, "end": 641660 }
class ____(AnyMark): """ Mark schema wrapper. All types of primitive marks. """ _schema = {"$ref": "#/definitions/Mark"} def __init__(self, *args): super().__init__(*args)
Mark