language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/0100-0199/0130.Surrounded Regions/Solution.py | {
"start": 0,
"end": 719
} | class ____:
def solve(self, board: List[List[str]]) -> None:
def dfs(i: int, j: int):
if not (0 <= i < m and 0 <= j < n and board[i][j] == "O"):
return
board[i][j] = "."
for a, b in pairwise((-1, 0, 1, 0, -1)):
dfs(i + a, j + b)
m, n = len(board), len(board[0])
for i in range(m):
dfs(i, 0)
dfs(i, n - 1)
for j in range(n):
dfs(0, j)
dfs(m - 1, j)
for i in range(m):
for j in range(n):
if board[i][j] == ".":
board[i][j] = "O"
elif board[i][j] == "O":
board[i][j] = "X"
| Solution |
python | pytorch__pytorch | test/lazy/test_extract_compiled_graph.py | {
"start": 1759,
"end": 2079
} | class ____(nn.Module):
"""
Handle the corner case that the same tensor appears multiple times in the
returned tuple. torchbench like drq will hit this corner case when running
thru torchdynamo..
"""
def forward(self, a, b):
c = a + b
return a - b, c, a + 1, c
| ModuleReturnDupTensor |
python | plotly__plotly.py | plotly/graph_objs/parcoords/_legendgrouptitle.py | {
"start": 233,
"end": 2953
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords"
_path_str = "parcoords.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.parcoords.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 241702,
"end": 245101
} | class ____(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
(x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x})
for :math:`x >= 0`, :math:`k > 0` and :math:`\lambda \ge 0`.
:math:`k` specifies the degrees of freedom (denoted ``df`` in the
implementation) and :math:`\lambda` is the non-centrality parameter
(denoted ``nc`` in the implementation). :math:`I_\nu` denotes the
modified Bessel function of first order of degree :math:`\nu`
(`scipy.special.iv`).
`ncx2` takes ``df`` and ``nc`` as shape parameters.
This distribution uses routines from the Boost Math C++ library for
the computation of the ``pdf``, ``cdf``, ``ppf``, ``sf`` and ``isf``
methods. [1]_
%(after_notes)s
References
----------
.. [1] The Boost Developers. "Boost C++ Libraries". https://www.boost.org/.
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & np.isfinite(df) & (nc >= 0)
def _shape_info(self):
idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
inc = _ShapeInfo("nc", False, (0, np.inf), (True, False))
return [idf, inc]
def _rvs(self, df, nc, size=None, random_state=None):
return random_state.noncentral_chisquare(df, nc, size)
def _logpdf(self, x, df, nc):
return xpx.apply_where(nc != 0, (x, df, nc), _ncx2_log_pdf,
lambda x, df, _: chi2._logpdf(x, df))
def _pdf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return xpx.apply_where(nc != 0, (x, df, nc), scu._ncx2_pdf,
lambda x, df, _: chi2._pdf(x, df))
def _cdf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return xpx.apply_where(nc != 0, (x, df, nc), sc.chndtr,
lambda x, df, _: chi2._cdf(x, df))
def _ppf(self, q, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return xpx.apply_where(nc != 0, (q, df, nc), sc.chndtrix,
lambda x, df, _: chi2._ppf(x, df))
def _sf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return xpx.apply_where(nc != 0, (x, df, nc), scu._ncx2_sf,
lambda x, df, _: chi2._sf(x, df))
def _isf(self, x, df, nc):
with np.errstate(over='ignore'): # see gh-17432
return xpx.apply_where(nc != 0, (x, df, nc), scu._ncx2_isf,
lambda x, df, _: chi2._isf(x, df))
def _stats(self, df, nc):
_ncx2_mean = df + nc
def k_plus_cl(k, l, c):
return k + c*l
_ncx2_variance = 2.0 * k_plus_cl(df, nc, 2.0)
_ncx2_skewness = (np.sqrt(8.0) * k_plus_cl(df, nc, 3) /
np.sqrt(k_plus_cl(df, nc, 2.0)**3))
_ncx2_kurtosis_excess = (12.0 * k_plus_cl(df, nc, 4.0) /
k_plus_cl(df, nc, 2.0)**2)
return (
_ncx2_mean,
_ncx2_variance,
_ncx2_skewness,
_ncx2_kurtosis_excess,
)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
| ncx2_gen |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 420945,
"end": 422453
} | class ____(ExprNode, ModuleNameMixin):
# Helper class holds Python3 namespace object
#
# All this are not owned by this node
# class_def_node PyClassDefNode PyClassDefNode defining this class
# doc ExprNode or None Doc string (owned)
subexprs = ['doc']
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env).coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
class_def_node = self.class_def_node
null = "(PyObject *) NULL"
doc_code = self.doc.result() if self.doc else null
mkw = class_def_node.mkw.py_result() if class_def_node.mkw else null
metaclass = class_def_node.metaclass.py_result() if class_def_node.metaclass else null
code.putln(
"%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
self.result(),
metaclass,
class_def_node.bases.result(),
cname,
qualname,
mkw,
py_mod_name,
doc_code,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
| PyClassNamespaceNode |
python | google__pytype | pytype/abstract/_typing.py | {
"start": 36157,
"end": 36689
} | class ____(_base.BaseValue):
"""Container for a Final annotation."""
def __init__(self, annotation: _base.BaseValue, ctx: "context.Context"):
super().__init__("FinalAnnotation", ctx)
self.annotation = annotation
def __repr__(self) -> str:
return f"Final[{self.annotation}]"
def instantiate(
self,
node: "cfg.CFGNode",
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
) -> "cfg.Variable":
return self.to_variable(node)
| FinalAnnotation |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_set_column04.py | {
"start": 315,
"end": 1424
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_column04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
bold_italic = workbook.add_format({"bold": 1, "italic": 1})
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write("A1", "Foo", italic)
worksheet.write("B1", "Bar", bold)
worksheet.write_column("A2", data[0])
worksheet.write_column("B2", data[1])
worksheet.write_column("C2", data[2])
worksheet.set_row(12, None, italic)
worksheet.set_column("F:F", None, bold)
worksheet.write("F13", None, bold_italic)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Netflix__metaflow | test/core/tests/card_extension_test.py | {
"start": 72,
"end": 2453
} | class ____(MetaflowTest):
"""
- Requires on tests/extensions/packages to be installed.
"""
PRIORITY = 5
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('card(type="card_ext_init_b",save_errors=False)')
@tag('card(type="card_ext_init_a",save_errors=False)')
@tag('card(type="card_ns_subpackage",save_errors=False)')
@tag('card(type="card_init",save_errors=False)')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
self.task = current.pathspec
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run is None:
# This means CliCheck is in context.
for step in flow:
if step.name != "start":
continue
cli_check_dict = checker.artifact_dict(step.name, "task")
for task_pathspec in cli_check_dict:
full_pathspec = "/".join([flow.name, task_pathspec])
task_id = task_pathspec.split("/")[-1]
cards_info = checker.list_cards(step.name, task_id)
# Just check if the cards are created.
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 4,
True,
)
else:
# This means MetadataCheck is in context.
for step in flow:
if step.name != "start":
continue
meta_check_dict = checker.artifact_dict(step.name, "task")
for task_id in meta_check_dict:
full_pathspec = meta_check_dict[task_id]["task"]
cards_info = checker.list_cards(step.name, task_id)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 4,
True,
)
| CardExtensionsImportTest |
python | sqlalchemy__sqlalchemy | test/sql/test_selectable.py | {
"start": 133480,
"end": 134529
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_direct_element_hierarchy(self):
t = table("t", column("c"))
a1 = t.alias()
a2 = a1.alias()
a3 = a2.alias()
is_(a1.element, t)
is_(a2.element, a1)
is_(a3.element, a2)
def test_get_children_preserves_multiple_nesting(self):
t = table("t", column("c"))
stmt = select(t)
a1 = stmt.alias()
a2 = a1.alias()
eq_(set(a2.get_children(column_collections=False)), {a1})
def test_correspondence_multiple_nesting(self):
t = table("t", column("c"))
stmt = select(t)
a1 = stmt.alias()
a2 = a1.alias()
is_(a1.corresponding_column(a2.c.c), a1.c.c)
def test_copy_internals_multiple_nesting(self):
t = table("t", column("c"))
stmt = select(t)
a1 = stmt.alias()
a2 = a1.alias()
a3 = a2._clone()
a3._copy_internals()
is_(a1.corresponding_column(a3.c.c), a1.c.c)
| AliasTest |
python | catalyst-team__catalyst | catalyst/contrib/datasets/movielens.py | {
"start": 9294,
"end": 27369
} | class ____(Dataset):
"""
MovieLens data sets (ml-20m) were collected by
the GroupLens Research Project at the University of Minnesota.
This data set consists of:
* 20,000,263 ratings (1-5)
and 465,564 tag applications from 138,493 users on 27,278 movies.
* Each user has rated at least 20 movies.
* Simple demographic info for the users
(age, gender, occupation, zip)
Users were selected at random for inclusion.
All selected users had rated at least 20 movies.
No demographic information is included.
Each user is represented by an id, and no other information is provided.
More details about the contents and use of all these files follows.
This and other GroupLens data sets are publicly available for download
at http://grouplens.org/datasets/.
The data was collected through the MovieLens web site.
(movielens.umn.edu) between January 09, 1995 and March 31, 2015.
This dataset was generated on October 17, 2016.
Neither the University of Minnesota nor any of the researchers involved
can guarantee the correctness of the data, its suitability
for any particular purpose, or the validity of
results based on the use of the data set.
The data set may be used for any research purposes
under the following conditions:
The user may not state or imply any endorsement
from the University of Minnesota or the GroupLens Research Group.
The user must acknowledge the use of the data set in
publications resulting from the use of the data set
(see below for citation information).
The user may not redistribute the data without separate permission.
The user may not use this information for any
commercial or revenue-bearing purposes
without first obtaining permission from a faculty member
of the GroupLens Research Project at the University of Minnesota.
The executable software scripts are provided "as is"
without warranty of any kind, either expressed or implied, including,
but not limited to, the implied warranties of merchantability
and fitness for a particular purpose.
The entire risk as to the quality and performance of them is with you.
Should the program prove defective,
you assume the cost of all necessary servicing, repair or correction.
In no event shall the University of Minnesota,
its affiliates or employees be liable to you for any damages
arising out of the use or inability to use these programs (including
but not limited to loss of data or data being rendered inaccurate).
The data are contained in six files:
1. genome-scores.csv
2. genome-tags.csv
3. links.csv
4. movies.csv
5. ratings.csv
6. tags.csv
Ratings Data File Structure (ratings.csv)
All ratings are contained in the file ratings.csv.
Each line of this file after the header row represents
one rating of one movie by one user,and has the following format:
1. userId,
2. movieId,
3. rating,
4. timestamp
Tags Data File Structure (tags.csv)
1. userId,
2. movieId,
3. tag,
4. timestamp
Movies Data File Structure (movies.csv)
1. movieId,
2. title,
3. genres
Movie titles are entered manually or
imported from https://www.themoviedb.org/, and include the year
of release in parentheses.
Errors and inconsistencies may exist in these titles.
Links Data File Structure (links.csv)
1. movieId,
2. imdbId,
3. tmdbId
Tag Genome (genome-scores.csv and genome-tags.csv)
1. movieId,
2. tagId,
3. relevance
If you have any further questions or comments, please contact GroupLens
<grouplens-info@cs.umn.edu>.
https://files.grouplens.org/datasets/movielens/ml-20m-README.html
"""
resources = (
"https://files.grouplens.org/datasets/movielens/ml-20m.zip",
" cd245b17a1ae2cc31bb14903e1204af3",
)
filename = "ml-20m.zip"
training_file = "training.pt"
test_file = "test.pt"
def __init__(
self,
root,
train=True,
download=False,
min_rating=0.0,
min_items_per_user=1.0,
min_users_per_item=2.0,
test_prop=0.2,
split="users",
sample=False,
n_rows=1000,
):
"""
Args:
root (string): Root directory of dataset where
``MovieLens/processed/training.pt``
and ``MovieLens/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from
``training.pt``, otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again.
min_rating (float, optional): Minimum rating to include in
the interaction matrix
min_items_per_user (float, optional):
Minimum number of items per user
to include in the interaction matrix
min_users_per_item (float, optional):
Minimum rating to users per itemrs
to include in the interaction matrix
test_prop (float, optional): train-test split
split (string, optional): the splittage method.
`users` – split by users
`ts` - split by timestamp
sample (bool, optional):
If true, then use the sample of the dataset.
If true the `n_rows` shold be provide
n_rows (int, optional): number of rows to retrieve.
Availbale only with `sample = True`
Raises:
RuntimeError: If ``download = False`` and the dataset not found.
RuntimeError: If torch version < `1.7.0`"
"""
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
self.train = train
self.min_rating = min_rating
self.min_items_per_user = min_items_per_user
self.min_users_per_item = min_users_per_item
self.test_prop = test_prop
self.nrows = n_rows
self.sample = sample
self.split = split
if download:
self._download()
self._fetch_movies(split_by=split)
if not self._check_exists():
raise RuntimeError("Dataset not found. Set `download=True`")
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data = torch.load(os.path.join(self.processed_folder, data_file))
def __getitem__(self, user_index):
"""Get item.
Args:
user_index (int): User index
Returns:
tensor: (items) item's ranking for the user with index user_index
"""
return self.data[user_index]
def __len__(self):
"""The length of the loader"""
return self.dimensions[0]
@property
def raw_folder(self):
"""Create raw folder for data download
Returns:
raw_path (path): raw folder path
"""
return os.path.join(self.root, self.__class__.__name__, "raw")
@property
def processed_folder(self):
"""Create the folder for the processed files
Returns:
raw_path (path): processed folder path
"""
return os.path.join(self.root, self.__class__.__name__, "processed")
def _check_exists(self):
"""Check if the path for tarining and testing data exists in
processed folder.
Returns:
raw_path (path): processed folder path
"""
return os.path.exists(
os.path.join(self.processed_folder, self.training_file)
) and os.path.exists(os.path.join(self.processed_folder, self.test_file))
def _download(self):
"""Download and extract files"""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
url = self.resources[0]
download_and_extract_archive(
url=url,
download_root=self.raw_folder,
filename=self.filename,
remove_finished=True,
)
def _read_raw_movielens_data(self):
"""Read the csv files with pandas.
Returns:
(movies, ratings, genome_scores, genome_tags, tags):
(pd.DataFrame, pd.DataFrame, pd.DataFrame,
pd.DataFrame, pd.DataFrame)
"""
path = self.raw_folder
if self.sample:
movies = pd.read_csv(path + "/ml-20m/movies.csv", nrows=self.nrows)
ratings = pd.read_csv(path + "/ml-20m/ratings.csv", nrows=self.nrows)
genome_scores = pd.read_csv(
path + "/ml-20m/genome-scores.csv", nrows=self.nrows
)
genome_tags = pd.read_csv(path + "/ml-20m/genome-tags.csv", nrows=self.nrows)
tags = pd.read_csv(path + "/ml-20m/tags.csv", nrows=self.nrows)
else:
movies = pd.read_csv(path + "/ml-20m/movies.csv")
ratings = pd.read_csv(path + "/ml-20m/ratings.csv")
genome_scores = pd.read_csv(path + "/ml-20m/genome-scores.csv")
genome_tags = pd.read_csv(path + "/ml-20m/genome-tags.csv")
tags = pd.read_csv(path + "/ml-20m/tags.csv")
return (movies, ratings, genome_scores, genome_tags, tags)
def _build_interaction_matrix(self, ratings):
"""Builds interaction matrix.
Args:
ratings (pd.Dataframe): pandas DataFrame of the following format
userId movieId rating
20 1 924 3.5
19 1 919 3.5
86 1 2683 3.5
61 1 1584 3.5
23 1 1079 4.0
Returns:
interaction_matrix (torch.sparse.Float):
sparse user2item interaction matrix
"""
csr_matrix = sp.coo_matrix(
(
ratings["rating"].astype(np.float32),
(ratings["movieId"], ratings["userId"]),
)
)
interaction_matrix = torch.sparse.LongTensor(
torch.LongTensor([csr_matrix.row.tolist(), csr_matrix.col.tolist()]),
torch.LongTensor(csr_matrix.data.astype(np.int32)),
)
return interaction_matrix
def _parse(
self,
ratings,
rating_cut=True,
user_per_item_cut=True,
item_per_user_cut=True,
ts_cut=False,
):
"""Parses and pre-process the raw data.
Substract one to shift to zero based indexing
To-do add timestamp cut
Args:
ratings (pd.Dataframe): pandas DataFrame of the following format
userId movieId rating timestamp
20 1 924 3.5 1094785598
19 1 919 3.5 1094785621
86 1 2683 3.5 1094785650
61 1 1584 3.5 1094785656
23 1 1079 4.0 1094785665
rating_cut (bool, optional):
If true, filter datafreame on the `min_rating` value
user_per_item_cut (bool, optional):
If true, filter datafreame on the `min_users_per_item` value
item_per_user_cut (bool, optional):
If true, filter datafreame on the `min_items_per_user` value
ts_cut (bool, optional):
If true, filter datafreame on the `min_ts` value [TO-DO]
Returns:
ratings (pd.Dataframe): filtered `ratings` pandas DataFrame
users_activity (pd.DataFrame):
Number of items each user interacted with
items_activity (pd.DataFrame):
Number of users interacted with each item.
"""
if rating_cut:
ratings = ratings[ratings["rating"] >= self.min_rating].sort_values(
["userId", "timestamp"]
)
movie_id = "movieId"
user_cnt_df = (
ratings[[movie_id]]
.groupby(movie_id, as_index=False)
.size()
.rename(columns={"size": "user_cnt"})
)
user_id = "userId"
item_cnt_df = (
ratings[[user_id]]
.groupby(user_id, as_index=False)
.size()
.rename(columns={"size": "item_cnt"})
)
user_not_filtered = True
item_not_filtered = True
while user_not_filtered or item_not_filtered:
ratings = ratings[
ratings[movie_id].isin(
user_cnt_df.index[user_cnt_df["user_cnt"] >= self.min_users_per_item]
)
]
ratings = ratings[
ratings[user_id].isin(
item_cnt_df.index[item_cnt_df["item_cnt"] >= self.min_items_per_user]
)
]
user_cnt_df = (
ratings[[movie_id]]
.groupby(movie_id, as_index=False)
.size()
.rename(columns={"size": "user_cnt"})
)
item_cnt_df = (
ratings[[user_id]]
.groupby(user_id, as_index=False)
.size()
.rename(columns={"size": "item_cnt"})
)
user_not_filtered = (user_cnt_df["user_cnt"] < self.min_users_per_item).any()
item_not_filtered = (item_cnt_df["item_cnt"] < self.min_items_per_user).any()
users_activity = (
ratings[["userId"]]
.groupby("userId", as_index=False)
.size()
.rename(columns={"size": "user_cnt"})
)
items_activity = (
ratings[["movieId"]]
.groupby("movieId", as_index=False)
.size()
.rename(columns={"size": "item_cnt"})
)
return ratings, users_activity, items_activity
def _split_by_users(self, ratings, users_activity):
"""Split the ratings DataFrame into train and test
Randomly shuffle users and split
Args:
ratings (pd.Dataframe): pandas DataFrame of the following format
userId movieId rating timestamp
20 1 924 3.5 1094785598
19 1 919 3.5 1094785621
86 1 2683 3.5 1094785650
61 1 1584 3.5 1094785656
23 1 1079 4.0 1094785665
users_activity (pd.DataFrame):
Number of items each user interacted with
Returns:
train_events (pd.Dataframe): pandas DataFrame for training data
test_events (pd.Dataframe): pandas DataFrame for training data
"""
idx_perm = np.random.permutation(users_activity.index.size)
unique_uid = users_activity.index[idx_perm]
n_users = unique_uid.size
test_users = unique_uid[: int(n_users * self.test_prop)]
train_users = unique_uid[int(n_users * self.test_prop) :]
train_events = ratings.loc[ratings["userId"].isin(train_users)]
test_events = ratings.loc[ratings["userId"].isin(test_users)]
return (train_events, test_events)
def _split_by_time(self, ratings):
"""Split the ratings DataFrame into train and test by timestamp
Ratings[timestamp] extreme values used for the filtering interval
Args:
ratings (pd.Dataframe): pandas DataFrame of the following format
userId movieId rating timestamp
20 1 924 3.5 1094785598
19 1 919 3.5 1094785621
86 1 2683 3.5 1094785650
61 1 1584 3.5 1094785656
23 1 1079 4.0 1094785665
Returns:
train_events (pd.Dataframe): pandas DataFrame for training data
test_events (pd.Dataframe): pandas DataFrame for training data
"""
ts = ratings["timestamp"].sort_values()
ts_max = ts.max()
ts_min = ts.min()
ts_split = ts_min + (ts_max - ts_min) * self.test_prop
train_events = ratings[ratings["timestamp"] > ts_split]
test_events = ratings[ratings["timestamp"] <= ts_split]
return (train_events, test_events)
def _fetch_movies(self, split_by="users"):
"""
Fetch data and save in the pytorch format
1. Read the MovieLens20 data from raw archive
2. Parse the rating dataset
3. Split dataset into train and test
4. Build user-item matrix interaction
5. Save in the .pt with torch.save
Args:
split_by (string, optional): the splittage method.
`users` – split by users
`ts` - split by timestamp
Raises:
ValueError: If `split_by` argument is not equal `users` or `ts`
"""
raw_data = self._read_raw_movielens_data()
ratings = raw_data[1]
# TO-DO: add error handling
ratings, users_activity, items_activity = self._parse(ratings)
self.users_activity = users_activity
self.items_activity = items_activity
if split_by == "users":
train_raw, test_raw = self._split_by_users(ratings, users_activity)
if split_by == "ts":
train_raw, test_raw = self._split_by_time(ratings)
if split_by != "users" and split_by != "ts":
raise ValueError("Only splitting by users and ts supported")
train = self._build_interaction_matrix(train_raw)
test = self._build_interaction_matrix(test_raw)
with open(os.path.join(self.processed_folder, self.training_file), "wb") as f:
torch.save(train, f)
with open(os.path.join(self.processed_folder, self.test_file), "wb") as f:
torch.save(test, f)
__all__ = ["MovieLens", "MovieLens20M"]
| MovieLens20M |
python | openai__openai-python | src/openai/types/responses/response_text_delta_event.py | {
"start": 715,
"end": 1374
} | class ____(BaseModel):
content_index: int
"""The index of the content part that the text delta was added to."""
delta: str
"""The text delta that was added."""
item_id: str
"""The ID of the output item that the text delta was added to."""
logprobs: List[Logprob]
"""The log probabilities of the tokens in the delta."""
output_index: int
"""The index of the output item that the text delta was added to."""
sequence_number: int
"""The sequence number for this event."""
type: Literal["response.output_text.delta"]
"""The type of the event. Always `response.output_text.delta`."""
| ResponseTextDeltaEvent |
python | walkccc__LeetCode | solutions/2015. Average Height of Buildings in Each Segment/2015.py | {
"start": 0,
"end": 665
} | class ____:
def averageHeightOfBuildings(self, buildings: list[list[int]]) -> list[list[int]]:
ans = []
events = []
for start, end, height in buildings:
events.append((start, height))
events.append((end, -height))
prev = 0
count = 0
sumHeight = 0
for curr, height in sorted(events):
if sumHeight > 0 and curr > prev:
avgHeight = sumHeight // count
if ans and ans[-1][1] == prev and avgHeight == ans[-1][2]:
ans[-1][1] = curr
else:
ans.append([prev, curr, avgHeight])
sumHeight += height
count += 1 if height > 0 else -1
prev = curr
return ans
| Solution |
python | getsentry__sentry | src/sentry/preprod/pull_request/comment_types.py | {
"start": 3885,
"end": 4175
} | class ____(BaseModel):
"""
Complete comments data for a pull request.
Organizes both general comments and file-specific review comments.
"""
general_comments: list[IssueComment]
file_comments: dict[str, list[ReviewComment]] # Grouped by file path
| PullRequestComments |
python | scrapy__scrapy | tests/test_utils_signal.py | {
"start": 2932,
"end": 3100
} | class ____(TestSendCatchLog):
def _get_result(self, signal, *a, **kw):
return deferred_from_coro(send_catch_log_async(signal, *a, **kw))
| TestSendCatchLogAsync |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/chat_store/base_db.py | {
"start": 457,
"end": 3069
} | class ____(BaseModel):
"""
Base class for DB-based chat stores.
Meant to implement a FIFO queue to manage short-term memory and
general conversation history.
"""
@abstractmethod
async def get_messages(
self,
key: str,
status: Optional[MessageStatus] = MessageStatus.ACTIVE,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> List[ChatMessage]:
"""
Get all messages for a key with the specified status (async).
Returns a list of messages.
"""
@abstractmethod
async def count_messages(
self,
key: str,
status: Optional[MessageStatus] = MessageStatus.ACTIVE,
) -> int:
"""Count messages for a key with the specified status (async)."""
@abstractmethod
async def add_message(
self,
key: str,
message: ChatMessage,
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Add a message for a key with the specified status (async)."""
@abstractmethod
async def add_messages(
self,
key: str,
messages: List[ChatMessage],
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Add a list of messages in batch for the specified key and status (async)."""
@abstractmethod
async def set_messages(
self,
key: str,
messages: List[ChatMessage],
status: MessageStatus = MessageStatus.ACTIVE,
) -> None:
"""Set all messages for a key (replacing existing ones) with the specified status (async)."""
@abstractmethod
async def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete a specific message by ID and return it (async)."""
@abstractmethod
async def delete_messages(
self, key: str, status: Optional[MessageStatus] = None
) -> None:
"""Delete all messages for a key with the specified status (async)."""
@abstractmethod
async def delete_oldest_messages(self, key: str, n: int) -> List[ChatMessage]:
"""Delete the oldest n messages for a key and return them (async)."""
@abstractmethod
async def archive_oldest_messages(self, key: str, n: int) -> List[ChatMessage]:
"""Archive the oldest n messages for a key and return them (async)."""
@abstractmethod
async def get_keys(self) -> List[str]:
"""Get all unique keys in the store (async)."""
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "AsyncDBChatStore"
| AsyncDBChatStore |
python | ray-project__ray | python/ray/data/_internal/execution/operators/join.py | {
"start": 14812,
"end": 19362
} | class ____(HashShufflingOperatorBase):
def __init__(
self,
data_context: DataContext,
left_input_op: PhysicalOperator,
right_input_op: PhysicalOperator,
left_key_columns: Tuple[str],
right_key_columns: Tuple[str],
join_type: JoinType,
*,
num_partitions: Optional[int] = None,
left_columns_suffix: Optional[str] = None,
right_columns_suffix: Optional[str] = None,
partition_size_hint: Optional[int] = None,
aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
shuffle_aggregation_type: Optional[Type[StatefulShuffleAggregation]] = None,
):
if shuffle_aggregation_type is not None:
if not issubclass(shuffle_aggregation_type, StatefulShuffleAggregation):
raise TypeError(
f"shuffle_aggregation_type must be a subclass of StatefulShuffleAggregation, "
f"got {shuffle_aggregation_type}"
)
aggregation_class = shuffle_aggregation_type or JoiningShuffleAggregation
super().__init__(
name_factory=(
lambda num_partitions: f"Join(num_partitions={num_partitions})"
),
input_ops=[left_input_op, right_input_op],
data_context=data_context,
key_columns=[left_key_columns, right_key_columns],
num_partitions=num_partitions,
partition_size_hint=partition_size_hint,
partition_aggregation_factory=(
lambda aggregator_id, target_partition_ids: aggregation_class(
aggregator_id=aggregator_id,
join_type=join_type,
left_key_col_names=left_key_columns,
right_key_col_names=right_key_columns,
target_partition_ids=target_partition_ids,
data_context=data_context,
left_columns_suffix=left_columns_suffix,
right_columns_suffix=right_columns_suffix,
)
),
aggregator_ray_remote_args_override=aggregator_ray_remote_args_override,
shuffle_progress_bar_name="Shuffle",
finalize_progress_bar_name="Join",
)
def _get_operator_num_cpus_override(self) -> float:
return self.data_context.join_operator_actor_num_cpus_override
@classmethod
def _estimate_aggregator_memory_allocation(
cls,
*,
num_aggregators: int,
num_partitions: int,
estimated_dataset_bytes: int,
) -> int:
partition_byte_size_estimate = math.ceil(
estimated_dataset_bytes / num_partitions
)
# Estimate of object store memory required to accommodate all partitions
# handled by a single aggregator
aggregator_shuffle_object_store_memory_required: int = math.ceil(
estimated_dataset_bytes / num_aggregators
)
# Estimate of memory required to perform actual (in-memory) join
# operation (inclusive of 50% overhead allocated for Pyarrow join
# implementation)
#
# NOTE:
# - 2x due to budgeted 100% overhead of Arrow's in-memory join
join_memory_required: int = math.ceil(partition_byte_size_estimate * 2)
# Estimate of memory required to accommodate single partition as an output
# (inside Object Store)
#
# NOTE: x2 due to 2 sequences involved in joins
output_object_store_memory_required: int = partition_byte_size_estimate
aggregator_total_memory_required: int = (
# Inputs (object store)
aggregator_shuffle_object_store_memory_required
+
# Join (heap)
join_memory_required
+
# Output (object store)
output_object_store_memory_required
)
logger.info(
f"Estimated memory requirement for joining aggregator "
f"(partitions={num_partitions}, "
f"aggregators={num_aggregators}, "
f"dataset (estimate)={estimated_dataset_bytes / GiB:.1f}GiB): "
f"shuffle={aggregator_shuffle_object_store_memory_required / MiB:.1f}MiB, "
f"joining={join_memory_required / MiB:.1f}MiB, "
f"output={output_object_store_memory_required / MiB:.1f}MiB, "
f"total={aggregator_total_memory_required / MiB:.1f}MiB, "
)
return aggregator_total_memory_required
| JoinOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass11.py | {
"start": 234,
"end": 585
} | class ____(metaclass=MetaA):
pass
# This should generate an error because var0 isn't
# accessible via an instance of this class.
ClassA().var0
reveal_type(ClassA.var0, expected_text="int")
ClassA.var0 = 1
reveal_type(ClassA().var1, expected_text="str")
reveal_type(ClassA.var1, expected_text="str")
ClassA.var1 = "hi"
ClassA().var1 = "hi"
| ClassA |
python | qdrant__qdrant-client | qdrant_client/http/api/service_api.py | {
"start": 4085,
"end": 5586
} | class ____(_ServiceApi):
async def healthz(
self,
) -> str:
"""
An endpoint for health checking used in Kubernetes.
"""
return await self._build_for_healthz()
async def livez(
self,
) -> str:
"""
An endpoint for health checking used in Kubernetes.
"""
return await self._build_for_livez()
async def metrics(
self,
anonymize: bool = None,
) -> str:
"""
Collect metrics data including app info, collections info, cluster info and statistics
"""
return await self._build_for_metrics(
anonymize=anonymize,
)
async def readyz(
self,
) -> str:
"""
An endpoint for health checking used in Kubernetes.
"""
return await self._build_for_readyz()
async def root(
self,
) -> m.VersionInfo:
"""
Returns information about the running Qdrant instance like version and commit id
"""
return await self._build_for_root()
async def telemetry(
self,
anonymize: bool = None,
details_level: int = None,
) -> m.InlineResponse2001:
"""
Collect telemetry data including app info, system info, collections info, cluster info, configs and statistics
"""
return await self._build_for_telemetry(
anonymize=anonymize,
details_level=details_level,
)
| AsyncServiceApi |
python | xlwings__xlwings | xlwings/base_classes.py | {
"start": 16526,
"end": 17385
} | class ____:
@property
def api(self):
raise NotImplementedError()
@property
def bold(self):
raise NotImplementedError()
@bold.setter
def bold(self, value):
raise NotImplementedError()
@property
def italic(self):
raise NotImplementedError()
@italic.setter
def italic(self, value):
raise NotImplementedError()
@property
def size(self):
raise NotImplementedError()
@size.setter
def size(self, value):
raise NotImplementedError()
@property
def color(self):
raise NotImplementedError()
@color.setter
def color(self, color_or_rgb):
raise NotImplementedError()
@property
def name(self):
raise NotImplementedError()
@name.setter
def name(self, value):
raise NotImplementedError()
| Font |
python | openai__openai-python | src/openai/types/beta/realtime/response_function_call_arguments_delta_event.py | {
"start": 217,
"end": 793
} | class ____(BaseModel):
call_id: str
"""The ID of the function call."""
delta: str
"""The arguments delta as a JSON string."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the function call item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.function_call_arguments.delta"]
"""The event type, must be `response.function_call_arguments.delta`."""
| ResponseFunctionCallArgumentsDeltaEvent |
python | great-expectations__great_expectations | great_expectations/data_context/data_context/cloud_data_context.py | {
"start": 3906,
"end": 33109
} | class ____(SerializableDataContext):
"""Subclass of AbstractDataContext that contains functionality necessary to work in a GX Cloud-backed environment.""" # noqa: E501 # FIXME CoP
def __init__( # noqa: PLR0913 # FIXME CoP
self,
project_config: Optional[Union[DataContextConfig, Mapping]] = None,
context_root_dir: Optional[PathStr] = None,
project_root_dir: Optional[PathStr] = None,
runtime_environment: Optional[dict] = None,
cloud_base_url: Optional[str] = None,
cloud_access_token: Optional[str] = None,
cloud_organization_id: Optional[str] = None,
cloud_workspace_id: Optional[str] = None,
user_agent_str: Optional[str] = None,
) -> None:
"""
CloudDataContext constructor
Args:
project_config (DataContextConfig): config for CloudDataContext
runtime_environment (dict): a dictionary of config variables that override both those set in
config_variables.yml and the environment
cloud_config (GXCloudConfig): GXCloudConfig corresponding to current CloudDataContext
""" # noqa: E501 # FIXME CoP
self._check_if_latest_version()
self._cloud_user_info: CloudUserInfo | None = None
# We get the cloud_config based on based on passed in parameters or env variables.
self._cloud_config = CloudDataContext.get_cloud_config(
cloud_base_url=cloud_base_url,
cloud_access_token=cloud_access_token,
cloud_organization_id=cloud_organization_id,
cloud_workspace_id=cloud_workspace_id,
)
# The workspace id is not required to be passed in or be in env variable. If we don't have
# it, we try to infer it and set it.
if not self._cloud_config.workspace_id:
warnings.warn(
"Workspace id is not set when instantiating a CloudDataContext. "
f"Please set {GXCloudEnvironmentVariable.WORKSPACE_ID.value} or set it when "
"instantiating the context."
)
if len(self.cloud_user_info().workspaces) == 1:
self._cloud_config.workspace_id = self.cloud_user_info().workspaces[0].id
else:
raise WorkspaceNotSetError()
self._context_root_directory = self.determine_context_root_directory(
context_root_dir=context_root_dir,
project_root_dir=project_root_dir,
)
self._project_config = self._init_project_config(project_config)
# The DataAssetStore is relevant only for CloudDataContexts and is not an explicit part of the project config. # noqa: E501 # FIXME CoP
# As such, it must be instantiated separately.
self._data_asset_store = self._init_data_asset_store()
super().__init__(
context_root_dir=self._context_root_directory,
runtime_environment=runtime_environment,
user_agent_str=user_agent_str,
)
def _check_if_latest_version(self) -> None:
checker = _VersionChecker(__version__)
checker.check_if_using_latest_gx()
@property
@override
def mode(self) -> Literal["cloud"]:
return "cloud"
def _get_cloud_user_info(self) -> CloudUserInfo:
response = self._request_cloud_backend(
cloud_config=self.ge_cloud_config, resource=GXCloudRESTResource.ACCOUNTS_ME
)
data = response.json()
user_id = data.get("user_id") or data.get("id")
if not user_id:
raise NoUserIdError()
response_workspaces = data.get("workspaces", [])
workspaces = [
Workspace(id=response_workspace["id"], role=response_workspace["role"])
for response_workspace in response_workspaces
]
return CloudUserInfo(user_id=uuid.UUID(user_id), workspaces=workspaces)
def cloud_user_info(self, force_refresh: bool = False) -> CloudUserInfo:
if self._cloud_user_info is None or force_refresh:
self._cloud_user_info = self._get_cloud_user_info()
return self._cloud_user_info
@override
def _init_project_config(
self, project_config: Optional[Union[DataContextConfig, Mapping]]
) -> DataContextConfig:
if project_config is None:
project_config = self.retrieve_data_context_config_from_cloud(
cloud_config=self.ge_cloud_config,
)
return CloudDataContext.get_or_create_data_context_config(project_config)
@override
def _register_providers(self, config_provider: _ConfigurationProvider) -> None:
"""
To ensure that Cloud credentials are accessible downstream, we want to ensure that
we register a CloudConfigurationProvider.
Note that it is registered last as it takes the highest precedence.
"""
super()._register_providers(config_provider)
config_provider.register_provider(_CloudConfigurationProvider(self.ge_cloud_config))
@classmethod
def is_cloud_config_available(
cls,
cloud_base_url: Optional[str] = None,
cloud_access_token: Optional[str] = None,
cloud_organization_id: Optional[str] = None,
cloud_workspace_id: Optional[str] = None,
) -> bool:
"""
Helper method called by gx.get_context() method to determine whether all the information needed
to build a cloud_config is available.
If provided as explicit arguments, cloud_base_url, cloud_access_token and
cloud_organization_id will use runtime values instead of environment variables or conf files.
If any of the values are missing but workspace id, the method will return False.
It will return True otherwise.
Args:
cloud_base_url: Optional, you may provide this alternatively via
environment variable GX_CLOUD_BASE_URL or within a config file.
cloud_access_token: Optional, you may provide this alternatively
via environment variable GX_CLOUD_ACCESS_TOKEN or within a config file.
cloud_organization_id: Optional, you may provide this alternatively
via environment variable GX_CLOUD_ORGANIZATION_ID or within a config file.
cloud_workspace_id: Optional, you may provide this alternatively
via environment variable GX_CLOUD_WORKSPACE_ID or within a config file.
Returns:
bool: Is all the information needed to build a cloud_config is available?
""" # noqa: E501 # FIXME CoP
cloud_config_dict = cls._get_cloud_config_dict(
cloud_base_url=cloud_base_url,
cloud_access_token=cloud_access_token,
cloud_organization_id=cloud_organization_id,
cloud_workspace_id=cloud_workspace_id,
)
return all((v for k, v in cloud_config_dict.items() if k not in OPTIONAL_CLOUD_CONFIG_KEYS))
@classmethod
def determine_context_root_directory(
cls,
context_root_dir: Optional[PathStr],
project_root_dir: Optional[PathStr],
) -> str:
context_root_dir = cls._resolve_context_root_dir_and_project_root_dir(
context_root_dir=context_root_dir, project_root_dir=project_root_dir
)
if context_root_dir is None:
context_root_dir = os.getcwd() # noqa: PTH109 # FIXME CoP
logger.debug(
f'context_root_dir was not provided - defaulting to current working directory "'
f'{context_root_dir}".'
)
return os.path.abspath( # noqa: PTH100 # FIXME CoP
os.path.expanduser(context_root_dir) # noqa: PTH111 # FIXME CoP
)
@classmethod
def retrieve_data_context_config_from_cloud(
cls, cloud_config: GXCloudConfig
) -> DataContextConfig:
"""
Utilizes the GXCloudConfig instantiated in the constructor to create a request to the Cloud API.
Given proper authorization, the request retrieves a data context config that is pre-populated with
GX objects specific to the user's Cloud environment (datasources, data connectors, etc).
Please note that substitution for ${VAR} variables is performed in GX Cloud before being sent
over the wire.
:return: the configuration object retrieved from the Cloud API
""" # noqa: E501 # FIXME CoP
response = cls._request_cloud_backend(
cloud_config=cloud_config, resource=GXCloudRESTResource.DATA_CONTEXT
)
config = cls._prepare_v1_config(config=response.json())
return DataContextConfig(**config)
@classmethod
def _prepare_v1_config(cls, config: dict) -> dict:
# FluentDatasources are nested under the "datasources" key and need to be separated
# to prevent downstream issues
# This should be done before datasources are popped from the config below until
# fluent_datasourcse are renamed datasourcess ()
v1_data_sources = config.pop("data_sources", [])
config["fluent_datasources"] = {ds["name"]: ds for ds in v1_data_sources}
# Various context variables are no longer top-level keys in V1
for var in (
"datasources",
"notebooks",
"concurrency",
"include_rendered_content",
"profiler_store_name",
"anonymous_usage_statistics",
"evaluation_parameter_store_name",
"suite_parameter_store_name",
):
val = config.pop(var, None)
if val:
logger.info(f"Removed {var} from DataContextConfig while preparing V1 config")
# V1 renamed Validations to ValidationResults
# so this is a temporary patch until Cloud implements a V1 endpoint for DataContextConfig
cls._change_key_from_v0_to_v1(
config,
"validations_store_name",
DataContextVariableSchema.VALIDATIONS_STORE_NAME,
)
config = cls._prepare_stores_config(config=config)
return config
@classmethod
def _prepare_stores_config(cls, config) -> dict:
stores = config.get("stores")
if not stores:
return config
to_delete: list[str] = []
for name, store in stores.items():
# Certain stores have been renamed in V1
cls._change_value_from_v0_to_v1(
store, "class_name", "ValidationsStore", ValidationResultsStore.__name__
)
# Profiler stores are no longer supported in V1
if store.get("class_name") in [
"ProfilerStore",
"EvaluationParameterStore",
"SuiteParameterStore",
]:
to_delete.append(name)
for name in to_delete:
config["stores"].pop(name)
return config
@staticmethod
def _change_key_from_v0_to_v1(config: dict, v0_key: str, v1_key: str) -> Optional[dict]:
"""Update the key if we have a V0 key and no V1 key in the config.
Mutates the config object and returns the value that was renamed
"""
value = config.pop(v0_key, None)
if value and v1_key not in config:
config[v1_key] = value
return config.get(v1_key)
@staticmethod
def _change_value_from_v0_to_v1(config: dict, key: str, v0_value: str, v1_value: str) -> dict:
if config.get(key) == v0_value:
config[key] = v1_value
return config
@classmethod
def _request_cloud_backend(cls, cloud_config: GXCloudConfig, resource: str) -> Response:
access_token = cloud_config.access_token
base_url = cloud_config.base_url
organization_id = cloud_config.organization_id
workspace_id = cloud_config.workspace_id
if not organization_id:
raise OrganizationIdNotSpecifiedError()
with create_session(access_token=access_token) as session:
if resource == GXCloudRESTResource.ACCOUNTS_ME:
url_workspace_id = None
else:
url_workspace_id = workspace_id
url = GXCloudStoreBackend.construct_versioned_url(
base_url=base_url,
organization_id=organization_id,
resource_name=resource,
workspace_id=url_workspace_id,
)
response = session.get(url)
try:
response.raise_for_status()
except HTTPError:
raise gx_exceptions.GXCloudError( # noqa: TRY003 # FIXME CoP
f"Bad request made to GX Cloud; {response.text}", response=response
)
return response
@classmethod
def get_cloud_config(
cls,
cloud_base_url: Optional[str] = None,
cloud_access_token: Optional[str] = None,
cloud_organization_id: Optional[str] = None,
cloud_workspace_id: Optional[str] = None,
) -> GXCloudConfig:
"""
Build a GXCloudConfig object. Config attributes are collected from any combination of args passed in at
runtime, environment variables, or a global great_expectations.conf file (in order of precedence).
If provided as explicit arguments, cloud_base_url, cloud_access_token and
cloud_organization_id will use runtime values instead of environment variables or conf files.
Args:
cloud_base_url: Optional, you may provide this alternatively via
environment variable GX_CLOUD_BASE_URL or within a config file.
cloud_access_token: Optional, you may provide this alternatively
via environment variable GX_CLOUD_ACCESS_TOKEN or within a config file.
cloud_organization_id: Optional, you may provide this alternatively
via environment variable GX_CLOUD_ORGANIZATION_ID or within a config file.
cloud_workspace_id: Optional, you may provide this alternatively
via environment variable GX_CLOUD_WORKSPACE_ID or within a config file.
Returns:
GXCloudConfig
Raises:
GXCloudError if a GX Cloud variable is missing
""" # noqa: E501 # FIXME CoP
cloud_config_dict = cls._get_cloud_config_dict(
cloud_base_url=cloud_base_url,
cloud_access_token=cloud_access_token,
cloud_organization_id=cloud_organization_id,
cloud_workspace_id=cloud_workspace_id,
)
missing_keys = []
for key, val in cloud_config_dict.items():
if key not in OPTIONAL_CLOUD_CONFIG_KEYS and not val:
missing_keys.append(key)
if len(missing_keys) > 0:
missing_keys_str = [f'"{key}"' for key in missing_keys]
global_config_path_str = [f'"{path}"' for path in super().GLOBAL_CONFIG_PATHS]
raise DataContextError( # noqa: TRY003 # FIXME CoP
f"{(', ').join(missing_keys_str)} arg(s) required for ge_cloud_mode but neither provided nor found in " # noqa: E501 # FIXME CoP
f"environment or in global configs ({(', ').join(global_config_path_str)})."
)
base_url = cloud_config_dict[GXCloudEnvironmentVariable.BASE_URL]
assert base_url is not None
access_token = cloud_config_dict[GXCloudEnvironmentVariable.ACCESS_TOKEN]
organization_id = cloud_config_dict[GXCloudEnvironmentVariable.ORGANIZATION_ID]
workspace_id = cloud_config_dict[GXCloudEnvironmentVariable.WORKSPACE_ID]
return GXCloudConfig(
base_url=base_url,
access_token=access_token,
organization_id=organization_id,
workspace_id=workspace_id,
)
@classmethod
def _get_cloud_config_dict(
cls,
cloud_base_url: Optional[str] = None,
cloud_access_token: Optional[str] = None,
cloud_organization_id: Optional[str] = None,
cloud_workspace_id: Optional[str] = None,
) -> Dict[GXCloudEnvironmentVariable, Optional[str]]:
cloud_base_url = (
cloud_base_url
or cls._get_global_config_value(
environment_variable=GXCloudEnvironmentVariable.BASE_URL,
conf_file_section="ge_cloud_config",
conf_file_option="base_url",
)
or CLOUD_DEFAULT_BASE_URL
)
cloud_organization_id = cloud_organization_id or cls._get_global_config_value(
environment_variable=GXCloudEnvironmentVariable.ORGANIZATION_ID,
conf_file_section="ge_cloud_config",
conf_file_option="organization_id",
)
cloud_access_token = cloud_access_token or cls._get_global_config_value(
environment_variable=GXCloudEnvironmentVariable.ACCESS_TOKEN,
conf_file_section="ge_cloud_config",
conf_file_option="access_token",
)
cloud_workspace_id = cloud_workspace_id or cls._get_global_config_value(
environment_variable=GXCloudEnvironmentVariable.WORKSPACE_ID,
conf_file_section="ge_cloud_config",
conf_file_option="workspace_id",
)
return {
GXCloudEnvironmentVariable.BASE_URL: cloud_base_url,
GXCloudEnvironmentVariable.ORGANIZATION_ID: cloud_organization_id,
GXCloudEnvironmentVariable.ACCESS_TOKEN: cloud_access_token,
GXCloudEnvironmentVariable.WORKSPACE_ID: cloud_workspace_id,
}
@override
def _init_datasources(self) -> None:
# Note that Cloud does NOT populate self._datasources with existing objects on init.
# Objects are retrieved only when requested and are NOT cached (this differs in ephemeral/file-backed contexts). # noqa: E501 # FIXME CoP
self._datasources = DatasourceDict(
context=self,
datasource_store=self._datasource_store,
)
@override
def _init_primary_stores(self, store_configs: Dict[str, StoreConfigTypedDict]) -> None:
for store_config in store_configs.values():
store_config.get("store_backend", {}).get("ge_cloud_credentials", {}).setdefault(
"workspace_id", self.ge_cloud_config.workspace_id
)
super()._init_primary_stores(store_configs)
@override
def _init_datasource_store(self) -> DatasourceStore:
# Never explicitly referenced but adheres
# to the convention set by other internal Stores
store_name = DataContextConfigDefaults.DEFAULT_DATASOURCE_STORE_NAME.value
store_backend: dict = {"class_name": GXCloudStoreBackend.__name__}
runtime_environment: dict = {
"root_directory": self.root_directory,
"ge_cloud_credentials": self.ge_cloud_config.to_dict(),
"ge_cloud_resource_type": GXCloudRESTResource.DATASOURCE,
"ge_cloud_base_url": self.ge_cloud_config.base_url,
}
datasource_store = DatasourceStore(
store_name=store_name,
store_backend=store_backend,
runtime_environment=runtime_environment,
)
return datasource_store
def _init_data_asset_store(self) -> DataAssetStore:
# Never explicitly referenced but adheres
# to the convention set by other internal Stores
store_name = DataContextConfigDefaults.DEFAULT_DATA_ASSET_STORE_NAME.value
store_backend: dict = {"class_name": GXCloudStoreBackend.__name__}
runtime_environment: dict = {
"root_directory": self.root_directory,
"ge_cloud_credentials": self.ge_cloud_config.to_dict(),
"ge_cloud_resource_type": GXCloudRESTResource.DATA_ASSET,
"ge_cloud_base_url": self.ge_cloud_config.base_url,
}
data_asset_store = DataAssetStore(
store_name=store_name,
store_backend=store_backend,
runtime_environment=runtime_environment,
serializer=JsonConfigSerializer(schema=assetConfigSchema),
)
return data_asset_store
def _delete_asset(self, id: str) -> bool:
"""Delete a DataAsset. Cloud will also update the corresponding Datasource."""
key = GXCloudIdentifier(
resource_type=GXCloudRESTResource.DATA_ASSET,
id=id,
)
return self._data_asset_store.remove_key(key)
@property
def ge_cloud_config(self) -> GXCloudConfig:
return self._cloud_config
@override
@property
def _include_rendered_content(self) -> bool:
# Cloud contexts always want rendered content
return True
@override
def _init_variables(self) -> CloudDataContextVariables:
ge_cloud_base_url: str = self.ge_cloud_config.base_url
if not self.ge_cloud_config.organization_id or not self.ge_cloud_config.workspace_id:
raise GXCloudConfigError(missing_keys=["organization_id", "workspace_id"])
ge_cloud_organization_id: str = self.ge_cloud_config.organization_id
ge_cloud_workspace_id: str = self.ge_cloud_config.workspace_id
ge_cloud_access_token: str = self.ge_cloud_config.access_token
variables = CloudDataContextVariables(
config=self._project_config,
config_provider=self.config_provider,
ge_cloud_base_url=ge_cloud_base_url,
ge_cloud_organization_id=ge_cloud_organization_id,
ge_cloud_workspace_id=ge_cloud_workspace_id,
ge_cloud_access_token=ge_cloud_access_token,
)
return variables
@override
def _construct_data_context_id(self) -> uuid.UUID | None:
"""
Choose the id of the currently-configured expectations store, if available and a persistent store.
If not, it should choose the id stored in DataContextConfig.
Returns:
UUID to use as the data_context_id
""" # noqa: E501 # FIXME CoP
org_id = self.ge_cloud_config.organization_id
if org_id:
return uuid.UUID(org_id)
return None
@override
def get_config_with_variables_substituted(
self, config: Optional[DataContextConfig] = None
) -> DataContextConfig:
"""
Substitute vars in config of form ${var} or $(var) with values found in the following places,
in order of precedence: cloud_config (for Data Contexts in GX Cloud mode), runtime_environment,
environment variables, config_variables, or ge_cloud_config_variable_defaults (allows certain variables to
be optional in GX Cloud mode).
""" # noqa: E501 # FIXME CoP
if not config:
config = self.config
substitutions: dict = self.config_provider.get_values()
cloud_config_variable_defaults = {
"plugins_directory": self._normalize_absolute_or_relative_path(
path=DataContextConfigDefaults.DEFAULT_PLUGINS_DIRECTORY.value
),
}
missing_config_vars_and_subs: list[tuple[str, str]] = []
for config_variable, value in cloud_config_variable_defaults.items():
if substitutions.get(config_variable) is None:
substitutions[config_variable] = value
missing_config_vars_and_subs.append((config_variable, value))
if missing_config_vars_and_subs:
missing_config_var_repr = ", ".join(
[f"{var}={sub}" for var, sub in missing_config_vars_and_subs]
)
logger.info(
"Config variables were not found in environment or global config ("
f"{self.GLOBAL_CONFIG_PATHS}). Using default values instead. {missing_config_var_repr} ;" # noqa: E501 # FIXME CoP
" If you would like to "
"use a different value, please specify it in an environment variable or in a "
"great_expectations.conf file located at one of the above paths, in a section named " # noqa: E501 # FIXME CoP
'"ge_cloud_config".'
)
return DataContextConfig(**self.config_provider.substitute_config(config))
@override
def _init_site_builder_for_data_docs_site_creation(
self, site_name: str, site_config: dict
) -> SiteBuilder:
"""
Note that this explicitly overriding the `AbstractDataContext` helper method called
in `self.build_data_docs()`.
The only difference here is the inclusion of `ge_cloud_mode` in the `runtime_environment`
used in `SiteBuilder` instantiation.
"""
site_builder: SiteBuilder = instantiate_class_from_config(
config=site_config,
runtime_environment={
"data_context": self,
"root_directory": self.root_directory,
"site_name": site_name,
"cloud_mode": True,
},
config_defaults={
"class_name": "SiteBuilder",
"module_name": "great_expectations.render.renderer.site_builder",
},
)
return site_builder
@classmethod
def _load_cloud_backed_project_config(
cls,
cloud_config: Optional[GXCloudConfig],
):
assert cloud_config is not None
config = cls.retrieve_data_context_config_from_cloud(cloud_config=cloud_config)
return config
@override
def _save_project_config(self) -> None:
"""
See parent 'AbstractDataContext._save_project_config()` for more information.
Explicitly override base class implementation to retain legacy behavior.
"""
logger.debug(
"CloudDataContext._save_project_config() was called. Base class impl was override to be no-op to retain " # noqa: E501 # FIXME CoP
"legacy behavior."
)
@override
def _view_validation_result(self, result: CheckpointResult) -> None:
for validation_result in result.run_results.values():
url = validation_result.result_url
if url:
self._open_url_in_browser(url)
@override
def _add_datasource(
self,
name: str | None = None,
initialize: bool = True,
datasource: FluentDatasource | None = None,
**kwargs,
) -> FluentDatasource | None:
return super()._add_datasource(
name=name,
initialize=initialize,
datasource=datasource,
**kwargs,
)
@override
def prepare_checkpoint_run(
self,
checkpoint: Checkpoint,
batch_parameters: Dict[str, Any],
expectation_parameters: SuiteParameterDict,
) -> None:
"""CloudContext specific preparation for a checkpoint run.
Actualizes windowed parameters by updating expectation_parameters in place.
"""
if not self._checkpoint_has_windowed_expectations(checkpoint):
return
base_url = self.ge_cloud_config.base_url
org_id = self.ge_cloud_config.organization_id
workspace_id = self.ge_cloud_config.workspace_id
if workspace_id:
expectation_parameters_url = urljoin(
base=base_url,
url=f"/api/v1/organizations/{org_id}/workspaces/{workspace_id}/checkpoints/{checkpoint.id}/expectation-parameters",
)
else:
expectation_parameters_url = urljoin(
base=base_url,
url=f"/api/v1/organizations/{org_id}/checkpoints/{checkpoint.id}/expectation-parameters",
)
with create_session(access_token=self.ge_cloud_config.access_token) as session:
response = session.get(url=expectation_parameters_url)
if not response.ok:
raise gx_exceptions.GXCloudError(
message="Unable to retrieve expectation_parameters for Checkpoint with "
f"ID={checkpoint.id}.",
response=response,
)
data = response.json()
try:
overlapping_keys = set(expectation_parameters.keys()) & set(
data["data"]["expectation_parameters"].keys()
)
if overlapping_keys:
logger.warning(
"Passed in expectation_parameters also found in GX Cloud. Overwriting "
f"passed in values with GX Cloud values for keys: {overlapping_keys}"
)
expectation_parameters.update(data["data"]["expectation_parameters"])
except KeyError as e:
raise gx_exceptions.GXCloudError(
message="Malformed expectation_parameters response received from GX Cloud",
response=response,
) from e
def _checkpoint_has_windowed_expectations(self, checkpoint: Checkpoint) -> bool:
# Check if we have a windowed parameter
for validation_def in checkpoint.validation_definitions:
for expectation in validation_def.suite.expectations:
if expectation.windows is not None:
return True
return False
| CloudDataContext |
python | huggingface__transformers | src/transformers/pipelines/text_to_audio.py | {
"start": 1387,
"end": 12139
} | class ____(Pipeline):
"""
Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
pipeline generates an audio file from an input text and optional other conditional inputs.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
Example:
```python
>>> from transformers import pipeline
>>> pipe = pipeline(model="suno/bark-small")
>>> output = pipe("Hey it's HuggingFace on the phone!")
>>> audio = output["audio"]
>>> sampling_rate = output["sampling_rate"]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
<Tip>
You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or
[`TextToAudioPipeline.__call__.generate_kwargs`].
Example:
```python
>>> from transformers import pipeline
>>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small")
>>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length
>>> generate_kwargs = {
... "do_sample": True,
... "temperature": 0.7,
... "max_new_tokens": 35,
... }
>>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs)
```
</Tip>
This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
`"text-to-audio"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
"""
_pipeline_calls_generate = True
_load_processor = None # prioritize processors as some models require it
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
)
def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs):
super().__init__(*args, **kwargs)
self.vocoder = None
if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():
self.vocoder = (
SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device)
if vocoder is None
else vocoder
)
if self.model.config.model_type in ["musicgen"]:
# MusicGen expect to use the tokenizer
self.processor = None
self.sampling_rate = sampling_rate
if self.vocoder is not None:
self.sampling_rate = self.vocoder.config.sampling_rate
if self.sampling_rate is None:
# get sampling_rate from config and generation config
config = self.model.config
gen_config = self.model.__dict__.get("generation_config", None)
if gen_config is not None:
config.update(gen_config.to_dict())
for sampling_rate_name in ["sample_rate", "sampling_rate"]:
sampling_rate = getattr(config, sampling_rate_name, None)
if sampling_rate is not None:
self.sampling_rate = sampling_rate
elif getattr(config, "codec_config", None) is not None:
sampling_rate = getattr(config.codec_config, sampling_rate_name, None)
if sampling_rate is not None:
self.sampling_rate = sampling_rate
# last fallback to get the sampling rate based on processor
if self.sampling_rate is None and self.processor is not None and hasattr(self.processor, "feature_extractor"):
self.sampling_rate = self.processor.feature_extractor.sampling_rate
def preprocess(self, text, **kwargs):
    """Tokenize the input text (or chat) into model-ready tensors.

    A single string is wrapped into a one-element list; chats are rendered
    through the preprocessor's chat template instead of plain tokenization.
    """
    if isinstance(text, str):
        text = [text]
    if self.model.config.model_type == "bark":
        # bark Tokenizer is called with BarkProcessor which uses those kwargs
        new_kwargs = {
            "max_length": self.generation_config.semantic_config.get("max_input_semantic_length", 256),
            "add_special_tokens": False,
            "return_attention_mask": True,
            "return_token_type_ids": False,
        }
        # priority is given to kwargs
        new_kwargs.update(kwargs)
        kwargs = new_kwargs
    # Prefer the processor when available; fall back to the tokenizer.
    preprocessor = self.processor if self.processor is not None else self.tokenizer
    if isinstance(text, Chat):
        output = preprocessor.apply_chat_template(
            text.messages,
            tokenize=True,
            return_dict=True,
            **kwargs,
        )
    else:
        output = preprocessor(text, **kwargs, return_tensors="pt")
    return output
def _forward(self, model_inputs, **kwargs):
    """Run generation (or a plain forward pass) on the tokenized inputs.

    Generative models receive the merged `generate_kwargs`; forward-only
    models must not receive any (a `ValueError` is raised otherwise). When a
    vocoder is configured, the spectrogram output is converted to a waveform.
    """
    # we expect some kwargs to be additional tensors which need to be on the right device
    kwargs = self._ensure_tensor_on_device(kwargs, device=self.device)
    forward_params = kwargs["forward_params"]
    generate_kwargs = kwargs["generate_kwargs"]
    if self.model.can_generate():
        # we expect some kwargs to be additional tensors which need to be on the right device
        generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device)
        # User-defined `generation_config` passed to the pipeline call take precedence
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config
        # generate_kwargs get priority over forward_params
        forward_params.update(generate_kwargs)
        # ensure dict output to facilitate postprocessing
        forward_params.update({"return_dict_in_generate": True})
        output = self.model.generate(**model_inputs, **forward_params)
    else:
        if len(generate_kwargs):
            raise ValueError(
                "You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non "
                "empty. For forward-only TTA models, please use `forward_params` instead of `generate_kwargs`. "
                f"For reference, the `generate_kwargs` used here are: {generate_kwargs.keys()}"
            )
        output = self.model(**model_inputs, **forward_params)[0]
    if self.vocoder is not None:
        # in that case, the output is a spectrogram that needs to be converted into a waveform
        output = self.vocoder(output)
    return output
# Typing overloads: a single prompt/chat maps to one AudioOutput, a list of
# prompts/chats maps to a list of AudioOutput.
@overload
def __call__(self, text_inputs: str, **forward_params: Any) -> AudioOutput: ...
@overload
def __call__(self, text_inputs: list[str], **forward_params: Any) -> list[AudioOutput]: ...
@overload
def __call__(self, text_inputs: ChatType, **forward_params: Any) -> AudioOutput: ...
@overload
def __call__(self, text_inputs: list[ChatType], **forward_params: Any) -> list[AudioOutput]: ...
def __call__(self, text_inputs, **forward_params):
    """
    Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.

    Args:
        text_inputs (`str`, `list[str]`, `ChatType`, or `list[ChatType]`):
            One or several texts to generate. If strings or a list of string are passed, this pipeline will
            generate the corresponding text. Alternatively, a "chat", in the form of a list of dicts with "role"
            and "content" keys, can be passed, or a list of such chats. When chats are passed, the model's chat
            template will be used to format them before passing them to the model.
        forward_params (`dict`, *optional*):
            Parameters passed to the model generation/forward method. `forward_params` are always passed to the
            underlying model.
        generate_kwargs (`dict`, *optional*):
            The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
            complete overview of generate, check the [following
            guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are
            only passed to the underlying model if the latter is a generative model.

    Return:
        `AudioOutput` or a list of `AudioOutput`, which is a `TypedDict` with two keys:

        - **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.
        - **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.
    """
    # All real work happens in preprocess/_forward/postprocess via the base class.
    return super().__call__(text_inputs, **forward_params)
def _sanitize_parameters(
self,
preprocess_params=None,
forward_params=None,
generate_kwargs=None,
):
if getattr(self, "assistant_model", None) is not None:
generate_kwargs["assistant_model"] = self.assistant_model
if getattr(self, "assistant_tokenizer", None) is not None:
generate_kwargs["tokenizer"] = self.tokenizer
generate_kwargs["assistant_tokenizer"] = self.assistant_tokenizer
params = {
"forward_params": forward_params if forward_params else {},
"generate_kwargs": generate_kwargs if generate_kwargs else {},
}
if preprocess_params is None:
preprocess_params = {}
postprocess_params = {}
return preprocess_params, params, postprocess_params
def postprocess(self, audio):
    """Convert raw model output into an `AudioOutput` dict.

    Accepts the different shapes produced by `_forward` (a dict with an
    "audio" entry, a generate-style dict with "sequences", or a tuple) and
    normalizes the waveform(s) to float32 numpy arrays on CPU.
    """
    decode_with_processor = False
    # Unwrap the container produced by the model / generate call.
    if isinstance(audio, dict):
        if "audio" in audio:
            audio = audio["audio"]
        else:
            # generate(..., return_dict_in_generate=True) output: the
            # sequences still need decoding by the processor.
            decode_with_processor = True
            audio = audio["sequences"]
    elif isinstance(audio, tuple):
        audio = audio[0]
    if decode_with_processor and self.processor is not None:
        audio = self.processor.decode(audio)

    def _to_numpy(waveform):
        # Move to CPU as float32 before converting to numpy.
        return waveform.to(device="cpu", dtype=torch.float).numpy().squeeze()

    if isinstance(audio, list):
        converted = [_to_numpy(el) for el in audio]
        audio = converted if len(converted) > 1 else converted[0]
    else:
        audio = _to_numpy(audio)
    return AudioOutput(
        audio=audio,
        sampling_rate=self.sampling_rate,
    )
| TextToAudioPipeline |
python | pandas-dev__pandas | asv_bench/benchmarks/index_object.py | {
"start": 4163,
"end": 5581
} | class ____:
# Benchmarked index dtypes; asv runs every timing method once per dtype.
params = ["String", "Float", "Int"]
param_names = ["dtype"]

def setup(self, dtype):
    """Build a large index plus the masks/variants used by the timings."""
    N = 10**6
    if dtype == "String":
        self.idx = Index([f"i-{i}" for i in range(N)], dtype=object)
    elif dtype == "Float":
        self.idx = Index(np.arange(N), dtype=np.float64)
    elif dtype == "Int":
        self.idx = Index(np.arange(N), dtype=np.int64)
    # Boolean mask selecting every third element.
    self.array_mask = (np.arange(N) % 3) == 0
    self.series_mask = Series(self.array_mask)
    self.sorted = self.idx.sort_values()
    half = N // 2
    # Duplicate each label once -> non-unique variants for get_loc.
    self.non_unique = self.idx[:half].append(self.idx[:half])
    self.non_unique_sorted = self.sorted[:half].repeat(2)
    self.key = self.sorted[N // 4]
def time_boolean_array(self, dtype):
    # Boolean-mask indexing with a raw ndarray mask.
    self.idx[self.array_mask]

def time_boolean_series(self, dtype):
    # Boolean-mask indexing with a Series mask.
    self.idx[self.series_mask]

def time_get(self, dtype):
    # Scalar positional access.
    self.idx[1]

def time_slice(self, dtype):
    # Contiguous slice.
    self.idx[:-1]

def time_slice_step(self, dtype):
    # Strided slice.
    self.idx[::2]

def time_get_loc(self, dtype):
    # Label lookup on the unsorted index.
    self.idx.get_loc(self.key)

def time_get_loc_sorted(self, dtype):
    # Label lookup on the sorted index.
    self.sorted.get_loc(self.key)

def time_get_loc_non_unique(self, dtype):
    # Label lookup with duplicated labels, unsorted.
    self.non_unique.get_loc(self.key)

def time_get_loc_non_unique_sorted(self, dtype):
    # Label lookup with duplicated labels, sorted.
    self.non_unique_sorted.get_loc(self.key)
| Indexing |
python | pytest-dev__pytest | testing/test_terminal.py | {
"start": 67825,
"end": 69547
} | class ____:
"""Ensure classic output style works as expected (#3883)"""
@pytest.fixture
def test_files(self, pytester: Pytester) -> None:
    # One passing file, one failing file, and a subdirectory file with a
    # mix of outcomes (2 pass / 1 fail) -> 3 passed, 2 failed overall.
    pytester.makepyfile(
        **{
            "test_one.py": "def test_one(): pass",
            "test_two.py": "def test_two(): assert 0",
            "sub/test_three.py": """
                def test_three_1(): pass
                def test_three_2(): assert 0
                def test_three_3(): pass
            """,
        }
    )

def test_normal_verbosity(self, pytester: Pytester, test_files) -> None:
    # Default verbosity: one line per file with a dot/letter per test.
    result = pytester.runpytest("-o", "console_output_style=classic")
    result.stdout.fnmatch_lines(
        [
            f"sub{os.sep}test_three.py .F.",
            "test_one.py .",
            "test_two.py F",
            "*2 failed, 3 passed in*",
        ]
    )

def test_verbose(self, pytester: Pytester, test_files) -> None:
    # -v: one line per test with the outcome spelled out.
    result = pytester.runpytest("-o", "console_output_style=classic", "-v")
    result.stdout.fnmatch_lines(
        [
            f"sub{os.sep}test_three.py::test_three_1 PASSED",
            f"sub{os.sep}test_three.py::test_three_2 FAILED",
            f"sub{os.sep}test_three.py::test_three_3 PASSED",
            "test_one.py::test_one PASSED",
            "test_two.py::test_two FAILED",
            "*2 failed, 3 passed in*",
        ]
    )

def test_quiet(self, pytester: Pytester, test_files) -> None:
    # -q: just the progress characters plus the summary line.
    result = pytester.runpytest("-o", "console_output_style=classic", "-q")
    result.stdout.fnmatch_lines([".F..F", "*2 failed, 3 passed in*"])
| TestClassicOutputStyle |
python | kamyu104__LeetCode-Solutions | Python/split-two-strings-to-make-palindrome.py | {
"start": 29,
"end": 687
} | class ____(object):
def checkPalindromeFormation(self, a, b):
    """
    :type a: str
    :type b: str
    :rtype: bool
    """
    # Walk inward matching a prefix of `prefix` against a suffix of
    # `suffix`; at the first mismatch, the untouched middle section must
    # itself be a palindrome in one of the two strings.
    def middle_is_palindrome(s, lo, hi):
        return all(s[lo + k] == s[hi - k] for k in range((hi - lo + 1) // 2))

    def splits_to_palindrome(prefix, suffix):
        lo, hi = 0, len(suffix) - 1
        while lo < hi and prefix[lo] == suffix[hi]:
            lo += 1
            hi -= 1
        if lo >= hi:
            return True
        return middle_is_palindrome(prefix, lo, hi) or middle_is_palindrome(
            suffix, lo, hi
        )

    # Either string may contribute the prefix.
    return splits_to_palindrome(a, b) or splits_to_palindrome(b, a)
| Solution |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 27918,
"end": 30366
} | class ____(ContainerHolder):
"""
Accordion menu object. It wraps `AccordionGroup` objects in a container
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
css_class : str, optional
CSS classes to be applied to the ``<div>``. By default None.
Parameters
----------
*accordion_groups : str, LayoutObject
Any number of layout objects as positional arguments to be rendered
within the ``<div>``.
css_id : str, optional
A DOM id for the layout object which will be added to the ``<div>`` if
provided. By default None.
css_class : str, optional
Additional CSS classes to be applied in addition to those declared by
the class itself. By default None.
template : str, optional
Overrides the default template, if provided. By default None.
**kwargs : dict, optional
Additional attributes are passed to ``flatatt`` and converted into
key="value", pairs. These attributes are added to the ``<div>``.
Examples
--------
Example::
Accordion(
AccordionGroup("group name", "form_field_1", "form_field_2"),
AccordionGroup("another group name", "form_field")
)
"""
# Default template; "%s" is substituted with the active template pack.
template = "%s/accordion.html"

def __init__(self, *accordion_groups, css_id=None, css_class=None, template=None, **kwargs):
    """Wrap the given accordion groups and wire them to this accordion."""
    super().__init__(*accordion_groups, css_id=css_id, css_class=css_class, template=template, **kwargs)

    # Accordion needs to have a unique id
    if not self.css_id:
        self.css_id = "-".join(["accordion", str(randint(1000, 9999))])

    # AccordionGroup need to have 'data-parent="#Accordion.id"'
    for accordion_group in accordion_groups:
        accordion_group.data_parent = self.css_id

def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
    """Render every group and wrap the concatenated HTML in the accordion template."""
    content = SafeString("")

    # Open the group that should be open.
    self.open_target_group_for_form(form)

    for group in self.fields:
        # Re-point children at this accordion in case css_id changed.
        group.data_parent = self.css_id
        content += render_field(group, form, context, template_pack=template_pack, **kwargs)

    template = self.get_template_name(template_pack)
    context.update({"accordion": self, "content": content})
    return render_to_string(template, context.flatten())
| Accordion |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1053250,
"end": 1053420
} | class ____(sgqlc.types.Union):
"""
GraphQL union type that may resolve to `Bot`, `Organization`, or `User`
(see ``__types__``).
"""

__schema__ = graphql_schema
# Concrete member types of the union.
__types__ = (Bot, Organization, User)
| AuditEntryActor |
python | has2k1__plotnine | plotnine/ggplot.py | {
"start": 2052,
"end": 24662
} | class ____:
"""
Create a new ggplot object
Parameters
----------
data :
Default data for plot. Every layer that does not
have data of its own will use this one.
mapping :
Default aesthetics mapping for the plot. These will be used
by all layers unless specifically overridden.
Notes
-----
ggplot object only have partial support for pickling. The mappings used
by pickled objects should not reference variables in the namespace.
"""
# Populated during drawing (see `_setup` / `facet.setup`).
figure: Figure
axs: list[Axes]
_gridspec: p9GridSpec

def __init__(
    self,
    data: Optional[DataLike] = None,
    mapping: Optional[aes] = None,
):
    from .mapping._env import Environment

    # Allow some sloppiness
    data, mapping = order_as_data_mapping(data, mapping)
    self.data = data
    self.mapping = mapping if mapping is not None else aes()
    self.facet: facet = facet_null()
    self.labels = labels_view()
    self.layers = Layers()
    self.guides = guides()
    self.scales = Scales()
    self.theme = theme_get()
    self.coordinates: coord = coord_cartesian()
    # Capture the caller's environment so mappings can reference
    # variables in the caller's namespace.
    self.environment = Environment.capture(1)
    self.layout = Layout()
    self.watermarks: list[watermark] = []

    # build artefacts
    self._build_objs = NS()
def __str__(self) -> str:
    """
    Return a wrapped display size (in pixels) of the plot
    """
    w, h = self.theme._figure_size_px
    return f"<ggplot: ({w} x {h})>"

def __repr__(self):
    # knitr relies on __repr__ to automatically print the last object
    # in a cell.
    if is_knitr_engine():
        self.show()
        return ""
    return super().__repr__()

def _repr_mimebundle_(self, include=None, exclude=None) -> MimeBundle:
    """
    Return dynamic MIME bundle for plot display

    This method is called when a ggplot object is the last in the cell.

    Notes
    -----
    - https://ipython.readthedocs.io/en/stable/config/integrating.html
    """
    ip = get_ipython()
    # Resolution order: plotnine option, IPython inline-backend setting,
    # then "retina" as the default.
    format: FigureFormat = (
        get_option("figure_format")
        or (ip and ip.config.InlineBackend.get("figure_format"))
        or "retina"
    )

    # While jpegs can be displayed as retina, we restrict the output
    # of "retina" to png
    if format == "retina":
        self = copy(self)
        self.theme = self.theme.to_retina()

    buf = BytesIO()
    self.save(buf, "png" if format == "retina" else format, verbose=False)
    figure_size_px = self.theme._figure_size_px
    return get_mimebundle(buf.getvalue(), format, figure_size_px)
def show(self):
    """
    Show plot using the matplotlib backend set by the user

    This function is called for its side-effects.
    """
    # Prevent against any modifications to the users
    # ggplot object. Do the copy here as we may/may not
    # assign a default theme
    self = deepcopy(self)

    if is_inline_backend() or is_quarto_environment():
        from IPython.display import display

        data, metadata = self._repr_mimebundle_()
        display(data, metadata=metadata, raw=True)
    else:
        self.draw(show=True)

def __deepcopy__(self, memo: dict[Any, Any]) -> ggplot:
    """
    Deep copy without copying the dataframe and environment
    """
    cls = self.__class__
    result = cls.__new__(cls)
    memo[id(self)] = result
    old = self.__dict__
    new = result.__dict__

    # don't make a deepcopy of data
    shallow = {"data", "figure", "gs", "_build_objs"}
    for key, item in old.items():
        if key in shallow:
            # Shared by reference; register in memo so nested deep
            # copies reuse the same object.
            new[key] = item
            memo[id(new[key])] = new[key]
        else:
            new[key] = deepcopy(item, memo)

    return result
def __iadd__(self, other: PlotAddable | list[PlotAddable] | None) -> Self:
    """
    Add other to ggplot object

    Parameters
    ----------
    other :
        Either an object that knows how to "radd"
        itself to a ggplot, or a list of such objects.
    """
    # Each addable object mutates this plot via its __radd__ hook.
    if isinstance(other, Sequence):
        for item in other:
            item.__radd__(self)
    elif other is not None:
        other.__radd__(self)
    return self

@overload
def __add__(
    self,
    rhs: PlotAddable | list[PlotAddable] | None,
) -> ggplot: ...

@overload
def __add__(self, rhs: ggplot) -> Compose: ...

def __add__(
    self,
    rhs: PlotAddable | list[PlotAddable] | None | ggplot,
) -> ggplot | Compose:
    """
    Add to ggplot

    Parameters
    ----------
    other :
        Either an object that knows how to "radd"
        itself to a ggplot, or a list of such objects.
    """
    from .composition import Compose

    # Operate on a copy so the original plot is left untouched.
    self = deepcopy(self)
    if isinstance(rhs, (ggplot, Compose)):
        from .composition import Wrap

        # plot + plot (or composition) starts a plot composition.
        return Wrap([self, rhs])

    return self.__iadd__(rhs)

def __or__(self, rhs: ggplot | Compose) -> Compose:
    """
    Compose 2 plots columnwise
    """
    from .composition import Beside

    return Beside([self, rhs])

def __truediv__(self, rhs: ggplot | Compose) -> Compose:
    """
    Compose 2 plots rowwise
    """
    from .composition import Stack

    return Stack([self, rhs])

def __sub__(self, rhs: ggplot | Compose) -> Compose:
    """
    Compose 2 plots columnwise
    """
    from .composition import Beside

    return Beside([self, rhs])

def __rrshift__(self, other: DataLike) -> ggplot:
    """
    Overload the >> operator to receive a dataframe
    """
    other = ungroup(other)
    if is_data_like(other):
        if self.data is None:
            self.data = other
        else:
            raise PlotnineError("`>>` failed, ggplot object has data.")
    else:
        msg = "Unknown type of data -- {!r}"
        raise TypeError(msg.format(type(other)))
    return self
def draw(self, *, show: bool = False) -> Figure:
    """
    Render the complete plot

    Parameters
    ----------
    show :
        Whether to show the plot.

    Returns
    -------
    :
        Matplotlib figure
    """
    from ._mpl.layout_manager import PlotnineLayoutEngine

    with plot_context(self, show=show):
        figure = self._setup()
        self._build()

        # setup
        self.axs = self.facet.setup(self)
        self.guides._setup(self)
        self.theme.setup(self)

        # Drawing
        self._draw_layers()
        self._draw_panel_borders()
        self._draw_breaks_and_labels()
        self.guides.draw()
        self._draw_figure_texts()
        self._draw_watermarks()
        self._draw_figure_background()

        # Artist object theming
        self.theme.apply()
        figure.set_layout_engine(PlotnineLayoutEngine(self))

    return figure

def _setup(self) -> Figure:
    """
    Setup this instance for the building process
    """
    if not hasattr(self, "figure"):
        self._create_figure()
    # Fold aesthetic-derived labels into the plot labels.
    self.labels.add_defaults(self.mapping.labels)
    return self.figure

def _create_figure(self):
    """
    Create gridspec for the panels
    """
    import matplotlib.pyplot as plt

    from ._mpl.gridspec import p9GridSpec

    self.figure = plt.figure()
    self._gridspec = p9GridSpec(1, 1, self.figure)
def _build(self):
    """
    Build ggplot for rendering.

    Notes
    -----
    This method modifies the ggplot object. The caller is
    responsible for making a copy and using that to make
    the method call.

    The steps below form a pipeline; their order matters.
    """
    if not self.layers:
        self += geom_blank()

    layers = self._build_objs.layers = self.layers
    scales = self._build_objs.scales = self.scales
    layout = self._build_objs.layout = self.layout

    # Update the label information for the plot
    layers.update_labels(self)

    # Give each layer a copy of the data, the mappings and
    # the execution environment
    layers.setup(self)

    # Initialise panels, add extra data for margins & missing
    # facetting variables, and add on a PANEL variable to data
    layout.setup(layers, self)

    # Compute aesthetics to produce data with generalised
    # variable names
    layers.compute_aesthetics(self)

    # Transform data using all scales
    layers.transform(scales)

    # Make sure missing (but required) aesthetics are added
    scales.add_missing(("x", "y"))

    # Map and train positions so that statistics have access
    # to ranges and all positions are numeric
    layout.train_position(layers, scales)
    layout.map_position(layers)

    # Apply and map statistics
    layers.compute_statistic(layout)
    layers.map_statistic(self)

    # Prepare data in geoms
    # e.g. from y and width to ymin and ymax
    layers.setup_data()

    # Apply position adjustments
    layers.compute_position(layout)

    # Reset position scales, then re-train and map. This
    # ensures that facets have control over the range of
    # a plot.
    layout.reset_position_scales()
    layout.train_position(layers, scales)
    layout.map_position(layers)

    # Train and map non-position scales
    npscales = scales.non_position_scales()
    if len(npscales):
        layers.train(npscales)
        layers.map(npscales)

    # Train coordinate system
    layout.setup_panel_params(self.coordinates)

    # fill in the defaults
    layers.use_defaults_after_scale(scales)

    # Allow stats to modify the layer data
    layers.finish_statistics()

    # Allow layout to modify data before rendering
    layout.finish_data(layers)
def _draw_panel_borders(self):
    """
    Draw panel borders
    """
    # We add a patch rather than use ax.patch because we want the
    # grid lines below the borders. We leave ax.patch for the
    # background only.
    if self.theme.T.is_blank("panel_border"):
        return

    from matplotlib.patches import Rectangle

    for ax in self.axs:
        rect = Rectangle(
            (0, 0),
            1,
            1,
            facecolor="none",
            transform=ax.transAxes,
            # Adding a clip path but defaulting to no clipping
            # gives a fullwidth border that can overlap perfectly
            # well with legend borders.
            clip_path=ax.patch,
            clip_on=False,
        )
        self.figure.add_artist(rect)
        self.theme.targets.panel_border.append(rect)

def _draw_layers(self):
    """
    Draw the main plot(s) onto the axes.
    """
    # Draw the geoms
    self.layers.draw(self.layout, self.coordinates)

def _draw_breaks_and_labels(self):
    """
    Draw breaks and labels
    """
    # 1. Draw facet labels a.k.a strip text
    # 2. Decorate the axes
    #      - xaxis & yaxis breaks, labels, limits, ...
    #
    # pidx is the panel index (location left to right, top to bottom)
    self.facet.strips.draw()
    for layout_info in self.layout.get_details():
        pidx = layout_info.panel_index
        ax = self.axs[pidx]
        panel_params = self.layout.panel_params[pidx]
        self.facet.set_limits_breaks_and_labels(panel_params, ax)

        # Remove unnecessary ticks and labels
        if not layout_info.axis_x:
            ax.xaxis.set_tick_params(
                which="both", bottom=False, labelbottom=False
            )
        if not layout_info.axis_y:
            ax.yaxis.set_tick_params(
                which="both", left=False, labelleft=False
            )

        if layout_info.axis_x:
            ax.xaxis.set_tick_params(which="both", bottom=True)
        if layout_info.axis_y:
            ax.yaxis.set_tick_params(which="both", left=True)
def _draw_figure_texts(self):
    """
    Draw title, x label, y label and caption onto the figure
    """
    figure = self.figure
    theme = self.theme
    targets = theme.targets

    title = self.labels.get("title", "")
    subtitle = self.labels.get("subtitle", "")
    caption = self.labels.get("caption", "")
    tag = self.labels.get("tag", "")

    # Get the axis labels (default or specified by user)
    # and let the coordinate modify them e.g. flip
    labels = self.coordinates.labels(
        self.layout.set_xy_labels(self.labels)
    )

    # The locations are handled by the layout manager; texts are
    # placed at (0, 0) for now.
    if title:
        targets.plot_title = figure.text(0, 0, title)

    if subtitle:
        targets.plot_subtitle = figure.text(0, 0, subtitle)

    if caption:
        targets.plot_caption = figure.text(0, 0, caption)

    if tag:
        targets.plot_tag = figure.text(0, 0, tag)

    if labels.x:
        targets.axis_title_x = figure.text(0, 0, labels.x)

    if labels.y:
        targets.axis_title_y = figure.text(0, 0, labels.y)

def _draw_watermarks(self):
    """
    Draw watermark onto figure
    """
    for wm in self.watermarks:
        wm.draw(self.figure)

def _draw_figure_background(self):
    # Full-figure rectangle drawn behind everything (zorder=-1000);
    # registered on the gridspec and theme targets as plot_background.
    from matplotlib.patches import Rectangle

    rect = Rectangle((0, 0), 0, 0, facecolor="none", zorder=-1000)
    self.figure.add_artist(rect)
    self._gridspec.patch = rect
    self.theme.targets.plot_background = rect

def _save_filename(self, ext: str) -> Path:
    """
    Make a filename for use by the save method

    Parameters
    ----------
    ext : str
        Extension e.g. png, pdf, ...
    """
    # Derive a stable token from the plot itself.
    hash_token = abs(self.__hash__())
    return Path(f"plotnine-save-{hash_token}.{ext}")
def save_helper(
    self: ggplot,
    filename: Optional[str | Path | BytesIO] = None,
    format: Optional[str] = None,
    path: Optional[str] = None,
    width: Optional[float] = None,
    height: Optional[float] = None,
    units: str = "in",
    dpi: Optional[float] = None,
    limitsize: bool | None = None,
    verbose: bool = True,
    **kwargs: Any,
) -> mpl_save_view:
    """
    Create MPL figure that will be saved

    Notes
    -----
    This method has the same arguments as [](`~plotnine.ggplot.save`).
    Use it to get access to the figure that will be saved.
    """
    fig_kwargs: Dict[str, Any] = {"format": format, **kwargs}

    if limitsize is None:
        limitsize = cast("bool", get_option("limitsize"))

    # filename, depends on the object
    if filename is None:
        ext = format if format else "pdf"
        filename = self._save_filename(ext)

    if path and isinstance(filename, (Path, str)):
        filename = Path(path) / filename

    fig_kwargs["fname"] = filename

    # Preserve the users object
    self = deepcopy(self)

    # The figure size should be known by the theme
    if width is not None and height is not None:
        width = to_inches(width, units)
        height = to_inches(height, units)
        self += theme(figure_size=(width, height))
    elif (width is None and height is not None) or (
        width is not None and height is None
    ):
        raise PlotnineError("You must specify both width and height")
    else:
        width, height = cast(
            "tuple[float, float]", self.theme.getp("figure_size")
        )

    if limitsize and (width > 25 or height > 25):
        raise PlotnineError(
            f"Dimensions ({width=}, {height=}) exceed 25 inches "
            "(height and width are specified in inches/cm/mm, "
            "not pixels). If you are sure you want these "
            "dimensions, use 'limitsize=False'."
        )

    if verbose:
        _w = from_inches(width, units)
        _h = from_inches(height, units)
        warn(f"Saving {_w} x {_h} {units} image.", PlotnineWarning)
        # BUG FIX: this previously warned a literal "Filename: (unknown)"
        # (an f-string with no placeholder); report the real destination.
        warn(f"Filename: {filename}", PlotnineWarning)

    if dpi is not None:
        self.theme = self.theme + theme(dpi=dpi)

    figure = self.draw(show=False)
    return mpl_save_view(figure, fig_kwargs)
def save(
    self,
    filename: Optional[str | Path | BytesIO] = None,
    format: Optional[str] = None,
    path: str = "",
    width: Optional[float] = None,
    height: Optional[float] = None,
    units: str = "in",
    dpi: Optional[int] = None,
    limitsize: bool | None = None,
    verbose: bool = True,
    **kwargs: Any,
):
    """
    Save a ggplot object as an image file

    Parameters
    ----------
    filename :
        File name to write the plot to. If not specified, a name
        like “plotnine-save-<hash>.<format>” is used.
    format :
        Image format to use, automatically extract from
        file name extension.
    path :
        Path to save plot to (if you just want to set path and
        not filename).
    width :
        Width (defaults to value set by the theme). If specified
        the `height` must also be given.
    height :
        Height (defaults to value set by the theme). If specified
        the `width` must also be given.
    units :
        Units for width and height when either one is explicitly
        specified (in, cm, or mm).
    dpi :
        DPI to use for raster graphics. If None, defaults to using
        the `dpi` of theme, if none is set then a `dpi` of 100.
    limitsize :
        If `True` (the default), save will not save images
        larger than 25x25 inches, to prevent the common error
        of specifying dimensions in pixels. The default value
        is from the option `plotine.options.limitsize`.
    verbose :
        If `True`, print the saving information.
    kwargs :
        Additional arguments to pass to matplotlib `savefig()`.
    """
    # Build the figure via save_helper, then write it out under the
    # plot's rc context so theming is in effect during savefig.
    sv = self.save_helper(
        filename=filename,
        format=format,
        path=path,
        width=width,
        height=height,
        units=units,
        dpi=dpi,
        limitsize=limitsize,
        verbose=verbose,
        **kwargs,
    )

    with plot_context(self).rc_context:
        sv.figure.savefig(**sv.kwargs)
# Functional alias: ggsave(plot, ...) is equivalent to plot.save(...).
ggsave = ggplot.save
def save_as_pdf_pages(
    plots: Iterable[ggplot],
    filename: Optional[str | Path] = None,
    path: str | None = None,
    verbose: bool = True,
    **kwargs: Any,
):
    """
    Save multiple [](`~plotnine.ggplot`) objects to a PDF file, one per page.

    Parameters
    ----------
    plots :
        Plot objects to write to file. `plots` may be either a
        collection such as a [](:class:`list`) or [](:class:`set`)

        ```python
        base_plot = ggplot(…)
        plots = [base_plot + ggtitle('%d of 3' % i) for i in range(1, 3)]
        save_as_pdf_pages(plots)
        ```

        or, a generator that yields [](`~plotnine.ggplot`) objects:

        ```python
        def myplots():
            for i in range(1, 3):
                yield ggplot(…) + ggtitle('%d of 3' % i)
        save_as_pdf_pages(myplots())
        ```
    filename :
        File name to write the plot to. If not specified, a name
        like “plotnine-save-<hash>.pdf” is used.
    path :
        Path to save plot to (if you just want to set path and
        not filename).
    verbose :
        If `True`, print the saving information.
    kwargs :
        Additional arguments to pass to
        [](:meth:`~matplotlib.figure.Figure.savefig`).

    Notes
    -----
    Using pandas [](:meth:`~pandas.DataFrame.groupby`) methods, tidy data
    can be "faceted" across pages:

    ```python
    from plotnine.data import mtcars

    def facet_pages(column)
        base_plot = [
            aes(x="wt", y="mpg", label="name"),
            geom_text(),
        ]
        for label, group_data in mtcars.groupby(column):
            yield ggplot(group_data) + base_plot + ggtitle(label)

    save_as_pdf_pages(facet_pages('cyl'))
    ```

    Unlike [](:meth:`~plotnine.ggplot.save`),
    [](:meth:`~plotnine.save_as_pdf_pages`)
    does not process arguments for `height` or `width`. To set the figure
    size, add [](`~plotnine.themes.themeable.figure_size`) to the theme
    for some or all of the objects in `plots`:

    ```python
    plot = ggplot(…)
    # The following are equivalent
    plot.save('filename.pdf', height=6, width=8)
    save_as_pdf_pages([plot + theme(figure_size=(8, 6))])
    ```
    """
    from matplotlib.backends.backend_pdf import PdfPages

    # as in ggplot.save()
    fig_kwargs = {"bbox_inches": "tight"}
    fig_kwargs.update(kwargs)

    # If plots is already an iterator, this is a no-op; otherwise
    # convert a list, etc. to an iterator
    plots = iter(plots)

    # filename, depends on the object
    if filename is None:
        # Take the first element from the iterator, store it, and
        # use it to generate a file name
        peek = [next(plots)]
        plots = chain(peek, plots)
        filename = peek[0]._save_filename("pdf")

    if path:
        filename = Path(path) / filename

    if verbose:
        # BUG FIX: this previously warned a literal "Filename: (unknown)"
        # (an f-string with no placeholder); report the real destination.
        warn(f"Filename: {filename}", PlotnineWarning)

    with PdfPages(filename) as pdf:
        # Save each plot as a page in the PDF file
        for plot in plots:
            fig = plot.draw()
            with plot_context(plot).rc_context:
                pdf.savefig(fig, **fig_kwargs)
| ggplot |
python | ray-project__ray | python/ray/serve/_private/replica.py | {
"start": 56833,
"end": 88635
} | class ____:
"""Wraps a user-provided callable that is used to handle requests to a replica."""
service_unavailable_exceptions = (BackPressureError, DeploymentUnavailableError)
def __init__(
    self,
    deployment_def: Callable,
    init_args: Tuple,
    init_kwargs: Dict,
    *,
    deployment_id: DeploymentID,
    run_sync_methods_in_threadpool: bool,
    run_user_code_in_separate_thread: bool,
    local_testing_mode: bool,
    deployment_config: DeploymentConfig,
):
    """Validate the deployment definition and set up the user-code event loop.

    When `run_user_code_in_separate_thread` is set, a dedicated event loop
    is created and run on a daemon thread; otherwise user code shares the
    replica's current running loop.
    """
    if not (inspect.isfunction(deployment_def) or inspect.isclass(deployment_def)):
        raise TypeError(
            "deployment_def must be a function or class. Instead, its type was "
            f"{type(deployment_def)}."
        )

    self._deployment_def = deployment_def
    self._init_args = init_args
    self._init_kwargs = init_kwargs
    self._is_function = inspect.isfunction(deployment_def)
    self._deployment_id = deployment_id
    self._local_testing_mode = local_testing_mode
    self._destructor_called = False
    self._run_sync_methods_in_threadpool = run_sync_methods_in_threadpool
    self._run_user_code_in_separate_thread = run_user_code_in_separate_thread
    self._warned_about_sync_method_change = False
    self._cached_user_method_info: Dict[str, UserMethodInfo] = {}

    # This is for performance optimization https://docs.python.org/3/howto/logging.html#optimization
    self._is_enabled_for_debug = logger.isEnabledFor(logging.DEBUG)

    # Will be populated in `initialize_callable`.
    self._callable = None

    self._deployment_config = deployment_config

    if self._run_user_code_in_separate_thread:
        # All interactions with user code run on this loop to avoid blocking the
        # replica's main event loop.
        self._user_code_event_loop: asyncio.AbstractEventLoop = (
            asyncio.new_event_loop()
        )

        def _run_user_code_event_loop():
            # Required so that calls to get the current running event loop work
            # properly in user code.
            asyncio.set_event_loop(self._user_code_event_loop)
            self._user_code_event_loop.run_forever()

        # Daemon thread so it never blocks process shutdown.
        self._user_code_event_loop_thread = threading.Thread(
            daemon=True,
            target=_run_user_code_event_loop,
        )
        self._user_code_event_loop_thread.start()
    else:
        self._user_code_event_loop = asyncio.get_running_loop()
@property
def event_loop(self) -> asyncio.AbstractEventLoop:
    # Loop on which all user-code coroutines are scheduled.
    return self._user_code_event_loop

def _run_user_code(f: Callable) -> Callable:
    """Decorator to run a coroutine method on the user code event loop.

    The method will be modified to be a sync function that returns a
    `asyncio.Future` if user code is running in a separate event loop.
    Otherwise, it will return the coroutine directly.
    """
    assert inspect.iscoroutinefunction(
        f
    ), "_run_user_code can only be used on coroutine functions."

    @functools.wraps(f)
    def wrapper(self, *args, **kwargs) -> Any:
        coro = f(self, *args, **kwargs)
        if self._run_user_code_in_separate_thread:
            fut = asyncio.run_coroutine_threadsafe(coro, self._user_code_event_loop)
            if self._local_testing_mode:
                # Local testing has no outer asyncio loop, so hand back
                # the concurrent.futures.Future directly.
                return fut

            return asyncio.wrap_future(fut)
        else:
            return coro

    return wrapper

@_run_user_code
async def set_sync_method_threadpool_limit(self, limit: int):
    # NOTE(edoakes): the limit is thread local, so this must
    # be run on the user code event loop.
    to_thread.current_default_thread_limiter().total_tokens = limit
    def get_user_method_info(self, method_name: str) -> UserMethodInfo:
        """Get UserMethodInfo for the provided call method name.

        This method is cached to avoid repeated expensive calls to `inspect.signature`.

        Raises:
            RayServeException: If the callable has no method named
                `method_name` (the message lists the available methods).
        """
        if method_name in self._cached_user_method_info:
            return self._cached_user_method_info[method_name]

        if self._is_function:
            # Function deployments have a single entry point: the function itself.
            user_method = self._callable
        elif hasattr(self._callable, method_name):
            user_method = getattr(self._callable, method_name)
        else:
            # Filter to methods that don't start with '__' prefix.
            def callable_method_filter(attr):
                if attr.startswith("__"):
                    return False
                elif not callable(getattr(self._callable, attr)):
                    return False

                return True

            methods = list(filter(callable_method_filter, dir(self._callable)))
            raise RayServeException(
                f"Tried to call a method '{method_name}' "
                "that does not exist. Available methods: "
                f"{methods}."
            )

        info = UserMethodInfo.from_callable(
            user_method,
            is_asgi_app=isinstance(self._callable, ASGIAppReplicaWrapper),
        )
        # Cache for subsequent requests to the same method.
        self._cached_user_method_info[method_name] = info
        return info
    async def _send_user_result_over_asgi(
        self,
        result: Any,
        asgi_args: ASGIArgs,
    ):
        """Handle the result from user code and send it over the ASGI interface.

        If the result is already a Response type, it is sent directly. Otherwise, it
        is converted to a custom Response type that handles serialization for
        common Python objects.
        """
        scope, receive, send = asgi_args.to_args_tuple()
        if isinstance(result, starlette.responses.Response):
            # Already a Starlette response: send as-is.
            await result(scope, receive, send)
        else:
            # Wrap arbitrary Python values in Serve's serializing Response.
            await Response(result).send(scope, receive, send)
    async def _call_func_or_gen(
        self,
        callable: Callable,
        *,
        args: Optional[Tuple[Any]] = None,
        kwargs: Optional[Dict[str, Any]] = None,
        is_streaming: bool = False,
        generator_result_callback: Optional[Callable] = None,
        run_sync_methods_in_threadpool_override: Optional[bool] = None,
    ) -> Tuple[Any, bool]:
        """Call the callable with the provided arguments.

        This is a convenience wrapper that will work for `def`, `async def`,
        generator, and async generator functions.

        Returns the result and a boolean indicating if the result was a sync generator
        that has already been consumed.
        """
        sync_gen_consumed = False
        args = args if args is not None else tuple()
        kwargs = kwargs if kwargs is not None else dict()
        # Per-call override takes precedence over the replica-wide setting.
        run_sync_in_threadpool = (
            self._run_sync_methods_in_threadpool
            if run_sync_methods_in_threadpool_override is None
            else run_sync_methods_in_threadpool_override
        )
        # A "sync method" is a plain def/method that is neither a coroutine
        # function nor an async generator function.
        is_sync_method = (
            inspect.isfunction(callable) or inspect.ismethod(callable)
        ) and not (
            inspect.iscoroutinefunction(callable)
            or inspect.isasyncgenfunction(callable)
        )

        if is_sync_method and run_sync_in_threadpool:
            is_generator = inspect.isgeneratorfunction(callable)
            if is_generator:
                sync_gen_consumed = True
                if not is_streaming:
                    # TODO(edoakes): make this check less redundant with the one in
                    # _handle_user_method_result.
                    raise TypeError(
                        f"Method '{callable.__name__}' returned a generator. "
                        "You must use `handle.options(stream=True)` to call "
                        "generators on a deployment."
                    )

            def run_callable():
                result = callable(*args, **kwargs)
                if is_generator:
                    # Drain the sync generator inside the worker thread,
                    # forwarding each item through the streaming callback.
                    for r in result:
                        generator_result_callback(r)

                    result = None

                return result

            # NOTE(edoakes): we use anyio.to_thread here because it's what Starlette
            # uses (and therefore FastAPI too). The max size of the threadpool is
            # set to max_ongoing_requests in the replica wrapper.
            # anyio.to_thread propagates ContextVars to the worker thread automatically.
            result = await to_thread.run_sync(run_callable)
        else:
            if (
                is_sync_method
                and not self._warned_about_sync_method_change
                and run_sync_methods_in_threadpool_override is None
            ):
                # Warn once per replica about the upcoming default change.
                self._warned_about_sync_method_change = True
                warnings.warn(
                    RAY_SERVE_RUN_SYNC_IN_THREADPOOL_WARNING.format(
                        method_name=callable.__name__,
                    )
                )

            result = callable(*args, **kwargs)
            if inspect.iscoroutine(result):
                result = await result

        return result, sync_gen_consumed
    @property
    def user_callable(self) -> Optional[Callable]:
        """The wrapped user callable, or None before `initialize_callable`."""
        return self._callable
    async def _initialize_asgi_callable(self) -> None:
        """Install Serve exception handlers on the ASGI app and run its startup.

        Only called when the user callable is an `ASGIAppReplicaWrapper`
        (e.g., created via `@serve.ingress`).
        """
        self._callable: ASGIAppReplicaWrapper

        app: Starlette = self._callable.app

        # The reason we need to do this is because BackPressureError is a serve internal exception
        # and FastAPI doesn't know how to handle it, so it treats it as a 500 error.
        # With same reasoning, we are not handling TimeoutError because it's a generic exception
        # the FastAPI knows how to handle. See https://www.starlette.io/exceptions/
        def handle_exception(_: Request, exc: Exception):
            return self.handle_exception(exc)

        for exc in self.service_unavailable_exceptions:
            app.add_exception_handler(exc, handle_exception)

        # Run the ASGI lifespan "startup" events before serving traffic.
        await self._callable._run_asgi_lifespan_startup()
    @_run_user_code
    async def initialize_callable(self) -> Optional[ASGIApp]:
        """Initialize the user callable.

        If the callable is an ASGI app wrapper (e.g., using @serve.ingress), returns
        the ASGI app object, which may be used *read only* by the caller.

        Raises:
            RuntimeError: If called more than once.
        """
        if self._callable is not None:
            raise RuntimeError("initialize_callable should only be called once.")

        # This closure initializes user code and finalizes replica
        # startup. By splitting the initialization step like this,
        # we can already access this actor before the user code
        # has finished initializing.
        # The supervising state manager can then wait
        # for allocation of this replica by using the `is_allocated`
        # method. After that, it calls `reconfigure` to trigger
        # user code initialization.
        logger.info(
            "Started initializing replica.",
            extra={"log_to_stderr": False},
        )

        if self._is_function:
            # Function deployments: the function itself is the callable.
            self._callable = self._deployment_def
        else:
            # This allows deployments to define an async __init__
            # method (mostly used for testing).
            self._callable = self._deployment_def.__new__(self._deployment_def)
            await self._call_func_or_gen(
                self._callable.__init__,
                args=self._init_args,
                kwargs=self._init_kwargs,
                # Always run the constructor on the main user code thread.
                run_sync_methods_in_threadpool_override=False,
            )

            if isinstance(self._callable, ASGIAppReplicaWrapper):
                await self._initialize_asgi_callable()

            if isinstance(self._callable, TaskConsumerWrapper):
                self._callable.initialize_callable(
                    self._deployment_config.max_ongoing_requests
                )
                ServeUsageTag.NUM_REPLICAS_USING_ASYNCHRONOUS_INFERENCE.record("1")

        # Cache optional user-defined hooks (None when not defined).
        self._user_health_check = getattr(self._callable, HEALTH_CHECK_METHOD, None)
        self._user_record_routing_stats = getattr(
            self._callable, REQUEST_ROUTING_STATS_METHOD, None
        )
        self._user_autoscaling_stats = getattr(
            self._callable, "record_autoscaling_stats", None
        )

        logger.info(
            "Finished initializing replica.",
            extra={"log_to_stderr": False},
        )

        return (
            self._callable.app
            if isinstance(self._callable, ASGIAppReplicaWrapper)
            else None
        )
def _raise_if_not_initialized(self, method_name: str):
if self._callable is None:
raise RuntimeError(
f"`initialize_callable` must be called before `{method_name}`."
)
    def call_user_health_check(self) -> Optional[concurrent.futures.Future]:
        """Run the user-defined health check, if any.

        Returns a future for the health check result, or None when the
        deployment does not define a health check method.
        """
        self._raise_if_not_initialized("call_user_health_check")

        # If the user provided a health check, call it on the user code thread. If user
        # code blocks the event loop the health check may time out.
        #
        # To avoid this issue for basic cases without a user-defined health check, skip
        # interacting with the user callable entirely.
        if self._user_health_check is not None:
            return self._call_user_health_check()

        return None
    def call_user_record_routing_stats(self) -> Optional[concurrent.futures.Future]:
        """Run the user-defined routing-stats hook, or return None if absent."""
        self._raise_if_not_initialized("call_user_record_routing_stats")

        if self._user_record_routing_stats is not None:
            return self._call_user_record_routing_stats()

        return None

    def call_record_autoscaling_stats(self) -> Optional[concurrent.futures.Future]:
        """Run the user-defined autoscaling-stats hook, or return None if absent."""
        self._raise_if_not_initialized("call_record_autoscaling_stats")

        if self._user_autoscaling_stats is not None:
            return self._call_user_autoscaling_stats()

        return None
    @_run_user_code
    async def _call_user_health_check(self):
        """Invoke the user's health check on the user-code event loop."""
        await self._call_func_or_gen(self._user_health_check)

    @_run_user_code
    async def _call_user_record_routing_stats(self) -> Dict[str, Any]:
        """Invoke the user's routing-stats hook and return its result."""
        result, _ = await self._call_func_or_gen(self._user_record_routing_stats)
        return result

    @_run_user_code
    async def _call_user_autoscaling_stats(self) -> Dict[str, Union[int, float]]:
        """Invoke the user's autoscaling-stats hook and return its result."""
        result, _ = await self._call_func_or_gen(self._user_autoscaling_stats)
        return result
    @_run_user_code
    async def call_reconfigure(self, user_config: Optional[Any], rank: ReplicaRank):
        """Call the user's reconfigure method with the new config and rank.

        Raises:
            ValueError: If the deployment is a function but user_config/rank
                was specified.
            RayServeException: If user_config/rank was specified but the class
                does not define a reconfigure method.
        """
        self._raise_if_not_initialized("call_reconfigure")

        # NOTE(edoakes): there is the possibility of a race condition in user code if
        # they don't have any form of concurrency control between `reconfigure` and
        # other methods. See https://github.com/ray-project/ray/pull/42159.
        # NOTE(abrar): The only way to subscribe to rank changes is to provide some user config.
        # We can relax this in the future as more use cases arise for rank. I am reluctant to
        # introduce behavior change for a feature we might not need.
        user_subscribed_to_rank = False
        if not self._is_function and hasattr(self._callable, RECONFIGURE_METHOD):
            reconfigure_method = getattr(self._callable, RECONFIGURE_METHOD)
            params = inspect.signature(reconfigure_method).parameters
            user_subscribed_to_rank = "rank" in params

        if user_config is not None or user_subscribed_to_rank:
            if self._is_function:
                raise ValueError(
                    "deployment_def must be a class to use user_config or rank"
                )
            elif not hasattr(self._callable, RECONFIGURE_METHOD):
                raise RayServeException(
                    "user_config or rank specified but deployment "
                    + self._deployment_id
                    + " missing "
                    + RECONFIGURE_METHOD
                    + " method"
                )

            kwargs = {}
            if user_subscribed_to_rank:
                # For backwards compatibility, only pass rank if it is an argument to the reconfigure method.
                kwargs["rank"] = rank

            await self._call_func_or_gen(
                getattr(self._callable, RECONFIGURE_METHOD),
                args=(user_config,),
                kwargs=kwargs,
            )
    async def _handle_user_method_result(
        self,
        result: Any,
        user_method_info: UserMethodInfo,
        *,
        is_streaming: bool,
        is_http_request: bool,
        sync_gen_consumed: bool,
        generator_result_callback: Optional[Callable],
        asgi_args: Optional[ASGIArgs],
    ) -> Any:
        """Postprocess the result of a user method.

        User methods can be regular unary functions or return a sync or async generator.
        This method will raise an exception if the result is not of the expected type
        (e.g., non-generator for streaming requests or generator for unary requests).

        Generator outputs will be written to the `generator_result_callback`.

        Note that HTTP requests are an exception: they are *always* streaming requests,
        but for ASGI apps (like FastAPI), the actual method will be a regular function
        implementing the ASGI `__call__` protocol.
        """
        result_is_gen = inspect.isgenerator(result)
        result_is_async_gen = inspect.isasyncgen(result)

        if is_streaming:
            if result_is_gen:
                # Drain the sync generator into the result callback.
                for r in result:
                    generator_result_callback(r)
            elif result_is_async_gen:
                async for r in result:
                    generator_result_callback(r)
            elif is_http_request and not user_method_info.is_asgi_app:
                # For the FastAPI codepath, the response has already been sent over
                # ASGI, but for the vanilla deployment codepath we need to send it.
                await self._send_user_result_over_asgi(result, asgi_args)
            elif not is_http_request and not sync_gen_consumed:
                # If a unary method is called with stream=True for anything EXCEPT
                # an HTTP request, raise an error.
                # HTTP requests are always streaming regardless of if the method
                # returns a generator, because it's provided the result queue as its
                # ASGI `send` interface to stream back results.
                raise TypeError(
                    f"Called method '{user_method_info.name}' with "
                    "`handle.options(stream=True)` but it did not return a "
                    "generator."
                )
        else:
            assert (
                not is_http_request
            ), "All HTTP requests go through the streaming codepath."

            if result_is_gen or result_is_async_gen:
                raise TypeError(
                    f"Method '{user_method_info.name}' returned a generator. "
                    "You must use `handle.options(stream=True)` to call "
                    "generators on a deployment."
                )

        return result
    async def call_http_entrypoint(
        self,
        request_metadata: RequestMetadata,
        status_code_callback: StatusCodeCallback,
        scope: Scope,
        receive: Receive,
    ) -> Any:
        """Call an HTTP entrypoint and yield batches of ASGI messages.

        The user method runs (possibly on the user-code thread) and pushes
        ASGI messages onto a queue; this async generator drains the queue
        and reports the response status code via `status_code_callback`.
        """
        result_queue = MessageQueue()
        user_method_info = self.get_user_method_info(request_metadata.call_method)

        if self._run_user_code_in_separate_thread:
            # `asyncio.Event`s are not thread safe, so `call_soon_threadsafe` must be
            # used to interact with the result queue from the user callable thread.
            system_event_loop = asyncio.get_running_loop()

            async def enqueue(item: Any):
                system_event_loop.call_soon_threadsafe(result_queue.put_nowait, item)

            call_future = self._call_http_entrypoint(
                user_method_info, scope, receive, enqueue
            )
        else:

            async def enqueue(item: Any):
                result_queue.put_nowait(item)

            call_future = asyncio.create_task(
                self._call_http_entrypoint(user_method_info, scope, receive, enqueue)
            )

        first_message_peeked = False
        async for messages in result_queue.fetch_messages_from_queue(call_future):
            # HTTP (ASGI) messages are only consumed by the proxy so batch them
            # and use vanilla pickle (we know it's safe because these messages
            # only contain primitive Python types).
            # Peek the first ASGI message to determine the status code.
            if not first_message_peeked:
                msg = messages[0]
                first_message_peeked = True
                if msg["type"] == "http.response.start":
                    # HTTP responses begin with exactly one
                    # "http.response.start" message containing the "status"
                    # field. Other response types like WebSockets may not.
                    status_code_callback(str(msg["status"]))

            yield messages
    @_run_user_code
    async def _call_http_entrypoint(
        self,
        user_method_info: UserMethodInfo,
        scope: Scope,
        receive: Receive,
        send: Send,
    ) -> Any:
        """Call an HTTP entrypoint.

        `send` is used to communicate the results of streaming responses.

        Raises any exception raised by the user code so it can be propagated as a
        `RayTaskError`.
        """
        self._raise_if_not_initialized("_call_http_entrypoint")

        if self._is_enabled_for_debug:
            logger.debug(
                f"Started executing request to method '{user_method_info.name}'.",
                extra={"log_to_stderr": False, "serve_access_log": True},
            )

        # Build the argument tuple based on the handler style.
        if user_method_info.is_asgi_app:
            request_args = (scope, receive, send)
        elif not user_method_info.takes_any_args:
            # Edge case to support empty HTTP handlers: don't pass the Request
            # argument if the callable has no parameters.
            request_args = tuple()
        else:
            # Non-FastAPI HTTP handlers take only the starlette `Request`.
            request_args = (starlette.requests.Request(scope, receive, send),)

        receive_task = None
        try:
            # Background task that drains client messages until disconnect
            # (only when the receive object supports it).
            if hasattr(receive, "fetch_until_disconnect"):
                receive_task = asyncio.create_task(receive.fetch_until_disconnect())

            result, sync_gen_consumed = await self._call_func_or_gen(
                user_method_info.callable,
                args=request_args,
                kwargs={},
                is_streaming=True,
                generator_result_callback=send,
            )
            final_result = await self._handle_user_method_result(
                result,
                user_method_info,
                is_streaming=True,
                is_http_request=True,
                sync_gen_consumed=sync_gen_consumed,
                generator_result_callback=send,
                asgi_args=ASGIArgs(scope, receive, send),
            )

            if receive_task is not None and not receive_task.done():
                receive_task.cancel()

            return final_result
        except Exception as e:
            # Vanilla handlers need Serve to send the error response; ASGI
            # apps have their own exception handlers installed.
            if not user_method_info.is_asgi_app:
                response = self.handle_exception(e)
                await self._send_user_result_over_asgi(
                    response, ASGIArgs(scope, receive, send)
                )

            if receive_task is not None and not receive_task.done():
                receive_task.cancel()

            raise
        except asyncio.CancelledError:
            if receive_task is not None and not receive_task.done():
                # Do NOT cancel the receive task if the request has been
                # cancelled, but the call is a batched call. This is
                # because we cannot guarantee cancelling the batched
                # call, so in the case that the call continues executing
                # we should continue fetching data from the client.
                if not hasattr(user_method_info.callable, "set_max_batch_size"):
                    receive_task.cancel()

            raise
    async def call_user_generator(
        self,
        request_metadata: RequestMetadata,
        request_args: Tuple[Any],
        request_kwargs: Dict[str, Any],
    ) -> AsyncGenerator[Any, None]:
        """Calls a user method for a streaming call and yields its results.

        The user method is called in an asyncio `Task` and places its results on a
        `result_queue`. This method pulls and yields from the `result_queue`.
        """
        if not self._run_user_code_in_separate_thread:
            # Same-loop mode: consume the async generator directly.
            gen = await self._call_user_generator(
                request_metadata, request_args, request_kwargs
            )
            async for result in gen:
                yield result
        else:
            result_queue = MessageQueue()

            # `asyncio.Event`s are not thread safe, so `call_soon_threadsafe` must be
            # used to interact with the result queue from the user callable thread.
            system_event_loop = asyncio.get_running_loop()

            def _enqueue_thread_safe(item: Any):
                system_event_loop.call_soon_threadsafe(result_queue.put_nowait, item)

            call_future = self._call_user_generator(
                request_metadata,
                request_args,
                request_kwargs,
                enqueue=_enqueue_thread_safe,
            )

            async for messages in result_queue.fetch_messages_from_queue(call_future):
                for msg in messages:
                    yield msg
    @_run_user_code
    async def _call_user_generator(
        self,
        request_metadata: RequestMetadata,
        request_args: Tuple[Any],
        request_kwargs: Dict[str, Any],
        *,
        enqueue: Optional[Callable] = None,
    ) -> Optional[AsyncGenerator[Any, None]]:
        """Call a user generator.

        The `generator_result_callback` is used to communicate the results of generator
        methods.

        When `enqueue` is provided, results are pushed through it and this
        coroutine returns None; otherwise an async generator is returned.

        Raises any exception raised by the user code so it can be propagated as a
        `RayTaskError`.
        """
        self._raise_if_not_initialized("_call_user_generator")

        request_args = request_args if request_args is not None else tuple()
        request_kwargs = request_kwargs if request_kwargs is not None else dict()

        user_method_info = self.get_user_method_info(request_metadata.call_method)
        callable = user_method_info.callable
        # A "sync method" is a plain def that is neither a coroutine function
        # nor an async generator function.
        is_sync_method = (
            inspect.isfunction(callable) or inspect.ismethod(callable)
        ) and not (
            inspect.iscoroutinefunction(callable)
            or inspect.isasyncgenfunction(callable)
        )

        if self._is_enabled_for_debug:
            logger.debug(
                f"Started executing request to method '{user_method_info.name}'.",
                extra={"log_to_stderr": False, "serve_access_log": True},
            )

        async def _call_generator_async() -> AsyncGenerator[Any, None]:
            gen = callable(*request_args, **request_kwargs)
            if inspect.iscoroutine(gen):
                # Support `async def` methods that return a generator.
                gen = await gen

            if inspect.isgenerator(gen):
                for result in gen:
                    yield result
            elif inspect.isasyncgen(gen):
                async for result in gen:
                    yield result
            else:
                raise TypeError(
                    f"Called method '{user_method_info.name}' with "
                    "`handle.options(stream=True)` but it did not return a generator."
                )

        def _call_generator_sync():
            gen = callable(*request_args, **request_kwargs)
            if inspect.isgenerator(gen):
                for result in gen:
                    enqueue(result)
            else:
                raise TypeError(
                    f"Called method '{user_method_info.name}' with "
                    "`handle.options(stream=True)` but it did not return a generator."
                )

        if enqueue and is_sync_method and self._run_sync_methods_in_threadpool:
            # Run the sync generator in the shared threadpool.
            await to_thread.run_sync(_call_generator_sync)
        elif enqueue:

            async def gen_coro_wrapper():
                async for result in _call_generator_async():
                    enqueue(result)

            await gen_coro_wrapper()
        else:
            return _call_generator_async()
    @_run_user_code
    async def call_user_method(
        self,
        request_metadata: RequestMetadata,
        request_args: Tuple[Any],
        request_kwargs: Dict[str, Any],
    ) -> Any:
        """Call a (unary) user method.

        Raises any exception raised by the user code so it can be propagated as a
        `RayTaskError`.

        Raises:
            TypeError: If the method returned a generator (callers must use
                `handle.options(stream=True)` for generators).
        """
        self._raise_if_not_initialized("call_user_method")

        if self._is_enabled_for_debug:
            logger.debug(
                f"Started executing request to method '{request_metadata.call_method}'.",
                extra={"log_to_stderr": False, "serve_access_log": True},
            )

        user_method_info = self.get_user_method_info(request_metadata.call_method)

        result, _ = await self._call_func_or_gen(
            user_method_info.callable,
            args=request_args,
            kwargs=request_kwargs,
            is_streaming=False,
        )
        if inspect.isgenerator(result) or inspect.isasyncgen(result):
            raise TypeError(
                f"Method '{user_method_info.name}' returned a generator. "
                "You must use `handle.options(stream=True)` to call "
                "generators on a deployment."
            )

        return result
def handle_exception(self, exc: Exception):
if isinstance(exc, self.service_unavailable_exceptions):
return starlette.responses.Response(exc.message, status_code=503)
else:
return starlette.responses.Response(
"Internal Server Error", status_code=500
)
    @_run_user_code
    async def call_destructor(self):
        """Explicitly call the `__del__` method of the user callable.

        Calling this multiple times has no effect; only the first call will
        actually call the destructor.
        """
        if self._callable is None:
            logger.debug(
                "This replica has not yet started running user code. "
                "Skipping __del__."
            )
            return

        # Only run the destructor once. This is safe because there is no `await` between
        # checking the flag here and flipping it to `True` below.
        if self._destructor_called:
            return

        self._destructor_called = True
        try:
            if hasattr(self._callable, "__del__"):
                # Make sure to accept `async def __del__(self)` as well.
                await self._call_func_or_gen(
                    self._callable.__del__,
                    # Always run the destructor on the main user callable thread.
                    run_sync_methods_in_threadpool_override=False,
                )

            if hasattr(self._callable, "__serve_multiplex_wrapper"):
                # Shut down the multiplexed-model wrapper, if present.
                await getattr(self._callable, "__serve_multiplex_wrapper").shutdown()

        except Exception as e:
            # Best-effort: log and swallow so shutdown can proceed.
            logger.exception(f"Exception during graceful shutdown of replica: {e}")
| UserCallableWrapper |
python | dagster-io__dagster | examples/docs_projects/project_ml/src/project_ml/defs/resources.py | {
"start": 4165,
"end": 7077
class ____(dg.ConfigurableResource):
    """Compute configuration resource for ML training and inference.

    This resource supports both local compute and AWS compute configurations.
    For local compute, it manages device selection and batch processing.
    For AWS compute, it provides SageMaker configuration and instance management.
    """

    # Compute environment
    compute_env: str = "local"  # Options: "local", "sagemaker"
    region: str = "us-east-1"  # AWS region for SageMaker

    # Local compute settings
    device: str = "cpu"  # Options: "cpu", "cuda", "mps"
    batch_size: int = 32
    max_workers: int = 4

    # SageMaker settings
    instance_type: str = "ml.m5.xlarge"  # Default SageMaker instance
    instance_count: int = 1

    def get_training_config(self):
        """Get compute configuration for model training.

        Returns:
            dict: Device/batch settings for local compute, or SageMaker
            instance settings for remote compute.

        Raises:
            ValueError: If ``compute_env`` is not "local" or "sagemaker".
        """
        if self.compute_env == "local":
            # Fall back to CPU when the requested accelerator is unavailable.
            # NOTE(review): this mutates the resource's `device` field as a
            # side effect so later calls see the resolved device — confirm
            # this is intentional for a ConfigurableResource.
            if self.device == "cuda" and not torch.cuda.is_available():
                self.device = "cpu"
            elif self.device == "mps" and not torch.backends.mps.is_available():
                self.device = "cpu"

            return {
                "device": self.device,
                "batch_size": self.batch_size,
                "max_workers": self.max_workers,
            }
        elif self.compute_env == "sagemaker":
            return {
                "instance_type": self.instance_type,
                "instance_count": self.instance_count,
                "region": self.region,
                "framework": "pytorch",
                "py_version": "py39",
                "batch_size": self.batch_size,
            }
        else:
            raise ValueError(f"Unsupported compute environment: {self.compute_env}")

    def get_inference_config(self):
        """Get compute configuration for model inference.

        Returns:
            dict: Local settings (same as training) or a smaller default
            SageMaker instance configuration.

        Raises:
            ValueError: If ``compute_env`` is not "local" or "sagemaker".
        """
        if self.compute_env == "local":
            return self.get_training_config()
        elif self.compute_env == "sagemaker":
            return {
                "instance_type": "ml.t2.medium",  # Default to smaller instance for inference
                "instance_count": 1,
                "region": self.region,
                "framework": "pytorch",
                "py_version": "py39",
                "batch_size": self.batch_size,
            }
        else:
            # Bug fix: previously an unsupported environment silently fell
            # through and returned None; raise to match get_training_config.
            raise ValueError(f"Unsupported compute environment: {self.compute_env}")

    def cleanup(self):
        """Cleanup compute resources if necessary."""
        if self.compute_env == "sagemaker":
            # Add cleanup logic for SageMaker endpoints or training jobs if needed
            pass
# Default resource instances wired into the Dagster definitions below.
local_model_storage = LocalModelStoreResource(models_path="./models")
compute_config = ComputeResource(device="cpu", batch_size=32, max_workers=4)


@dg.definitions
def resources():
    """Expose the model-storage and compute resources to Dagster definitions."""
    return dg.Definitions(
        resources={
            "model_storage": local_model_storage,
            "compute": compute_config,
        }
    )
| ComputeResource |
python | doocs__leetcode | lcof2/剑指 Offer II 064. 神奇的字典/Solution.py | {
"start": 0,
"end": 832
class ____:
    """Magic dictionary: `search(w)` is True iff changing exactly one
    character of `w` yields a word in the built dictionary.

    Each word is indexed by all of its single-wildcard patterns
    (e.g. "cat" -> "*at", "c*t", "ca*"); a pattern count > 1 means two
    distinct words share it, and a count of 1 matches only if the query
    itself is not that dictionary word.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """

    def _patterns(self, word):
        # All variants of `word` with exactly one position wildcarded.
        return [word[:idx] + '*' + word[idx + 1 :] for idx in range(len(word))]

    def buildDict(self, dictionary: List[str]) -> None:
        self.words = set(dictionary)
        self.counter = Counter(
            pat for entry in dictionary for pat in self._patterns(entry)
        )

    def search(self, searchWord: str) -> bool:
        def one_edit_match(pat) -> bool:
            count = self.counter[pat]
            return count > 1 or (count == 1 and searchWord not in self.words)

        return any(one_edit_match(pat) for pat in self._patterns(searchWord))
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dictionary)
# param_2 = obj.search(searchWord)
| MagicDictionary |
python | qdrant__qdrant-client | tools/async_client_generator/transformers/call_transformer.py | {
"start": 48,
"end": 842
class ____(ast.NodeTransformer):
    """AST transformer used when generating the async client.

    Rewrites direct calls to classes in `class_replace_map` to their async
    counterparts, and wraps calls to methods listed in `async_methods` in an
    `await` expression.
    """

    def __init__(
        self,
        class_replace_map: Optional[dict[str, str]] = None,
        async_methods: Optional[list[str]] = None,
    ):
        # Mapping of sync class name -> async class name.
        self.class_replace_map = class_replace_map if class_replace_map is not None else {}
        # Method names whose call sites must be awaited.
        self.async_methods = async_methods if async_methods is not None else []

    def visit_Call(self, node: ast.Call) -> Union[ast.AST, ast.Await]:
        # Plain-name call (e.g. `Client(...)`): swap in the replacement class.
        if isinstance(node.func, ast.Name):
            if node.func.id in self.class_replace_map:
                node.func.id = self.class_replace_map[node.func.id]

        # Attribute call (e.g. `client.search(...)`): await async methods.
        if isinstance(node.func, ast.Attribute):
            if node.func.attr in self.async_methods:
                return ast.Await(value=node)

        return self.generic_visit(node)
| CallTransformer |
python | dateutil__dateutil | tests/test_relativedelta.py | {
"start": 27638,
"end": 28325
class ____(unittest.TestCase):
    """Test the weeks property getter"""

    def test_one_day(self):
        # 1 day is less than a full week -> weeks == 0.
        rd = relativedelta(days=1)

        self.assertEqual(rd.days, 1)
        self.assertEqual(rd.weeks, 0)

    def test_minus_one_day(self):
        # Negative single day also truncates to 0 weeks.
        rd = relativedelta(days=-1)

        self.assertEqual(rd.days, -1)
        self.assertEqual(rd.weeks, 0)

    def test_height_days(self):
        # 8 days -> one full week (integer division toward zero).
        rd = relativedelta(days=8)

        self.assertEqual(rd.days, 8)
        self.assertEqual(rd.weeks, 1)

    def test_minus_height_days(self):
        # -8 days -> -1 week.
        rd = relativedelta(days=-8)

        self.assertEqual(rd.days, -8)
        self.assertEqual(rd.weeks, -1)
| RelativeDeltaWeeksPropertyGetterTest |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/hitl.py | {
"start": 15826,
"end": 18242
class ____(HITLOperator, BranchMixIn):
    """BranchOperator based on Human-in-the-loop Response."""

    # Required by the skip-mixin machinery to treat this as a branching task.
    inherits_from_skipmixin = True

    def __init__(self, *, options_mapping: dict[str, str] | None = None, **kwargs) -> None:
        """
        Initialize HITLBranchOperator.

        Args:
            options_mapping:
                A dictionary mapping option labels (must match entries in `self.options`)
                to string values (e.g., task IDs). Defaults to an empty dict if not provided.

        Raises:
            ValueError:
                - If `options_mapping` contains keys not present in `self.options`.
                - If any value in `options_mapping` is not a string.
        """
        super().__init__(**kwargs)

        self.options_mapping = options_mapping or {}
        self.validate_options_mapping()

    def validate_options_mapping(self) -> None:
        """
        Validate that `options_mapping` keys match `self.options` and all values are strings.

        Raises:
            ValueError: If any key is not in `self.options` or any value is not a string.
        """
        # Empty mapping is always valid (options map to themselves).
        if not self.options_mapping:
            return

        # Validate that the choice options are keys in the mapping are the same
        invalid_keys = set(self.options_mapping.keys()) - set(self.options)
        if invalid_keys:
            raise ValueError(
                f"`options_mapping` contains keys that are not in `options`: {sorted(invalid_keys)}"
            )

        # validate that all values are strings
        invalid_entries = {
            k: (v, type(v).__name__) for k, v in self.options_mapping.items() if not isinstance(v, str)
        }
        if invalid_entries:
            raise ValueError(
                f"`options_mapping` values must be strings (task_ids).\nInvalid entries: {invalid_entries}"
            )

    def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
        """Execute the operator and branch based on chosen options."""
        ret = super().execute_complete(context=context, event=event)
        chosen_options = ret["chosen_options"]
        # Map options to task IDs using the mapping, fallback to original option
        chosen_options = [self.options_mapping.get(option, option) for option in chosen_options]
        return self.do_branch(context=context, branches_to_execute=chosen_options)
| HITLBranchOperator |
python | bottlepy__bottle | test/test_sendfile.py | {
"start": 1535,
"end": 7107
} | class ____(unittest.TestCase):
def setUp(self):
e = dict()
wsgiref.util.setup_testing_defaults(e)
b = Bottle()
request.bind(e)
response.bind()
def test_valid(self):
""" SendFile: Valid requests"""
out = static_file(basename, root=root)
self.assertEqual(open(__file__,'rb').read(), out.body.read())
def test_invalid(self):
""" SendFile: Invalid requests"""
self.assertEqual(404, static_file('not/a/file', root=root).status_code)
f = static_file(os.path.join('./../', basename), root='./views/')
self.assertEqual(403, f.status_code)
def test_file_not_readable(self):
if sys.platform == 'win32':
return
if os.geteuid() == 0:
return # Root can read anything
try:
fp, fn = tempfile.mkstemp()
os.chmod(fn, 0)
self.assertEqual(403, static_file(fn, root='/').status_code)
finally:
os.close(fp)
os.unlink(fn)
def test_mime(self):
""" SendFile: Mime Guessing"""
f = static_file(basename, root=root)
self.assertTrue(f.headers['Content-Type'].split(';')[0] in ('application/x-python-code', 'text/x-python'))
f = static_file(basename, root=root, mimetype='some/type')
self.assertEqual('some/type', f.headers['Content-Type'])
f = static_file(basename, root=root, mimetype='text/foo')
self.assertEqual('text/foo; charset=UTF-8', f.headers['Content-Type'])
f = static_file(basename, root=root, mimetype='text/foo', charset='latin1')
self.assertEqual('text/foo; charset=latin1', f.headers['Content-Type'])
def test_mime_gzip(self):
""" SendFile: Mime Guessing"""
try:
fp, fn = tempfile.mkstemp(suffix=".txt.gz")
os.close(fp) # File needs to be closed before it can be accessed on Windows
f = static_file(fn, root='/')
self.assertTrue(f.headers['Content-Type'][0] in ('application/gzip'))
self.assertFalse('Content-Encoding' in f.headers)
finally:
os.close(fp)
os.unlink(fn)
def test_ims(self):
""" SendFile: If-Modified-Since"""
request.environ['HTTP_IF_MODIFIED_SINCE'] = bottle.http_date(time.time())
res = static_file(basename, root=root)
self.assertEqual(304, res.status_code)
self.assertEqual(int(os.stat(__file__).st_mtime), parse_date(res.headers['Last-Modified']))
self.assertAlmostEqual(int(time.time()), parse_date(res.headers['Date']), delta=2)
request.environ['HTTP_IF_MODIFIED_SINCE'] = bottle.http_date(100)
self.assertEqual(open(__file__,'rb').read(), static_file(basename, root=root).body.read())
def test_ims_empty(self):
""" SendFile: Empty If-Modified-Since"""
request.environ['HTTP_IF_MODIFIED_SINCE'] = ''
self.assertEqual(open(__file__, 'rb').read(), static_file(basename, root=root).body.read())
def test_etag(self):
""" SendFile: If-Modified-Since"""
res = static_file(basename, root=root)
self.assertTrue('ETag' in res.headers)
self.assertEqual(200, res.status_code)
etag = res.headers['ETag']
request.environ['HTTP_IF_NONE_MATCH'] = etag
res = static_file(basename, root=root)
self.assertTrue('ETag' in res.headers)
self.assertEqual(etag, res.headers['ETag'])
self.assertEqual(304, res.status_code)
request.environ['HTTP_IF_NONE_MATCH'] = etag
res = static_file(basename2, root=root2)
self.assertTrue('ETag' in res.headers)
self.assertNotEqual(etag, res.headers['ETag'])
self.assertEqual(200, res.status_code)
def test_download(self):
""" SendFile: Download as attachment """
f = static_file(basename, root=root, download="foo.mp3")
self.assertEqual('audio/mpeg', f.headers['Content-Type'])
f = static_file(basename, root=root, download=True)
self.assertEqual('attachment; filename="%s"' % basename, f.headers['Content-Disposition'])
request.environ['HTTP_IF_MODIFIED_SINCE'] = bottle.http_date(100)
f = static_file(basename, root=root)
self.assertEqual(open(__file__,'rb').read(), f.body.read())
def test_range(self):
request.environ['HTTP_RANGE'] = 'bytes=10-25,-80'
f = static_file(basename, root=root)
c = open(__file__, 'rb'); c.seek(10)
self.assertEqual(c.read(16), tob('').join(f.body))
self.assertEqual('bytes 10-25/%d' % len(open(__file__, 'rb').read()),
f.headers['Content-Range'])
self.assertEqual('bytes', f.headers['Accept-Ranges'])
def test_range_parser(self):
r = lambda rs: list(parse_range_header(rs, 100))
self.assertEqual([(90, 100)], r('bytes=-10'))
self.assertEqual([(10, 100)], r('bytes=10-'))
self.assertEqual([(5, 11)], r('bytes=5-10'))
self.assertEqual([(10, 100), (90, 100), (5, 11)], r('bytes=10-,-10,5-10'))
    def test_custom_headers(self):
        """ SendFile: Custom headers """
        headers = {'X-Custom-Header': 'test-value'}
        headers_orig = headers.copy()
        res = static_file(basename, root=root, headers=headers)
        # Custom headers must be passed through to the response unchanged.
        self.assertTrue('X-Custom-Header' in res.headers)
        self.assertEqual('test-value', res.headers['X-Custom-Header'])
        # Check the passed in headers dict isn't modified.
        self.assertEqual(headers_orig, headers)
| TestSendFile |
python | python-pillow__Pillow | src/PIL/QoiImagePlugin.py | {
"start": 4640,
"end": 8572
class ____(ImageFile.PyEncoder):
    """PyEncoder that writes pixel data in the QOI format.

    Emits the five QOI chunk types (RUN, INDEX, DIFF, LUMA, RGB/RGBA),
    preferring the shortest encoding for each pixel.
    """

    _pushes_fd = True

    # Per-encode state; encode() rebinds these on the instance before use,
    # so the class-level mutable dict is never shared in practice.
    _previous_pixel: tuple[int, int, int, int] | None = None
    _previously_seen_pixels: dict[int, tuple[int, int, int, int]] = {}
    _run = 0

    def _write_run(self) -> bytes:
        # QOI_OP_RUN stores the run length with a bias of -1 in the low 6 bits.
        data = o8(0b11000000 | (self._run - 1))  # QOI_OP_RUN
        self._run = 0
        return data

    def _delta(self, left: int, right: int) -> int:
        # Wrapping (mod 256) difference mapped into the signed range [-128, 127].
        result = (left - right) & 255
        if result >= 128:
            result -= 256
        return result

    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
        assert self.im is not None

        # QOI-specified initial state: empty index table slot 0 and an
        # opaque-black previous pixel.
        self._previously_seen_pixels = {0: (0, 0, 0, 0)}
        self._previous_pixel = (0, 0, 0, 255)

        data = bytearray()
        w, h = self.im.size
        bands = Image.getmodebands(self.mode)

        for y in range(h):
            for x in range(w):
                pixel = self.im.getpixel((x, y))
                if bands == 3:
                    # Treat RGB input as fully opaque RGBA.
                    pixel = (*pixel, 255)

                if pixel == self._previous_pixel:
                    self._run += 1
                    # A run chunk can hold at most 62 repeats; flush when full.
                    if self._run == 62:
                        data += self._write_run()
                else:
                    if self._run:
                        data += self._write_run()

                    r, g, b, a = pixel
                    # QOI's index-table hash of the pixel.
                    hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
                    if self._previously_seen_pixels.get(hash_value) == pixel:
                        data += o8(hash_value)  # QOI_OP_INDEX
                    elif self._previous_pixel:
                        self._previously_seen_pixels[hash_value] = pixel

                        prev_r, prev_g, prev_b, prev_a = self._previous_pixel
                        if prev_a == a:
                            # Alpha unchanged: try the compact DIFF/LUMA forms.
                            delta_r = self._delta(r, prev_r)
                            delta_g = self._delta(g, prev_g)
                            delta_b = self._delta(b, prev_b)

                            if (
                                -2 <= delta_r < 2
                                and -2 <= delta_g < 2
                                and -2 <= delta_b < 2
                            ):
                                data += o8(
                                    0b01000000
                                    | (delta_r + 2) << 4
                                    | (delta_g + 2) << 2
                                    | (delta_b + 2)
                                )  # QOI_OP_DIFF
                            else:
                                # LUMA encodes red/blue relative to the green delta.
                                delta_gr = self._delta(delta_r, delta_g)
                                delta_gb = self._delta(delta_b, delta_g)
                                if (
                                    -8 <= delta_gr < 8
                                    and -32 <= delta_g < 32
                                    and -8 <= delta_gb < 8
                                ):
                                    data += o8(
                                        0b10000000 | (delta_g + 32)
                                    )  # QOI_OP_LUMA
                                    data += o8((delta_gr + 8) << 4 | (delta_gb + 8))
                                else:
                                    data += o8(0b11111110)  # QOI_OP_RGB
                                    data += bytes(pixel[:3])
                        else:
                            data += o8(0b11111111)  # QOI_OP_RGBA
                            data += bytes(pixel)

                self._previous_pixel = pixel

        if self._run:
            data += self._write_run()
        # QOI end-of-stream marker: seven 0x00 bytes followed by 0x01.
        data += bytes((0, 0, 0, 0, 0, 0, 0, 1))  # padding

        return len(data), 0, data
# Wire the QOI plugin into Pillow's registry: open/save hooks, codec
# implementations, and the ".qoi" file extension.
Image.register_open(QoiImageFile.format, QoiImageFile, _accept)
Image.register_decoder("qoi", QoiDecoder)
Image.register_extension(QoiImageFile.format, ".qoi")
Image.register_save(QoiImageFile.format, _save)
Image.register_encoder("qoi", QoiEncoder)
| QoiEncoder |
python | conda__conda | conda/exception_handler.py | {
"start": 521,
"end": 14976
class ____:
    """Run a callable and convert any raised exception into a printed conda
    error report plus an integer exit code.

    Expected (``CondaError``) failures print concise messages; unexpected
    ones produce a full report that may optionally be uploaded to the conda
    maintainers (see :meth:`_upload`).
    """

    # FUTURE: Python 3.10+, use typing.ParamSpec
    def __call__(self, func: Callable[..., T], *args, **kwargs) -> T | int:
        try:
            return func(*args, **kwargs)
        # BaseException (not a bare ``except:``) so SystemExit and
        # KeyboardInterrupt are routed through handle_exception too.
        except BaseException:
            _, exc_val, exc_tb = sys.exc_info()
            return self.handle_exception(exc_val, exc_tb)

    def write_out(self, *content: str) -> None:
        """Emit lines to the user via conda's stderr logger."""
        from logging import getLogger

        from .cli.main import init_loggers

        init_loggers()
        getLogger("conda.stderr").info("\n".join(content))

    @property
    def http_timeout(self):
        """(connect, read) timeouts for the report upload request."""
        from .base.context import context

        return context.remote_connect_timeout_secs, context.remote_read_timeout_secs

    @property
    def user_agent(self):
        from .base.context import context

        return context.user_agent

    @property
    def error_upload_url(self):
        from .base.context import context

        return context.error_upload_url

    def handle_exception(self, exc_val: BaseException, exc_tb: TracebackType) -> int:
        """Dispatch *exc_val* to the appropriate handler and return an exit code."""
        from errno import ENOSPC

        from .exceptions import (
            CondaError,
            CondaMemoryError,
            NoSpaceLeftError,
        )

        if isinstance(exc_val, CondaError):
            if exc_val.reportable:
                return self.handle_reportable_application_exception(exc_val, exc_tb)
            else:
                return self.handle_application_exception(exc_val, exc_tb)
        if isinstance(exc_val, EnvironmentError):
            # Translate "disk full" OS errors into conda's NoSpaceLeftError.
            if getattr(exc_val, "errno", None) == ENOSPC:
                return self.handle_application_exception(
                    NoSpaceLeftError(exc_val), exc_tb
                )
        if isinstance(exc_val, MemoryError):
            return self.handle_application_exception(CondaMemoryError(exc_val), exc_tb)
        if isinstance(exc_val, KeyboardInterrupt):
            self._print_conda_exception(CondaError("KeyboardInterrupt"), exc_tb)
            return 1
        if isinstance(exc_val, SystemExit):
            # NOTE: SystemExit.code may be None or a string; passed through as-is.
            return exc_val.code
        return self.handle_unexpected_exception(exc_val, exc_tb)

    def handle_application_exception(
        self, exc_val: BaseException, exc_tb: TracebackType
    ) -> int:
        """Print a known conda error and return its exit code."""
        self._print_conda_exception(exc_val, exc_tb)
        return exc_val.return_code

    def _print_conda_exception(
        self,
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        from .exceptions import print_conda_exception

        print_conda_exception(exc_val, exc_tb)

    def handle_unexpected_exception(
        self, exc_val: BaseException, exc_tb: TracebackType
    ) -> int:
        """Build, print, and possibly upload a report for an unknown error."""
        error_report = self.get_error_report(exc_val, exc_tb)
        self.print_unexpected_error_report(error_report)
        self._upload(error_report)
        rc = getattr(exc_val, "return_code", None)
        return rc if rc is not None else 1

    def handle_reportable_application_exception(
        self, exc_val: BaseException, exc_tb: TracebackType
    ) -> int:
        """Like handle_application_exception, but with a full report and upload."""
        error_report = self.get_error_report(exc_val, exc_tb)
        from .base.context import context

        if context.json:
            error_report.update(exc_val.dump_map())
        self.print_expected_error_report(error_report)
        self._upload(error_report)
        return exc_val.return_code

    def get_error_report(
        self,
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> dict[str, str]:
        """Assemble the error-report dict: exception details, command, conda info."""
        from .exceptions import CondaError, _format_exc

        command = " ".join(ensure_text_type(s) for s in sys.argv)
        info_dict = {}
        if " info" not in command:
            # get info_dict, but if we get an exception here too, record it without trampling
            # the original exception
            try:
                from .cli.main_info import get_info_dict

                info_dict = get_info_dict()
            except Exception as info_e:
                info_traceback = _format_exc()
                info_dict = {
                    "error": repr(info_e),
                    "exception_name": info_e.__class__.__name__,
                    "exception_type": str(exc_val.__class__),
                    "traceback": info_traceback,
                }

        error_report = {
            "error": repr(exc_val),
            "exception_name": exc_val.__class__.__name__,
            "exception_type": str(exc_val.__class__),
            "command": command,
            "traceback": _format_exc(exc_val, exc_tb),
            "conda_info": info_dict,
        }

        if isinstance(exc_val, CondaError):
            error_report["conda_error_components"] = exc_val.dump_map()

        return error_report

    def print_unexpected_error_report(self, error_report: dict[str, str]) -> None:
        from .base.context import context

        if context.json:
            from .cli.common import stdout_json

            stdout_json(error_report)
        else:
            message_builder = []
            message_builder.append("")
            message_builder.append(
                "# >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<<"
            )
            message_builder.append("")
            message_builder.extend(
                "    " + line for line in error_report["traceback"].splitlines()
            )
            message_builder.append("")
            message_builder.append("`$ {}`".format(error_report["command"]))
            message_builder.append("")
            if error_report["conda_info"]:
                from .cli.main_info import get_env_vars_str, get_main_info_str

                try:
                    # TODO: Sanitize env vars to remove secrets (e.g credentials for PROXY)
                    message_builder.append(get_env_vars_str(error_report["conda_info"]))
                    message_builder.append(
                        get_main_info_str(error_report["conda_info"])
                    )
                except Exception as e:
                    log.warning("%r", e, exc_info=True)
                    message_builder.append("conda info could not be constructed.")
                    message_builder.append(f"{e!r}")
            message_builder.extend(
                [
                    "",
                    # FIX: a missing trailing comma here used to concatenate this
                    # line with the following empty string, dropping a blank line.
                    "An unexpected error has occurred. Conda has prepared the above report.",
                    "",
                    "If you suspect this error is being caused by a malfunctioning plugin,",
                    "consider using the --no-plugins option to turn off plugins.",
                    "",
                    "Example: conda --no-plugins install <package>",
                    "",
                    "Alternatively, you can set the CONDA_NO_PLUGINS environment variable on",
                    "the command line to run the command without plugins enabled.",
                    "",
                    "Example: CONDA_NO_PLUGINS=true conda install <package>",
                    "",
                ]
            )
            self.write_out(*message_builder)

    def print_expected_error_report(self, error_report: dict[str, str]) -> None:
        from .base.context import context

        if context.json:
            from .cli.common import stdout_json

            stdout_json(error_report)
        else:
            message_builder = []
            message_builder.append("")
            message_builder.append(
                "# >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<<"
            )
            message_builder.append("")
            message_builder.append("`$ {}`".format(error_report["command"]))
            message_builder.append("")
            if error_report["conda_info"]:
                from .cli.main_info import get_env_vars_str, get_main_info_str

                try:
                    # TODO: Sanitize env vars to remove secrets (e.g credentials for PROXY)
                    message_builder.append(get_env_vars_str(error_report["conda_info"]))
                    message_builder.append(
                        get_main_info_str(error_report["conda_info"])
                    )
                except Exception as e:
                    log.warning("%r", e, exc_info=True)
                    message_builder.append("conda info could not be constructed.")
                    message_builder.append(f"{e!r}")
            message_builder.append("")
            message_builder.append(
                "V V V V V V V V V V V V V V V V V V V V V V V V V V V V V V V"
            )
            message_builder.append("")

            message_builder.extend(error_report["error"].splitlines())
            message_builder.append("")

            message_builder.append(
                "A reportable application error has occurred. Conda has prepared the above report."
            )
            message_builder.append("")
            self.write_out(*message_builder)

    @cached_property
    def _isatty(self) -> bool:
        # Treat Windows as interactive; isatty(0) can raise on odd stdin setups.
        try:
            return os.isatty(0) or on_win
        except Exception as e:
            log.debug("%r", e)
            return True

    def _upload(self, error_report: dict[str, str]) -> None:
        """Determine whether or not to upload the error report."""
        from .base.context import context

        post_upload = False
        if context.report_errors is False:
            # no prompt and no submission
            do_upload = False
        elif context.report_errors is True or context.always_yes:
            # no prompt and submit
            do_upload = True
        elif context.json or context.quiet or not self._isatty:
            # never prompt under these conditions, submit iff always_yes
            do_upload = bool(not context.offline and context.always_yes)
        else:
            # prompt whether to submit
            do_upload = self._ask_upload()
            post_upload = True

        # the upload state is one of the following:
        #   - True: upload error report
        #   - False: do not upload error report
        #   - None: while prompting a timeout occurred
        if do_upload:
            # user wants report to be submitted
            self._execute_upload(error_report)

        if post_upload:
            # post submission text
            self._post_upload(do_upload)

    def _ask_upload(self) -> bool:
        """Prompt the user (40 s timeout) whether to send the report."""
        from .auxlib.type_coercion import boolify
        from .common.io import timeout

        try:
            do_upload = timeout(
                40,
                partial(
                    input,
                    "If submitted, this report will be used by core maintainers to improve\n"
                    "future releases of conda.\n"
                    "Would you like conda to send this report to the core maintainers? "
                    "[y/N]: ",
                ),
            )
            return do_upload and boolify(do_upload)
        except Exception as e:
            log.debug("%r", e)
            return False

    def _execute_upload(self, error_report: dict[str, Any]) -> None:
        """POST the report to error_upload_url, scrubbing the local username."""
        import getpass

        from .common.serialize import json

        headers = {
            "User-Agent": self.user_agent,
        }
        _timeout = self.http_timeout
        username = getpass.getuser()
        error_report["is_ascii"] = (
            True if all(ord(c) < 128 for c in username) else False
        )
        error_report["has_spaces"] = True if " " in str(username) else False
        data = json.dumps(error_report, sort_keys=True) + "\n"
        data = data.replace(str(username), "USERNAME_REMOVED")
        response = None
        try:
            # requests does not follow HTTP standards for redirects of non-GET methods
            # That is, when following a 301 or 302, it turns a POST into a GET.
            # And no way to disable.  WTF
            import requests

            redirect_counter = 0
            url = self.error_upload_url
            response = requests.post(
                url, headers=headers, timeout=_timeout, data=data, allow_redirects=False
            )
            response.raise_for_status()
            while response.status_code in (301, 302) and response.headers.get(
                "Location"
            ):
                url = response.headers["Location"]
                response = requests.post(
                    url,
                    headers=headers,
                    timeout=_timeout,
                    data=data,
                    allow_redirects=False,
                )
                response.raise_for_status()
                redirect_counter += 1
                if redirect_counter > 15:
                    from . import CondaError

                    raise CondaError("Redirect limit exceeded")
            log.debug("upload response status: %s", response and response.status_code)
        except Exception as e:  # pragma: no cover
            log.info("%r", e)
        try:
            if response and response.ok:
                self.write_out("Upload successful.")
            else:
                self.write_out("Upload did not complete.")
                if response and response.status_code:
                    self.write_out(f" HTTP {response.status_code}")
        except Exception as e:
            log.debug(f"{e!r}")

    def _post_upload(self, do_upload: bool) -> None:
        """Print follow-up text after the upload prompt/attempt."""
        if do_upload is True:
            # report was submitted
            self.write_out(
                "",
                "Thank you for helping to improve conda.",
                "Opt-in to always sending reports (and not see this message again)",
                "by running",
                "",
                "    $ conda config --set report_errors true",
                "",
            )
        elif do_upload is None:
            # timeout was reached while prompting user
            self.write_out(
                "",
                "Timeout reached. No report sent.",
                "",
            )
        else:
            # no report submitted
            self.write_out(
                "",
                "No report sent. To permanently opt-out, use",
                "",
                "    $ conda config --set report_errors false",
                "",
            )
def conda_exception_handler(func: Callable[..., T], *args, **kwargs) -> T | int:
    """Invoke *func* under an ExceptionHandler.

    Returns the callable's result on success, or an integer exit code when
    an exception was handled.
    """
    return ExceptionHandler()(func, *args, **kwargs)
| ExceptionHandler |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/spacy.py | {
"start": 598,
"end": 2323
class ____(TextSplitter):
    """Splitting text using Spacy package.

    Per default, Spacy's `en_core_web_sm` model is used and
    its default max_length is 1000000 (it is the length of maximum character
    this model takes which can be increased for large files). For a faster, but
    potentially less accurate splitting, you can use `pipeline='sentencizer'`.
    """

    def __init__(
        self,
        separator: str = "\n\n",
        pipeline: str = "en_core_web_sm",
        max_length: int = 1_000_000,
        *,
        strip_whitespace: bool = True,
        **kwargs: Any,
    ) -> None:
        """Initialize the spacy text splitter.

        Args:
            separator: String inserted between merged sentence chunks.
            pipeline: Spacy pipeline name, or "sentencizer" for the fast
                rule-based splitter.
            max_length: Maximum number of characters the spacy model accepts.
            strip_whitespace: If True, drop trailing whitespace from each
                sentence before merging.
            **kwargs: Forwarded to the ``TextSplitter`` base class.
        """
        super().__init__(**kwargs)
        self._tokenizer = _make_spacy_pipeline_for_splitting(
            pipeline, max_length=max_length
        )
        self._separator = separator
        self._strip_whitespace = strip_whitespace

    def split_text(self, text: str) -> list[str]:
        """Split incoming text and return chunks."""
        # Lazily yield sentence strings; _merge_splits recombines them into
        # chunks that respect the configured chunk size.
        splits = (
            s.text if self._strip_whitespace else s.text_with_ws
            for s in self._tokenizer(text).sents
        )
        return self._merge_splits(splits, self._separator)
def _make_spacy_pipeline_for_splitting(
    pipeline: str, *, max_length: int = 1_000_000
) -> Language:
    """Build the spacy pipeline used for sentence splitting.

    Raises ImportError if spacy is not installed.
    """
    if not _HAS_SPACY:
        msg = "Spacy is not installed, please install it with `pip install spacy`."
        raise ImportError(msg)
    if pipeline == "sentencizer":
        # Fast rule-based sentence boundary detection; no model download needed.
        sentencizer: Language = English()
        sentencizer.add_pipe("sentencizer")
    else:
        # NER/tagger components are excluded since only sentence boundaries
        # are needed.
        # NOTE(review): max_length is applied only on this branch — the
        # sentencizer path keeps spacy's default limit; confirm intended.
        sentencizer = spacy.load(pipeline, exclude=["ner", "tagger"])
        sentencizer.max_length = max_length
    return sentencizer
| SpacyTextSplitter |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 10075,
"end": 10180
class ____(_ConfigCreateModel):
    # Identifies which generative-search module this collection config enables.
    generative: Union[GenerativeSearches, _EnumLikeStr]
| _GenerativeProvider |
python | sympy__sympy | sympy/physics/quantum/qubit.py | {
"start": 13462,
"end": 25958
class ____(IntQubitState, QubitBra):
    """A qubit bra that store integers as binary numbers in qubit values."""

    @classmethod
    def dual_class(self):
        # The dual of this bra class is the corresponding ket class.
        return IntQubit
#-----------------------------------------------------------------------------
# Qubit <---> Matrix conversion functions
#-----------------------------------------------------------------------------
def matrix_to_qubit(matrix):
    """Convert from the matrix repr. to a sum of Qubit objects.

    Parameters
    ----------
    matrix : Matrix, numpy.matrix, scipy.sparse
        The matrix to build the Qubit representation of. This works with
        SymPy matrices, numpy matrices and scipy.sparse sparse matrices.

    Examples
    ========

    Represent a state and then go back to its qubit form:

        >>> from sympy.physics.quantum.qubit import matrix_to_qubit, Qubit
        >>> from sympy.physics.quantum.represent import represent
        >>> q = Qubit('01')
        >>> matrix_to_qubit(represent(q))
        |01>
    """
    # Determine the format based on the type of the input matrix
    format = 'sympy'
    if isinstance(matrix, numpy_ndarray):
        format = 'numpy'
    if isinstance(matrix, scipy_sparse_matrix):
        format = 'scipy.sparse'

    # Make sure it is of correct dimensions for a Qubit-matrix representation.
    # This logic should work with sympy, numpy or scipy.sparse matrices.
    # A row vector is a bra, a column vector is a ket.
    if matrix.shape[0] == 1:
        mlistlen = matrix.shape[1]
        nqubits = log(mlistlen, 2)
        ket = False
        cls = QubitBra
    elif matrix.shape[1] == 1:
        mlistlen = matrix.shape[0]
        nqubits = log(mlistlen, 2)
        ket = True
        cls = Qubit
    else:
        raise QuantumError(
            'Matrix must be a row/column vector, got %r' % matrix
        )
    if not isinstance(nqubits, Integer):
        raise QuantumError('Matrix must be a row/column vector of size '
                           '2**nqubits, got: %r' % matrix)
    # Go through each item in matrix, if element is non-zero, make it into a
    # Qubit item times the element.
    result = 0
    for i in range(mlistlen):
        if ket:
            element = matrix[i, 0]
        else:
            element = matrix[0, i]
        if format in ('numpy', 'scipy.sparse'):
            element = complex(element)
        if element:
            # Form Qubit array; 0 in bit-locations where i is 0, 1 in
            # bit-locations where i is 1
            qubit_array = [int(i & (1 << x) != 0) for x in range(nqubits)]
            qubit_array.reverse()
            result = result + element*cls(*qubit_array)

    # If SymPy simplified by pulling out a constant coefficient, undo that.
    if isinstance(result, (Mul, Add, Pow)):
        result = result.expand()

    return result
def matrix_to_density(mat):
    """
    Works by finding the eigenvectors and eigenvalues of the matrix.
    We know we can decompose rho by doing:
    sum(EigenVal*|Eigenvect><Eigenvect|)
    """
    from sympy.physics.quantum.density import Density
    eigen = mat.eigenvects()
    # Keep only eigenvectors with non-zero eigenvalue; each contributes a
    # (state, probability) pair to the Density operator.
    args = [[matrix_to_qubit(Matrix(
        [vector, ])), x[0]] for x in eigen for vector in x[2] if x[0] != 0]
    if (len(args) == 0):
        return S.Zero
    else:
        return Density(*args)
def qubit_to_matrix(qubit, format='sympy'):
    """Converts an Add/Mul of Qubit objects into its matrix representation

    This function is the inverse of ``matrix_to_qubit`` and is a shorthand
    for ``represent(qubit)``.
    """
    return represent(qubit, format=format)
#-----------------------------------------------------------------------------
# Measurement
#-----------------------------------------------------------------------------
def measure_all(qubit, format='sympy', normalize=True):
    """Perform an ensemble measurement of all qubits.

    Parameters
    ==========

    qubit : Qubit, Add
        The qubit to measure. This can be any Qubit or a linear combination
        of them.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.
    normalize : bool
        If True (default), normalize the state before computing
        probabilities.

    Returns
    =======

    result : list
        A list that consists of primitive states and their probabilities.

    Examples
    ========

    >>> from sympy.physics.quantum.qubit import Qubit, measure_all
    >>> from sympy.physics.quantum.gate import H
    >>> from sympy.physics.quantum.qapply import qapply

    >>> c = H(0)*H(1)*Qubit('00')
    >>> c
    H(0)*H(1)*|00>
    >>> q = qapply(c)
    >>> measure_all(q)
    [(|00>, 1/4), (|01>, 1/4), (|10>, 1/4), (|11>, 1/4)]
    """
    m = qubit_to_matrix(qubit, format)

    if format == 'sympy':
        results = []

        if normalize:
            m = m.normalized()

        size = max(m.shape)  # Max of shape to account for bra or ket
        nqubits = int(math.log(size)/math.log(2))
        for i in range(size):
            if m[i]:
                # Probability of basis state i is |amplitude|^2.
                results.append(
                    (Qubit(IntQubit(i, nqubits=nqubits)), m[i]*conjugate(m[i]))
                )
        return results
    else:
        raise NotImplementedError(
            "This function cannot handle non-SymPy matrix formats yet"
        )
def measure_partial(qubit, bits, format='sympy', normalize=True):
    """Perform a partial ensemble measure on the specified qubits.

    Parameters
    ==========

    qubits : Qubit
        The qubit to measure. This can be any Qubit or a linear combination
        of them.
    bits : tuple
        The qubits to measure.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.
    normalize : bool
        If True (default), normalize the input state and each outcome state.

    Returns
    =======

    result : list
        A list that consists of primitive states and their probabilities.

    Examples
    ========

    >>> from sympy.physics.quantum.qubit import Qubit, measure_partial
    >>> from sympy.physics.quantum.gate import H
    >>> from sympy.physics.quantum.qapply import qapply

    >>> c = H(0)*H(1)*Qubit('00')
    >>> c
    H(0)*H(1)*|00>
    >>> q = qapply(c)
    >>> measure_partial(q, (0,))
    [(sqrt(2)*|00>/2 + sqrt(2)*|10>/2, 1/2), (sqrt(2)*|01>/2 + sqrt(2)*|11>/2, 1/2)]
    """
    m = qubit_to_matrix(qubit, format)

    # Allow a single bit index to be passed as a plain integer.
    if isinstance(bits, (SYMPY_INTS, Integer)):
        bits = (int(bits),)

    if format == 'sympy':
        if normalize:
            m = m.normalized()

        possible_outcomes = _get_possible_outcomes(m, bits)

        # Form output from function.
        output = []
        for outcome in possible_outcomes:
            # Calculate probability of finding the specified bits with
            # given values.
            prob_of_outcome = 0
            prob_of_outcome += (outcome.H*outcome)[0]

            # If the output has a chance, append it to output with found
            # probability.
            if prob_of_outcome != 0:
                if normalize:
                    next_matrix = matrix_to_qubit(outcome.normalized())
                else:
                    next_matrix = matrix_to_qubit(outcome)

                output.append((
                    next_matrix,
                    prob_of_outcome
                ))

        return output
    else:
        raise NotImplementedError(
            "This function cannot handle non-SymPy matrix formats yet"
        )
def measure_partial_oneshot(qubit, bits, format='sympy'):
    """Perform a partial oneshot measurement on the specified qubits.

    A oneshot measurement is equivalent to performing a measurement on a
    quantum system. This type of measurement does not return the probabilities
    like an ensemble measurement does, but rather returns *one* of the
    possible resulting states. The exact state that is returned is determined
    by picking a state randomly according to the ensemble probabilities.

    Parameters
    ----------
    qubits : Qubit
        The qubit to measure.  This can be any Qubit or a linear combination
        of them.
    bits : tuple
        The qubits to measure.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.

    Returns
    -------
    result : Qubit
        The qubit that the system collapsed to upon measurement.
    """
    import random
    m = qubit_to_matrix(qubit, format)

    if format == 'sympy':
        m = m.normalized()
        possible_outcomes = _get_possible_outcomes(m, bits)

        # Sample one outcome: walk the outcomes accumulating probability
        # until the uniform random draw is exceeded.
        random_number = random.random()
        total_prob = 0
        for outcome in possible_outcomes:
            # Calculate probability of finding the specified bits
            # with given values.
            total_prob += (outcome.H*outcome)[0]
            if total_prob >= random_number:
                return matrix_to_qubit(outcome.normalized())
    else:
        raise NotImplementedError(
            "This function cannot handle non-SymPy matrix formats yet"
        )
def _get_possible_outcomes(m, bits):
    """Get the possible states that can be produced in a measurement.

    Parameters
    ----------
    m : Matrix
        The matrix representing the state of the system.
    bits : tuple, list
        Which bits will be measured.

    Returns
    -------
    result : list
        The list of possible states which can occur given this measurement.
        These are un-normalized so we can derive the probability of finding
        this state by taking the inner product with itself
    """
    # This is filled with loads of dirty binary tricks...You have been warned
    size = max(m.shape)  # Max of shape to account for bra or ket
    nqubits = int(math.log2(size) + .1)  # Number of qubits possible

    # Make the output states and put in output_matrices, nothing in them now.
    # Each state will represent a possible outcome of the measurement
    # Thus, output_matrices[0] is the matrix which we get when all measured
    # bits return 0. and output_matrices[1] is the matrix for only the 0th
    # bit being true
    output_matrices = []
    for i in range(1 << len(bits)):
        output_matrices.append(zeros(2**nqubits, 1))

    # Bitmasks will help sort how to determine possible outcomes.
    # When the bit mask is and-ed with a matrix-index,
    # it will determine which state that index belongs to
    bit_masks = []
    for bit in bits:
        bit_masks.append(1 << bit)

    # Make possible outcome states
    for i in range(2**nqubits):
        trueness = 0  # This tells us to which output_matrix this value belongs
        # Find trueness: measured bit j contributes binary digit j of the
        # outcome index.  (The previous ``trueness += j + 1`` weighting
        # collided for three or more measured bits — e.g. {bit 2} and
        # {bit 0, bit 1} both mapped to 3 — incorrectly merging distinct
        # measurement outcomes.)
        for j in range(len(bit_masks)):
            if i & bit_masks[j]:
                trueness += 1 << j
        # Put the value in the correct output matrix
        output_matrices[trueness][i] = m[i]
    return output_matrices
def measure_all_oneshot(qubit, format='sympy'):
    """Perform a oneshot ensemble measurement on all qubits.

    A oneshot measurement is equivalent to performing a measurement on a
    quantum system. This type of measurement does not return the probabilities
    like an ensemble measurement does, but rather returns *one* of the
    possible resulting states. The exact state that is returned is determined
    by picking a state randomly according to the ensemble probabilities.

    Parameters
    ----------
    qubits : Qubit
        The qubit to measure.  This can be any Qubit or a linear combination
        of them.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.

    Returns
    -------
    result : Qubit
        The qubit that the system collapsed to upon measurement.
    """
    import random
    m = qubit_to_matrix(qubit)

    if format == 'sympy':
        m = m.normalized()
        # Sample a basis state index by accumulating |amplitude|^2 until the
        # uniform random draw is exceeded.
        random_number = random.random()
        total = 0
        result = 0
        for i in m:
            total += i*i.conjugate()
            if total > random_number:
                break
            result += 1
        return Qubit(IntQubit(result, nqubits=int(math.log2(max(m.shape)) + .1)))
    else:
        raise NotImplementedError(
            "This function cannot handle non-SymPy matrix formats yet"
        )
| IntQubitBra |
python | Textualize__textual | docs/examples/how-to/containers07.py | {
"start": 278,
"end": 667
class ____(App):
    """Simple app to play with containers."""

    # Heavy green border around any widget tagged with the "with-border" class.
    CSS = """
    .with-border {
        border: heavy green;
    }
    """

    def compose(self) -> ComposeResult:
        # Ten boxes laid out in a single horizontally scrollable strip.
        with HorizontalScroll(classes="with-border"):
            for n in range(10):
                yield Box(label=f"Box {n+1}")
# Run the demo app when executed directly.
if __name__ == "__main__":
    app = ContainerApp()
    app.run()
| ContainerApp |
python | scrapy__scrapy | tests/test_http2_client_protocol.py | {
"start": 3894,
"end": 4249
class ____(LeafResource):
    # Simulates data loss: advertises 1024 bytes via Content-Length but then
    # writes a short DATALOSS payload and finishes, so clients must detect
    # the truncated response.
    def render_GET(self, request: TxRequest):
        request.setHeader(b"Content-Length", b"1024")
        # Defer the body write to the reactor so headers go out first.
        self.deferRequest(request, 0, self._delayed_render, request)
        return NOT_DONE_YET

    @staticmethod
    def _delayed_render(request: TxRequest):
        request.write(Data.DATALOSS)
        request.finish()
| Dataloss |
python | google__pytype | pytype/tests/test_flow2.py | {
"start": 111,
"end": 2332
class ____(test_base.BaseTest):
    """Tests for control flow.

    These tests primarily test instruction ordering and CFG traversal of the
    bytecode interpreter, i.e., their primary focus isn't the inferred types.
    Even though they check the validity of the latter, they're mostly smoke tests.
    """

    def test_loop_and_if(self):
        # `return z` is only reachable when y (and hence z) was assigned "foo".
        self.Check("""
import typing
def foo() -> str:
  while True:
    y = None
    z = None
    if __random__:
      y = "foo"
      z = "foo"
    if y:
      return z
  return "foo"
""")

    def test_cfg_cycle_singlestep(self):
        # Methods form a cycle through self.x; traversal must still converge.
        self.Check("""
import typing
class Foo:
  x = ...  # type: typing.Optional[int]
  def __init__(self):
    self.x = None
  def X(self) -> int:
    return self.x or 4
  def B(self) -> None:
    self.x = 5
    if __random__:
      self.x = 6
  def C(self) -> None:
    self.x = self.x
""")

    def test_unsatisfiable_in_with_block(self):
        # The early return inside the with-block must not widen the return
        # type to Optional[str].
        self.Check("""
import threading
_temporaries = {}
_temporaries_lock = threading.RLock()
def GetResourceFilename(name: str):
  with _temporaries_lock:
    filename = _temporaries.get(name)
    if filename:
      return filename
  return name
x = GetResourceFilename('a')
assert_type(x, str)
""")

    def test_unsatisfiable_in_except_block(self):
        # The except path always raises, so f() cannot return None.
        self.Check("""
def raise_error(e):
  raise(e)
_temporaries = {}
def f():
  try:
    return "hello"
  except Exception as e:
    filename = _temporaries.get('hello')
    if filename:
      return filename
    raise_error(e)
f().lower()  # f() should be str, not str|None
""")

    def test_finally_with_returns(self):
        # If both the try and except blocks return, a finally block shouldn't cause
        # the code to continue.
        self.Check("""
def f() -> int:
  try:
    return 10
  except:
    return 42
  finally:
    x = None
  return "hello world"
f()
""")
# Allow running this test module directly.
if __name__ == "__main__":
    test_base.main()
| FlowTest |
python | falconry__falcon | falcon/routing/compiled.py | {
"start": 43021,
"end": 43392
class ____(_CxChild):
    """Compiled-router child that binds ``match.groupdict()`` to a local
    variable with a per-node unique name."""

    def __init__(self, unique_idx: int) -> None:
        # unique_idx keeps generated variable names distinct across nodes.
        self._unique_idx = unique_idx
        self.dict_variable_name = 'dict_match_{0}'.format(unique_idx)

    def src(self, indentation: int) -> str:
        # Emit the assignment line at the requested indentation level.
        return '{0}{1} = match.groupdict()'.format(
            _TAB_STR * indentation, self.dict_variable_name
        )
| _CxVariableFromPatternMatch |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 12604,
"end": 14916
class ____(Dropout):
    """Spatial 3D version of Dropout.

    This version performs the same function as Dropout, however, it drops
    entire 3D feature maps instead of individual elements. If adjacent voxels
    within feature maps are strongly correlated (as is normally the case in
    early convolution layers) then regular dropout will not regularize the
    activations and will otherwise just result in an effective learning rate
    decrease. In this case, SpatialDropout3D will help promote independence
    between feature maps and should be used instead.

    Args:
      rate: Float between 0 and 1. Fraction of the input units to drop.
      data_format: 'channels_first' or 'channels_last'.
          In 'channels_first' mode, the channels dimension (the depth)
          is at index 1, in 'channels_last' mode is it at index 4.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

    Call arguments:
      inputs: A 5D tensor.
      training: Python boolean indicating whether the layer should behave in
        training mode (adding dropout) or in inference mode (doing nothing).

    Input shape:
      5D tensor with shape:
      `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
      or 5D tensor with shape:
      `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.

    Output shape:
      Same as input.

    References:
      - [Efficient Object Localization Using Convolutional
        Networks](https://arxiv.org/abs/1411.4280)
    """

    def __init__(self, rate, data_format=None, **kwargs):
        super(SpatialDropout3D, self).__init__(rate, **kwargs)
        if data_format is None:
            data_format = K.image_data_format()
        if data_format not in {'channels_last', 'channels_first'}:
            raise ValueError('data_format must be in '
                             '{"channels_last", "channels_first"}')
        self.data_format = data_format
        # Inputs must be 5D: batch + 3 spatial dims + channels.
        self.input_spec = InputSpec(ndim=5)

    def _get_noise_shape(self, inputs):
        # Broadcast the dropout mask over all spatial dims so whole feature
        # maps are dropped together.
        input_shape = array_ops.shape(inputs)
        if self.data_format == 'channels_first':
            return (input_shape[0], input_shape[1], 1, 1, 1)
        elif self.data_format == 'channels_last':
            return (input_shape[0], 1, 1, 1, input_shape[4])
| SpatialDropout3D |
python | weaviate__weaviate-python-client | weaviate/users/users.py | {
"start": 440,
"end": 519
class ____(UserBase):
    # Backend that manages this user (e.g. database vs. OIDC).
    user_type: UserTypes
    # False once the user has been deactivated.
    active: bool
@dataclass
| UserDB |
python | pytorch__pytorch | test/ao/sparsity/test_parametrization.py | {
"start": 235,
"end": 1162
class ____(nn.Module):
    """Test fixture: a Linear layer followed by a Sequential of two Linear
    layers, with deterministic weights/biases so tests can assert exact values.

    Weights are 1.0 / 2.0 / 3.0 and biases 10.0 / 20.0 / 30.0 respectively.
    """

    def __init__(self, bias=True):
        super().__init__()
        self.linear = nn.Linear(16, 16, bias=bias)
        self.seq = nn.Sequential(
            nn.Linear(16, 16, bias=bias), nn.Linear(16, 16, bias=bias)
        )

        # Make sure the weights are not random
        self.linear.weight = nn.Parameter(torch.zeros_like(self.linear.weight) + 1.0)
        self.seq[0].weight = nn.Parameter(torch.zeros_like(self.seq[0].weight) + 2.0)
        self.seq[1].weight = nn.Parameter(torch.zeros_like(self.seq[1].weight) + 3.0)
        if bias:
            # Fix: set the layers' .bias attributes. The original code
            # overwrote self.linear / self.seq[0] themselves with bare
            # Parameters (destroying the modules), assigned seq[0] twice,
            # and never set seq[1]'s bias.
            self.linear.bias = nn.Parameter(
                torch.zeros_like(self.linear.bias) + 10.0
            )
            self.seq[0].bias = nn.Parameter(
                torch.zeros_like(self.seq[0].bias) + 20.0
            )
            self.seq[1].bias = nn.Parameter(
                torch.zeros_like(self.seq[1].bias) + 30.0
            )

    def forward(self, x):
        x = self.linear(x)
        x = self.seq(x)
        return x
| ModelUnderTest |
python | Netflix__metaflow | metaflow/event_logger.py | {
"start": 62,
"end": 780
class ____(object):
    """Event logger that forwards payloads to a (null) sidecar process."""

    # Sidecar implementation to spawn; the null variant discards messages.
    TYPE = "nullSidecarLogger"

    def __init__(self, *args, **kwargs):
        # Currently passed flow and env in kwargs
        self._sidecar = Sidecar(self.TYPE)

    def start(self):
        # Launch the sidecar subprocess.
        return self._sidecar.start()

    def terminate(self):
        # Shut the sidecar subprocess down.
        return self._sidecar.terminate()

    def send(self, msg):
        # Arbitrary message sending. Useful if you want to override some different
        # types of messages.
        self._sidecar.send(msg)

    def log(self, payload):
        # Best-effort delivery: silently dropped if the sidecar is not active.
        if self._sidecar.is_active:
            msg = Message(MessageTypes.BEST_EFFORT, payload)
            self._sidecar.send(msg)

    @classmethod
    def get_worker(cls):
        # The null logger has no worker implementation.
        return None
| NullEventLogger |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-directory/source_google_directory/api.py | {
"start": 2855,
"end": 3851
} | class ____(ABC):
results_per_page = 100
def __init__(self, api: API, *args, **kwargs):
super().__init__(*args, **kwargs)
self._api = api
def _api_get(self, resource: str, params: Dict = None):
return self._api.get(resource, params=params)
@abstractmethod
def list(self, fields: Sequence[str] = None) -> Iterator[dict]:
"""Iterate over entities"""
@abstractmethod
def process_response(self, response: Dict) -> Iterator[dict]:
"""Process Google Directory API response"""
def read(self, getter: Callable, params: Dict = None) -> Iterator:
"""Read using getter"""
params = params or {}
params["maxResults"] = self.results_per_page
while True:
batch = getter(params={**params})
yield from self.process_response(batch)
if "nextPageToken" in batch:
params["pageToken"] = batch["nextPageToken"]
else:
break
| StreamAPI |
python | doocs__leetcode | solution/1700-1799/1760.Minimum Limit of Balls in a Bag/Solution.py | {
"start": 0,
"end": 266
} | class ____:
def minimumSize(self, nums: List[int], maxOperations: int) -> int:
def check(mx: int) -> bool:
return sum((x - 1) // mx for x in nums) <= maxOperations
return bisect_left(range(1, max(nums) + 1), True, key=check) + 1
| Solution |
python | Pylons__pyramid | tests/pkgs/restbugapp/views.py | {
"start": 166,
"end": 396
} | class ____(BaseRESTView):
"""REST Controller to control action of an avatar"""
def __init__(self, context, request):
super().__init__(context, request)
def GET(self):
return Response('gotten')
| PetRESTView |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py | {
"start": 10437,
"end": 10867
} | class ____(BaseConfig):
format: Optional[str] = Field(
default=None,
description="The default format of the cursor value will be used for all streams except those defined in the streams section",
)
streams: List[FutureStateCursorFormatStreamConfiguration] = Field(
default_factory=list, description="Expected cursor value format for a particular stream"
)
| FutureStateCursorFormatConfiguration |
python | google__jax | tests/pmap_test.py | {
"start": 129221,
"end": 129348
} | class ____(EagerPmapMixin, PmapWithDevicesTest):
pass
@jtu.pytest_mark_if_available('multiaccelerator')
| PmapWithDevicesEagerTest |
python | walkccc__LeetCode | solutions/209. Minimum Size Subarray Sum/209.py | {
"start": 0,
"end": 320
} | class ____:
def minSubArrayLen(self, target: int, nums: list[int]) -> int:
ans = math.inf
summ = 0
j = 0
for i, num in enumerate(nums):
summ += num
while summ >= target:
ans = min(ans, i - j + 1)
summ -= nums[j]
j += 1
return 0 if ans == math.inf else ans
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_combined05.py | {
"start": 315,
"end": 1591
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_combined05.xlsx")
self.ignore_elements = {
"xl/charts/chart1.xml": [
"<c:dispBlanksAs",
"<c:tickLblPos",
"<c:crosses",
"<c:axPos",
]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({"type": "bar"})
chart2 = workbook.add_chart({"type": "line"})
chart1.axis_ids = [60914304, 78899072]
chart2.axis2_ids = [85542016, 85183872]
data = [
[2, 7, 3, 6, 2],
[20, 25, 10, 10, 20],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart1.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart2.add_series({"values": "=Sheet1!$B$1:$B$5", "y2_axis": 1})
chart1.combine(chart2)
worksheet.insert_chart("E9", chart1)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | test/test_testing.py | {
"start": 101486,
"end": 102631
} | class ____(TestCase):
@ops(op_db, dtypes=OpDTypes.any_one)
def test_opinfo_sample_generators(self, device, dtype, op):
# Test op.sample_inputs doesn't generate multiple samples when called
samples = op.sample_inputs(device, dtype)
self.assertIsInstance(samples, Iterator)
@ops([op for op in op_db if op.reference_inputs_func is not None], dtypes=OpDTypes.any_one)
def test_opinfo_reference_generators(self, device, dtype, op):
# Test op.reference_inputs doesn't generate multiple samples when called
samples = op.reference_inputs(device, dtype)
self.assertIsInstance(samples, Iterator)
@ops([op for op in op_db if op.error_inputs_func is not None], dtypes=OpDTypes.none)
def test_opinfo_error_generators(self, device, op):
# Test op.error_inputs doesn't generate multiple inputs when called
samples = op.error_inputs(device)
self.assertIsInstance(samples, Iterator)
instantiate_device_type_tests(TestOpInfoSampleFunctions, globals())
instantiate_parametrized_tests(TestImports)
if __name__ == '__main__':
run_tests()
| TestOpInfoSampleFunctions |
python | huggingface__transformers | tests/quantization/quanto_integration/test_quanto.py | {
"start": 4367,
"end": 12074
} | class ____(unittest.TestCase):
"""
Test 8-bit weights only quantization
"""
model_name = "bigscience/bloom-560m"
weights = "int8"
activations = None
device_map = "cpu"
input_text = "Hello my name is"
EXPECTED_OUTPUTS = "Hello my name is John, I am a professional photographer and I"
def setUp(self):
"""
Setup quantized model
"""
quantization_config = QuantoConfig(
weights=self.weights,
activations=self.activations,
)
self.quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=self.device_map,
quantization_config=quantization_config,
dtype=torch.float32,
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.have_accelerate_hooks = (
getattr(self.quantized_model, "hf_device_map", False) and len(self.quantized_model.hf_device_map) > 1
)
def check_inference_correctness(self, model, device):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate few tokens (5-10) and check their output.
"""
if not self.have_accelerate_hooks:
model.to(device)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(device), max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_generate_quality_cpu(self):
"""
Simple test to check the quality of the model on cpu by comparing the generated tokens with the expected tokens
"""
self.check_inference_correctness(self.quantized_model, "cpu")
def test_generate_quality_accelerator(self):
"""
Simple test to check the quality of the model on accelerators by comparing the generated tokens with the expected tokens
"""
self.check_inference_correctness(self.quantized_model, torch_device)
def test_quantized_model_layers(self):
from optimum.quanto import QBitsTensor, QModuleMixin, QTensor
"""
Suite of simple test to check if the layers are quantized and are working properly
"""
# Test the type of the quantized layer
self.assertTrue(isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value, QModuleMixin))
self.assertTrue(
isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value.weight, QTensor)
)
if self.weights == "int4":
self.assertTrue(
isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value.weight, QBitsTensor)
)
# check that the lm_head was indeed not quantized, just like bnb
self.assertTrue(
isinstance(self.quantized_model.lm_head, torch.nn.Linear)
and not isinstance(self.quantized_model.lm_head, QModuleMixin)
)
if self.device_map in ["cpu", "cuda"]:
self.assertEqual(
self.quantized_model.transformer.h[0].self_attention.query_key_value.weight._data.device.type,
self.device_map,
)
self.quantized_model.to(0)
self.assertEqual(
self.quantized_model.transformer.h[0].self_attention.query_key_value.weight._data.device.type, torch_device
)
def test_serialization_bin(self):
"""
Test the serialization, the loading and the inference of the quantized weights
"""
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError) as e:
self.quantized_model.save_pretrained(tmpdirname, safe_serialization=False)
self.assertIn(
"The model is quantized with QuantizationMethod.QUANTO and is not serializable", str(e.exception)
)
# TODO: replace by the following when it works
# quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
# tmpdirname, dtype=torch.float32, device_map="cpu"
# )
# self.check_inference_correctness(quantized_model_from_saved, device="cuda")
def test_serialization_safetensors(self):
"""
Test the serialization, the loading and the inference of the quantized weights
"""
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError) as e:
self.quantized_model.save_pretrained(tmpdirname)
self.assertIn(
"The model is quantized with QuantizationMethod.QUANTO and is not serializable", str(e.exception)
)
# quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
# tmpdirname, dtype=torch.float32, device_map="cpu"
# )
# self.check_inference_correctness(quantized_model_from_saved, device="cuda")
def check_same_model(self, model1, model2):
d0 = dict(model1.named_parameters())
d1 = dict(model2.named_parameters())
self.assertTrue(d0.keys() == d1.keys())
for k in d0:
self.assertTrue(d0[k].shape == d1[k].shape)
self.assertTrue(d0[k].device.type == d1[k].device.type)
self.assertTrue(d0[k].device == d1[k].device)
self.assertTrue(d0[k].dtype == d1[k].dtype)
self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device)))
def test_compare_with_quanto(self):
from optimum.quanto import freeze, qint4, qint8, quantize
w_mapping = {"int8": qint8, "int4": qint4}
model = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=self.device_map,
dtype=torch.float32,
)
# we do not quantize the lm_head since we don't do that in transformers
quantize(model.transformer, weights=w_mapping[self.weights])
freeze(model.transformer)
self.check_same_model(model, self.quantized_model)
self.check_inference_correctness(model, device=torch_device)
@unittest.skip
def test_load_from_quanto_saved(self):
from optimum.quanto import freeze, qint4, qint8, quantize
from transformers import QuantoConfig
w_mapping = {"int8": qint8, "int4": qint4}
model = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=self.device_map,
dtype=torch.float32,
)
# we do not quantize the lm_head since we don't do that in transformers
quantize(model.transformer, weights=w_mapping[self.weights])
freeze(model.transformer)
with tempfile.TemporaryDirectory() as tmpdirname:
model.config.quantization_config = QuantoConfig(
weights=self.weights, activations=self.activations, modules_to_not_convert=["lm_head"]
)
model.save_pretrained(tmpdirname, safe_serialization=False)
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname,
device_map=self.device_map,
dtype=torch.float32,
)
self.check_same_model(model, quantized_model_from_saved)
self.check_inference_correctness(quantized_model_from_saved, device="cuda")
| QuantoQuantizationTest |
python | numba__numba | numba/tests/test_intwidth.py | {
"start": 642,
"end": 2703
} | class ____(TestCase):
def check_nullary_func(self, pyfunc, **kwargs):
cfunc = jit(**kwargs)(pyfunc)
self.assertPreciseEqual(cfunc(), pyfunc())
def test_global_uint64(self, nopython=False):
pyfunc = usecase_uint64_global
self.check_nullary_func(pyfunc, nopython=nopython)
def test_global_uint64_npm(self):
self.test_global_uint64(nopython=True)
def test_constant_uint64(self, nopython=False):
pyfunc = usecase_uint64_constant
self.check_nullary_func(pyfunc, nopython=nopython)
def test_constant_uint64_npm(self):
self.test_constant_uint64(nopython=True)
def test_constant_uint64_function_call(self, nopython=False):
pyfunc = usecase_uint64_func
self.check_nullary_func(pyfunc, nopython=nopython)
def test_constant_uint64_function_call_npm(self):
self.test_constant_uint64_function_call(nopython=True)
def test_bit_length(self):
f = utils.bit_length
self.assertEqual(f(0x7f), 7)
self.assertEqual(f(-0x7f), 7)
self.assertEqual(f(0x80), 8)
self.assertEqual(f(-0x80), 7)
self.assertEqual(f(0xff), 8)
self.assertEqual(f(-0xff), 8)
self.assertEqual(f(0x100), 9)
self.assertEqual(f(-0x100), 8)
self.assertEqual(f(-0x101), 9)
self.assertEqual(f(0x7fffffff), 31)
self.assertEqual(f(-0x7fffffff), 31)
self.assertEqual(f(-0x80000000), 31)
self.assertEqual(f(0x80000000), 32)
self.assertEqual(f(0xffffffff), 32)
self.assertEqual(f(0xffffffffffffffff), 64)
self.assertEqual(f(0x10000000000000000), 65)
def test_constant_int64(self, nopython=False):
self.check_nullary_func(usecase_int64_pos, nopython=nopython)
self.check_nullary_func(usecase_int64_neg, nopython=nopython)
self.check_nullary_func(usecase_int64_func, nopython=nopython)
def test_constant_int64_npm(self):
self.test_constant_int64(nopython=True)
if __name__ == '__main__':
unittest.main()
| IntWidthTest |
python | getsentry__sentry | tests/sentry/search/test_utils.py | {
"start": 6111,
"end": 28947
} | class ____(APITestCase, SnubaTestCase):
@property
def rpc_user(self):
return user_service.get_user(user_id=self.user.id)
@property
def current_rpc_user(self):
# This doesn't include useremails. Used in filters
# where the current user is passed back
return serialize_rpc_user(self.user)
def parse_query(self, query):
return parse_query([self.project], query, self.user, [])
def test_simple(self) -> None:
result = self.parse_query("foo bar")
assert result == {"tags": {}, "query": "foo bar"}
def test_useless_prefix(self) -> None:
result = self.parse_query("foo: bar")
assert result == {"tags": {}, "query": "foo: bar"}
def test_useless_prefix_with_symbol(self) -> None:
result = self.parse_query("foo: @ba$r")
assert result == {"tags": {}, "query": "foo: @ba$r"}
def test_useless_prefix_with_colon(self) -> None:
result = self.parse_query("foo: :ba:r::foo:")
assert result == {"tags": {}, "query": "foo: :ba:r::foo:"}
def test_handles_space_separation_after_useless_prefix_exception(self) -> None:
result = self.parse_query("foo: bar foo:bar")
assert result == {"tags": {"foo": "bar"}, "query": "foo: bar"}
def test_handles_period_in_tag_key(self) -> None:
result = self.parse_query("foo.bar:foobar")
assert result == {"tags": {"foo.bar": "foobar"}, "query": ""}
def test_handles_dash_in_tag_key(self) -> None:
result = self.parse_query("foo-bar:foobar")
assert result == {"tags": {"foo-bar": "foobar"}, "query": ""}
# TODO: update docs to include minutes, days, and weeks suffixes
@freeze_time("2016-01-01")
def test_age_tag_negative_value(self) -> None:
start = timezone.now()
expected = start - timedelta(hours=12)
result = self.parse_query("age:-12h")
assert result == {"tags": {}, "query": "", "age_from": expected, "age_from_inclusive": True}
@freeze_time("2016-01-01")
def test_age_tag_positive_value(self) -> None:
start = timezone.now()
expected = start - timedelta(hours=12)
result = self.parse_query("age:+12h")
assert result == {"tags": {}, "query": "", "age_to": expected, "age_to_inclusive": True}
@freeze_time("2016-01-01")
def test_age_tag_weeks(self) -> None:
start = timezone.now()
expected = start - timedelta(days=35)
result = self.parse_query("age:+5w")
assert result == {"tags": {}, "query": "", "age_to": expected, "age_to_inclusive": True}
@freeze_time("2016-01-01")
def test_age_tag_days(self) -> None:
start = timezone.now()
expected = start - timedelta(days=10)
result = self.parse_query("age:+10d")
assert result == {"tags": {}, "query": "", "age_to": expected, "age_to_inclusive": True}
@freeze_time("2016-01-01")
def test_age_tag_hours(self) -> None:
start = timezone.now()
expected = start - timedelta(hours=10)
result = self.parse_query("age:+10h")
assert result == {"tags": {}, "query": "", "age_to": expected, "age_to_inclusive": True}
@freeze_time("2016-01-01")
def test_age_tag_minutes(self) -> None:
start = timezone.now()
expected = start - timedelta(minutes=30)
result = self.parse_query("age:+30m")
assert result == {"tags": {}, "query": "", "age_to": expected, "age_to_inclusive": True}
@freeze_time("2016-01-01")
def test_two_age_tags(self) -> None:
start = timezone.now()
expected_to = start - timedelta(hours=12)
expected_from = start - timedelta(hours=24)
result = self.parse_query("age:+12h age:-24h")
assert result == {
"tags": {},
"query": "",
"age_to": expected_to,
"age_from": expected_from,
"age_to_inclusive": True,
"age_from_inclusive": True,
}
def test_event_timestamp_syntax(self) -> None:
result = self.parse_query("event.timestamp:2016-01-02")
assert result == {
"query": "",
"date_from": datetime(2016, 1, 2, tzinfo=UTC),
"date_from_inclusive": True,
"date_to": datetime(2016, 1, 3, tzinfo=UTC),
"date_to_inclusive": False,
"tags": {},
}
def test_times_seen_syntax(self) -> None:
result = self.parse_query("timesSeen:10")
assert result == {"tags": {}, "times_seen": 10, "query": ""}
# TODO: query parser for '>' timestamp should set inclusive to False.
@pytest.mark.xfail
def test_greater_than_comparator(self) -> None:
result = self.parse_query("timesSeen:>10 event.timestamp:>2016-01-02")
assert result == {
"tags": {},
"query": "",
"times_seen_lower": 10,
"times_seen_lower_inclusive": False,
"date_from": datetime(2016, 1, 2, tzinfo=UTC),
"date_from_inclusive": False,
}
def test_greater_than_equal_comparator(self) -> None:
result = self.parse_query("timesSeen:>=10 event.timestamp:>=2016-01-02")
assert result == {
"tags": {},
"query": "",
"times_seen_lower": 10,
"times_seen_lower_inclusive": True,
"date_from": datetime(2016, 1, 2, tzinfo=UTC),
"date_from_inclusive": True,
}
def test_less_than_comparator(self) -> None:
result = self.parse_query("event.timestamp:<2016-01-02 timesSeen:<10")
assert result == {
"tags": {},
"query": "",
"times_seen_upper": 10,
"times_seen_upper_inclusive": False,
"date_to": datetime(2016, 1, 2, tzinfo=UTC),
"date_to_inclusive": False,
}
# TODO: query parser for '<=' timestamp should set inclusive to True.
@pytest.mark.xfail
def test_less_than_equal_comparator(self) -> None:
result = self.parse_query("event.timestamp:<=2016-01-02 timesSeen:<=10")
assert result == {
"tags": {},
"query": "",
"times_seen_upper": 10,
"times_seen_upper_inclusive": True,
"date_to": datetime(2016, 1, 2, tzinfo=UTC),
"date_to_inclusive": True,
}
def test_handles_underscore_in_tag_key(self) -> None:
result = self.parse_query("foo_bar:foobar")
assert result == {"tags": {"foo_bar": "foobar"}, "query": ""}
def test_mix_tag_and_query(self) -> None:
result = self.parse_query("foo bar key:value")
assert result == {"tags": {"key": "value"}, "query": "foo bar"}
def test_single_tag(self) -> None:
result = self.parse_query("key:value")
assert result == {"tags": {"key": "value"}, "query": ""}
def test_tag_with_colon_in_value(self) -> None:
result = self.parse_query("url:http://example.com")
assert result == {"tags": {"url": "http://example.com"}, "query": ""}
def test_single_space_in_value(self) -> None:
result = self.parse_query('key:"value1 value2"')
assert result == {"tags": {"key": "value1 value2"}, "query": ""}
def test_multiple_spaces_in_value(self) -> None:
result = self.parse_query('key:"value1 value2"')
assert result == {"tags": {"key": "value1 value2"}, "query": ""}
def test_invalid_tag_as_query(self) -> None:
result = self.parse_query("Resque::DirtyExit")
assert result == {"tags": {}, "query": "Resque::DirtyExit"}
def test_colons_in_tag_value(self) -> None:
result = self.parse_query("key:Resque::DirtyExit")
assert result == {"tags": {"key": "Resque::DirtyExit"}, "query": ""}
def test_multiple_tags(self) -> None:
result = self.parse_query("foo:bar key:value")
assert result == {"tags": {"key": "value", "foo": "bar"}, "query": ""}
def test_single_tag_with_quotes(self) -> None:
result = self.parse_query('foo:"bar"')
assert result == {"tags": {"foo": "bar"}, "query": ""}
def test_tag_with_quotes_and_query(self) -> None:
result = self.parse_query('key:"a value" hello')
assert result == {"tags": {"key": "a value"}, "query": "hello"}
def test_is_resolved(self) -> None:
result = self.parse_query("is:resolved")
assert result == {"status": GroupStatus.RESOLVED, "tags": {}, "query": ""}
def test_assigned_me(self) -> None:
result = self.parse_query("assigned:me")
assert result == {"assigned_to": self.current_rpc_user, "tags": {}, "query": ""}
def test_assigned_none(self) -> None:
result = self.parse_query("assigned:none")
assert result == {"assigned_to": None, "tags": {}, "query": ""}
def test_assigned_email(self) -> None:
result = self.parse_query(f"assigned:{self.user.email}")
assert result == {"assigned_to": self.rpc_user, "tags": {}, "query": ""}
def test_assigned_unknown_user(self) -> None:
result = self.parse_query("assigned:fake@example.com")
assert isinstance(result["assigned_to"], RpcUser)
assert result["assigned_to"].id == 0
def test_assigned_valid_team(self) -> None:
result = self.parse_query(f"assigned:#{self.team.slug}")
assert result["assigned_to"] == self.team
def test_assigned_unassociated_team(self) -> None:
team2 = self.create_team(organization=self.organization)
result = self.parse_query(f"assigned:#{team2.slug}")
assert isinstance(result["assigned_to"], Team)
assert result["assigned_to"].id == 0
def test_assigned_invalid_team(self) -> None:
result = self.parse_query("assigned:#invalid")
assert isinstance(result["assigned_to"], Team)
assert result["assigned_to"].id == 0
def test_bookmarks_me(self) -> None:
result = self.parse_query("bookmarks:me")
assert result == {"bookmarked_by": self.current_rpc_user, "tags": {}, "query": ""}
def test_bookmarks_email(self) -> None:
result = self.parse_query(f"bookmarks:{self.user.email}")
assert result == {"bookmarked_by": self.rpc_user, "tags": {}, "query": ""}
def test_bookmarks_unknown_user(self) -> None:
result = self.parse_query("bookmarks:fake@example.com")
assert result["bookmarked_by"].id == 0
def test_first_release(self) -> None:
result = self.parse_query("first-release:bar")
assert result == {"first_release": ["bar"], "tags": {}, "query": ""}
def test_first_release_latest(self) -> None:
result = self.parse_query("first-release:latest")
assert result == {"first_release": [""], "tags": {}, "query": ""}
release = self.create_release(
project=self.project,
version="older_release",
date_added=timezone.now() - timedelta(days=1),
)
result = self.parse_query("first-release:latest")
assert result == {"first_release": [release.version], "tags": {}, "query": ""}
release = self.create_release(
project=self.project, version="new_release", date_added=timezone.now()
)
result = self.parse_query("first-release:latest")
assert result == {"first_release": [release.version], "tags": {}, "query": ""}
def test_release(self) -> None:
result = self.parse_query("release:bar")
assert result == {"tags": {"sentry:release": ["bar"]}, "query": ""}
def test_release_latest(self) -> None:
result = self.parse_query("release:latest")
assert result == {"tags": {"sentry:release": [""]}, "query": ""}
release = self.create_release(
project=self.project,
version="older_release",
date_added=timezone.now() - timedelta(days=1),
)
result = self.parse_query("release:latest")
assert result == {"tags": {"sentry:release": [release.version]}, "query": ""}
release = self.create_release(
project=self.project, version="new_release", date_added=timezone.now()
)
result = self.parse_query("release:latest")
assert result == {"tags": {"sentry:release": [release.version]}, "query": ""}
def test_dist(self) -> None:
result = self.parse_query("dist:123")
assert result == {"tags": {"sentry:dist": "123"}, "query": ""}
def test_padded_spacing(self) -> None:
result = self.parse_query("release:bar foo bar")
assert result == {"tags": {"sentry:release": ["bar"]}, "query": "foo bar"}
def test_unknown_user_with_dot_query(self) -> None:
result = self.parse_query("user.email:fake@example.com")
assert result["tags"]["sentry:user"] == "email:fake@example.com"
def test_unknown_user_value(self) -> None:
result = self.parse_query("user.xxxxxx:example")
assert result["tags"]["sentry:user"] == "xxxxxx:example"
def test_user_lookup_with_dot_query(self) -> None:
self.project.date_added = timezone.now() - timedelta(minutes=10)
self.project.save()
self.store_event(
data={
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foobar",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=self.project.id,
)
result = self.parse_query("user.username:foobar")
assert result["tags"]["sentry:user"] == "id:1"
def test_unknown_user_legacy_syntax(self) -> None:
result = self.parse_query("user:email:fake@example.com")
assert result["tags"]["sentry:user"] == "email:fake@example.com"
def test_user_lookup_legacy_syntax(self) -> None:
self.project.date_added = timezone.now() - timedelta(minutes=10)
self.project.save()
self.store_event(
data={
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foobar",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=self.project.id,
)
result = self.parse_query("user:username:foobar")
assert result["tags"]["sentry:user"] == "id:1"
def test_is_unassigned(self) -> None:
result = self.parse_query("is:unassigned")
assert result == {"unassigned": True, "tags": {}, "query": ""}
def test_is_assigned(self) -> None:
result = self.parse_query("is:assigned")
assert result == {"unassigned": False, "tags": {}, "query": ""}
def test_is_inbox(self) -> None:
result = self.parse_query("is:for_review")
assert result == {"for_review": True, "tags": {}, "query": ""}
def test_is_unlinked(self) -> None:
result = self.parse_query("is:unlinked")
assert result == {"linked": False, "tags": {}, "query": ""}
def test_is_linked(self) -> None:
result = self.parse_query("is:linked")
assert result == {"linked": True, "tags": {}, "query": ""}
def test_age_from(self) -> None:
result = self.parse_query("age:-24h")
assert result["age_from"] > timezone.now() - timedelta(hours=25)
assert result["age_from"] < timezone.now() - timedelta(hours=23)
assert not result.get("age_to")
def test_age_to(self) -> None:
result = self.parse_query("age:+24h")
assert result["age_to"] > timezone.now() - timedelta(hours=25)
assert result["age_to"] < timezone.now() - timedelta(hours=23)
assert not result.get("age_from")
def test_age_range(self) -> None:
result = self.parse_query("age:-24h age:+12h")
assert result["age_from"] > timezone.now() - timedelta(hours=25)
assert result["age_from"] < timezone.now() - timedelta(hours=23)
assert result["age_to"] > timezone.now() - timedelta(hours=13)
assert result["age_to"] < timezone.now() - timedelta(hours=11)
def test_first_seen_range(self) -> None:
result = self.parse_query("firstSeen:-24h firstSeen:+12h")
assert result["age_from"] > timezone.now() - timedelta(hours=25)
assert result["age_from"] < timezone.now() - timedelta(hours=23)
assert result["age_to"] > timezone.now() - timedelta(hours=13)
assert result["age_to"] < timezone.now() - timedelta(hours=11)
def test_date_range(self) -> None:
result = self.parse_query("event.timestamp:>2016-01-01 event.timestamp:<2016-01-02")
assert result["date_from"] == datetime(2016, 1, 1, tzinfo=UTC)
assert result["date_from_inclusive"] is False
assert result["date_to"] == datetime(2016, 1, 2, tzinfo=UTC)
assert result["date_to_inclusive"] is False
def test_date_range_with_timezone(self) -> None:
result = self.parse_query(
"event.timestamp:>2016-01-01T10:00:00-03:00 event.timestamp:<2016-01-02T10:00:00+02:00"
)
assert result["date_from"] == datetime(2016, 1, 1, 13, 0, 0, tzinfo=UTC)
assert result["date_from_inclusive"] is False
assert result["date_to"] == datetime(2016, 1, 2, 8, 0, tzinfo=UTC)
assert result["date_to_inclusive"] is False
def test_date_range_with_z_timezone(self) -> None:
result = self.parse_query(
"event.timestamp:>2016-01-01T10:00:00Z event.timestamp:<2016-01-02T10:00:00Z"
)
assert result["date_from"] == datetime(2016, 1, 1, 10, 0, 0, tzinfo=UTC)
assert result["date_from_inclusive"] is False
assert result["date_to"] == datetime(2016, 1, 2, 10, 0, tzinfo=UTC)
assert result["date_to_inclusive"] is False
def test_date_range_inclusive(self) -> None:
result = self.parse_query("event.timestamp:>=2016-01-01 event.timestamp:<=2016-01-02")
assert result["date_from"] == datetime(2016, 1, 1, tzinfo=UTC)
assert result["date_from_inclusive"] is True
assert result["date_to"] == datetime(2016, 1, 2, tzinfo=UTC)
assert result["date_to_inclusive"] is True
def test_date_approx_day(self) -> None:
date_value = datetime(2016, 1, 1, tzinfo=UTC)
result = self.parse_query("event.timestamp:2016-01-01")
assert result["date_from"] == date_value
assert result["date_from_inclusive"]
assert result["date_to"] == date_value + timedelta(days=1)
assert not result["date_to_inclusive"]
def test_date_approx_precise(self) -> None:
date_value = datetime(2016, 1, 1, tzinfo=UTC)
result = self.parse_query("event.timestamp:2016-01-01T00:00:00")
assert result["date_from"] == date_value - timedelta(minutes=5)
assert result["date_from_inclusive"]
assert result["date_to"] == date_value + timedelta(minutes=6)
assert not result["date_to_inclusive"]
def test_date_approx_precise_with_timezone(self) -> None:
date_value = datetime(2016, 1, 1, 5, 0, 0, tzinfo=UTC)
result = self.parse_query("event.timestamp:2016-01-01T00:00:00-05:00")
assert result["date_from"] == date_value - timedelta(minutes=5)
assert result["date_from_inclusive"]
assert result["date_to"] == date_value + timedelta(minutes=6)
assert not result["date_to_inclusive"]
def test_last_seen_range(self) -> None:
result = self.parse_query("lastSeen:-24h lastSeen:+12h")
assert result["last_seen_from"] > timezone.now() - timedelta(hours=25)
assert result["last_seen_from"] < timezone.now() - timedelta(hours=23)
assert result["last_seen_to"] > timezone.now() - timedelta(hours=13)
assert result["last_seen_to"] < timezone.now() - timedelta(hours=11)
def test_has_tag(self) -> None:
result = self.parse_query("has:foo")
assert result["tags"]["foo"] == ANY
result = self.parse_query("has:foo foo:value")
assert result["tags"]["foo"] == "value"
def test_has_user(self) -> None:
result = self.parse_query("has:user")
assert result["tags"]["sentry:user"] == ANY
def test_has_release(self) -> None:
result = self.parse_query("has:release")
assert result["tags"]["sentry:release"] == ANY
def test_quoted_string(self) -> None:
result = self.parse_query('"release:foo"')
assert result == {"tags": {}, "query": "release:foo"}
def test_quoted_tag_value(self) -> None:
result = self.parse_query('event.type:error title:"QueryExecutionError: Code: 141."')
assert result["query"] == ""
assert result["tags"]["title"] == "QueryExecutionError: Code: 141."
assert result["tags"]["event.type"] == "error"
def test_leading_colon(self) -> None:
result = self.parse_query("country:canada :unresolved")
assert result["query"] == ":unresolved"
assert result["tags"]["country"] == "canada"
def test_assigned_or_suggested_me(self) -> None:
result = self.parse_query("assigned_or_suggested:me")
assert result == {"assigned_or_suggested": self.current_rpc_user, "tags": {}, "query": ""}
def test_assigned_or_suggested_none(self) -> None:
result = self.parse_query("assigned_or_suggested:none")
assert result == {
"assigned_or_suggested": None,
"tags": {},
"query": "",
}
def test_owner_email(self) -> None:
result = self.parse_query(f"assigned_or_suggested:{self.user.email}")
assert result == {"assigned_or_suggested": self.rpc_user, "tags": {}, "query": ""}
def test_assigned_or_suggested_unknown_user(self) -> None:
result = self.parse_query("assigned_or_suggested:fake@example.com")
assert isinstance(result["assigned_or_suggested"], RpcUser)
assert result["assigned_or_suggested"].id == 0
def test_owner_valid_team(self) -> None:
result = self.parse_query(f"assigned_or_suggested:#{self.team.slug}")
assert result["assigned_or_suggested"] == self.team
def test_assigned_or_suggested_unassociated_team(self) -> None:
team2 = self.create_team(organization=self.organization)
result = self.parse_query(f"assigned_or_suggested:#{team2.slug}")
assert isinstance(result["assigned_or_suggested"], Team)
assert result["assigned_or_suggested"].id == 0
def test_owner_invalid_team(self) -> None:
result = self.parse_query("assigned_or_suggested:#invalid")
assert isinstance(result["assigned_or_suggested"], Team)
assert result["assigned_or_suggested"].id == 0
| ParseQueryTest |
python | pennersr__django-allauth | allauth/account/models.py | {
"start": 4297,
"end": 6029
} | class ____(EmailConfirmationMixin, models.Model):
email_address = models.ForeignKey(
EmailAddress,
verbose_name=_("email address"),
on_delete=models.CASCADE,
)
created = models.DateTimeField(verbose_name=_("created"), default=timezone.now)
sent = models.DateTimeField(verbose_name=_("sent"), null=True)
key = models.CharField(verbose_name=_("key"), max_length=64, unique=True)
objects = EmailConfirmationManager()
class Meta:
verbose_name = _("email confirmation")
verbose_name_plural = _("email confirmations")
def __str__(self):
return "confirmation for %s" % self.email_address
@classmethod
def create(cls, email_address):
key = get_adapter().generate_emailconfirmation_key(email_address.email)
return cls._default_manager.create(email_address=email_address, key=key)
@classmethod
def from_key(cls, key):
qs = EmailConfirmation.objects.all_valid()
qs = qs.select_related("email_address__user")
emailconfirmation = qs.filter(key=key.lower()).first()
return emailconfirmation
def key_expired(self):
expiration_date = self.sent + datetime.timedelta(
days=app_settings.EMAIL_CONFIRMATION_EXPIRE_DAYS
)
return expiration_date <= timezone.now()
key_expired.boolean = True # type: ignore[attr-defined]
def confirm(self, request) -> Optional[EmailAddress]:
if not self.key_expired():
return super().confirm(request)
return None
def send(self, request=None, signup=False) -> None:
super().send(request=request, signup=signup)
self.sent = timezone.now()
self.save()
| EmailConfirmation |
python | weaviate__weaviate-python-client | weaviate/collections/classes/batch.py | {
"start": 10654,
"end": 12168
} | class ____:
"""This class contains the results of a batch `insert_many_references` operation.
Since the individual references within the batch can error for differing reasons, the data is split up within this class for ease use when performing error checking, handling, and data revalidation.
Attributes:
elapsed_seconds: The time taken to perform the batch operation.
errors: A dictionary of all the failed responses from the batch operation. The keys are the indices of the references in the batch, and the values are the `Error` objects.
has_errors: A boolean indicating whether or not any of the references in the batch failed to be inserted. If this is `True`, then the `errors` dictionary will contain at least one entry.
"""
elapsed_seconds: float = 0.0
errors: Dict[int, ErrorReference] = field(default_factory=dict)
has_errors: bool = False
def __add__(self, other: "BatchReferenceReturn") -> "BatchReferenceReturn":
self.elapsed_seconds += other.elapsed_seconds
prev_max = max(self.errors.keys()) if len(self.errors) > 0 else -1
for key, value in other.errors.items():
self.errors[prev_max + key + 1] = value
self.has_errors = self.has_errors or other.has_errors
return self
def add_errors(self, errors: Dict[int, ErrorReference]) -> None:
"""Add a list of errors to the batch return object."""
self.has_errors = True
self.errors.update(errors)
| BatchReferenceReturn |
python | huggingface__transformers | src/transformers/models/glm4/modeling_glm4.py | {
"start": 8805,
"end": 11930
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Glm4Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Glm4Attention |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_X.py | {
"start": 1495,
"end": 2778
} | class ____(Benchmark):
r"""
Xin-She Yang 2 objective function.
This class defines the Xin-She Yang 2 [1]_ global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{XinSheYang02}}(\x) = \frac{\sum_{i=1}^{n} \lvert{x_{i}}\rvert}
{e^{\sum_{i=1}^{n} \sin\left(x_{i}^{2.0}
\right)}}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-2\pi, 2\pi]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-2 * pi] * self.N,
[2 * pi] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return sum(abs(x)) * exp(-sum(sin(x ** 2.0)))
| XinSheYang02 |
python | ray-project__ray | python/ray/serve/llm/__init__.py | {
"start": 1398,
"end": 1653
} | class ____(_LoraConfig):
"""The configuration for loading an LLM model with LoRA."""
pass
#############
# Deployments
#############
@Deprecated(
old="ray.serve.llm.LLMServer", new="ray.serve.llm.deployment.LLMServer", error=False
)
| LoraConfig |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_details.py | {
"start": 1001,
"end": 1411
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-details"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="a@example.com", is_managed=False, name="example name")
self.superuser = self.create_user(is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.login_as(user=self.user)
@control_silo_test
| UserDetailsTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 37596,
"end": 37974
} | class ____(sgqlc.types.Enum):
"""The possible default commit messages for merges.
Enumeration Choices:
* `BLANK`: Default to a blank commit message.
* `PR_BODY`: Default to the pull request's body.
* `PR_TITLE`: Default to the pull request's title.
"""
__schema__ = github_schema
__choices__ = ("BLANK", "PR_BODY", "PR_TITLE")
| MergeCommitMessage |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 45267,
"end": 45590
} | class ____(DateArg):
def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> str:
value = super().normalize(value, params, combinator)
# SnQL interprets string types as string, so strip the
# quotes added in StringArg.normalize.
return value[1:-1]
| SnQLDateArg |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py | {
"start": 4267,
"end": 12924
} | class ____:
G1_edges = [
(1, 2),
(1, 4),
(1, 5),
(2, 3),
(2, 4),
(3, 4),
(4, 5),
(1, 6),
(6, 7),
(6, 8),
(8, 9),
(7, 9),
]
mapped = {
0: "x",
1: "a",
2: "b",
3: "c",
4: "d",
5: "e",
6: "f",
7: "g",
8: "h",
9: "i",
}
def test_no_covered_neighbors_no_labels(self):
G1 = nx.Graph()
G1.add_edges_from(self.G1_edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, self.mapped)
G1_degree = dict(G1.degree)
l1 = dict(G1.nodes(data="label", default=-1))
l2 = dict(G2.nodes(data="label", default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
m = {9: self.mapped[9], 1: self.mapped[1]}
m_rev = {self.mapped[9]: 9, self.mapped[1]: 1}
T1 = {7, 8, 2, 4, 5}
T1_tilde = {0, 3, 6}
T2 = {"g", "h", "b", "d", "e"}
T2_tilde = {"x", "c", "f"}
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
u = 3
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
u = 0
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
m.pop(9)
m_rev.pop(self.mapped[9])
T1 = {2, 4, 5, 6}
T1_tilde = {0, 3, 7, 8, 9}
T2 = {"g", "h", "b", "d", "e", "f"}
T2_tilde = {"x", "c", "g", "h", "i"}
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
u = 7
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {
self.mapped[u],
self.mapped[8],
self.mapped[3],
self.mapped[9],
}
def test_no_covered_neighbors_with_labels(self):
G1 = nx.Graph()
G1.add_edges_from(self.G1_edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, self.mapped)
G1_degree = dict(G1.degree)
nx.set_node_attributes(
G1,
dict(zip(G1, it.cycle(labels_many))),
"label",
)
nx.set_node_attributes(
G2,
dict(
zip(
[self.mapped[n] for n in G1],
it.cycle(labels_many),
)
),
"label",
)
l1 = dict(G1.nodes(data="label", default=-1))
l2 = dict(G2.nodes(data="label", default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
m = {9: self.mapped[9], 1: self.mapped[1]}
m_rev = {self.mapped[9]: 9, self.mapped[1]: 1}
T1 = {7, 8, 2, 4, 5, 6}
T1_tilde = {0, 3}
T2 = {"g", "h", "b", "d", "e", "f"}
T2_tilde = {"x", "c"}
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
u = 3
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
u = 0
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
# Change label of disconnected node
G1.nodes[u]["label"] = "blue"
l1 = dict(G1.nodes(data="label", default=-1))
l2 = dict(G2.nodes(data="label", default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
# No candidate
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == set()
m.pop(9)
m_rev.pop(self.mapped[9])
T1 = {2, 4, 5, 6}
T1_tilde = {0, 3, 7, 8, 9}
T2 = {"b", "d", "e", "f"}
T2_tilde = {"x", "c", "g", "h", "i"}
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
u = 7
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
G1.nodes[8]["label"] = G1.nodes[7]["label"]
G2.nodes[self.mapped[8]]["label"] = G1.nodes[7]["label"]
l1 = dict(G1.nodes(data="label", default=-1))
l2 = dict(G2.nodes(data="label", default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u], self.mapped[8]}
def test_covered_neighbors_no_labels(self):
G1 = nx.Graph()
G1.add_edges_from(self.G1_edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, self.mapped)
G1_degree = dict(G1.degree)
l1 = dict(G1.nodes(data=None, default=-1))
l2 = dict(G2.nodes(data=None, default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
m = {9: self.mapped[9], 1: self.mapped[1]}
m_rev = {self.mapped[9]: 9, self.mapped[1]: 1}
T1 = {7, 8, 2, 4, 5, 6}
T1_tilde = {0, 3}
T2 = {"g", "h", "b", "d", "e", "f"}
T2_tilde = {"x", "c"}
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
u = 5
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
u = 6
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u], self.mapped[2]}
def test_covered_neighbors_with_labels(self):
G1 = nx.Graph()
G1.add_edges_from(self.G1_edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, self.mapped)
G1_degree = dict(G1.degree)
nx.set_node_attributes(
G1,
dict(zip(G1, it.cycle(labels_many))),
"label",
)
nx.set_node_attributes(
G2,
dict(
zip(
[self.mapped[n] for n in G1],
it.cycle(labels_many),
)
),
"label",
)
l1 = dict(G1.nodes(data="label", default=-1))
l2 = dict(G2.nodes(data="label", default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
m = {9: self.mapped[9], 1: self.mapped[1]}
m_rev = {self.mapped[9]: 9, self.mapped[1]: 1}
T1 = {7, 8, 2, 4, 5, 6}
T1_tilde = {0, 3}
T2 = {"g", "h", "b", "d", "e", "f"}
T2_tilde = {"x", "c"}
sparams = _StateParameters(
m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
)
u = 5
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
u = 6
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u]}
# Assign to 2, the same label as 6
G1.nodes[2]["label"] = G1.nodes[u]["label"]
G2.nodes[self.mapped[2]]["label"] = G1.nodes[u]["label"]
l1 = dict(G1.nodes(data="label", default=-1))
l2 = dict(G2.nodes(data="label", default=-1))
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
candidates = _find_candidates(u, gparams, sparams, G1_degree)
assert candidates == {self.mapped[u], self.mapped[2]}
| TestGraphCandidateSelection |
python | numpy__numpy | numpy/f2py/tests/test_isoc.py | {
"start": 98,
"end": 1434
} | class ____(util.F2PyTest):
sources = [
util.getpath("tests", "src", "isocintrin", "isoCtests.f90"),
]
# gh-24553
@pytest.mark.slow
def test_c_double(self):
out = self.module.coddity.c_add(1, 2)
exp_out = 3
assert out == exp_out
# gh-9693
def test_bindc_function(self):
out = self.module.coddity.wat(1, 20)
exp_out = 8
assert out == exp_out
# gh-25207
def test_bindc_kinds(self):
out = self.module.coddity.c_add_int64(1, 20)
exp_out = 21
assert out == exp_out
# gh-25207
def test_bindc_add_arr(self):
a = np.array([1, 2, 3])
b = np.array([1, 2, 3])
out = self.module.coddity.add_arr(a, b)
exp_out = a * 2
assert_allclose(out, exp_out)
def test_process_f2cmap_dict():
from numpy.f2py.auxfuncs import process_f2cmap_dict
f2cmap_all = {"integer": {"8": "rubbish_type"}}
new_map = {"INTEGER": {"4": "int"}}
c2py_map = {"int": "int", "rubbish_type": "long"}
exp_map, exp_maptyp = ({"integer": {"8": "rubbish_type", "4": "int"}}, ["int"])
# Call the function
res_map, res_maptyp = process_f2cmap_dict(f2cmap_all, new_map, c2py_map)
# Assert the result is as expected
assert res_map == exp_map
assert res_maptyp == exp_maptyp
| TestISOC |
python | huggingface__transformers | tests/models/idefics2/test_modeling_idefics2.py | {
"start": 15704,
"end": 25245
} | class ____(GenerationTesterMixin, ModelTesterMixin, unittest.TestCase):
"""
Model tester for `Idefics2ForConditionalGeneration`.
"""
all_model_classes = (Idefics2ForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": Idefics2ForConditionalGeneration} if is_torch_available() else ()
test_resize_embeddings = True
def setUp(self):
self.model_tester = Idefics2VisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Idefics2Config, has_text_modality=False)
@unittest.skip(reason="input_embeds cannot be passed in without input_ids")
def test_inputs_embeds():
pass
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_generate_padding_right(self):
pass
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
pass
@pytest.mark.generate
@slow
@unittest.skip(
reason="Idefics2 doesn't support SDPA for all backbones, vision backbones has only eager/FA2 attention"
)
def test_eager_matches_sdpa_generate(self):
pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
# Retrieve the embeddings and clone theme
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"]
model.model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size)
self.assertTrue(model.config.text_config.vocab_size, model.vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
# Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
target_dimension = 128
model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0], target_dimension)
with self.assertRaisesRegex(
ValueError,
"Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
):
model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
model.eval()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"]
model.model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
def test_inputs_embeds_matches_input_ids_with_generate(self):
# overwrite because IDEFICS needs ids and embeds at the input to be not None
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1
wte = model.get_input_embeddings()
input_ids = inputs["input_ids"]
# some models infer position ids/attn mask differently when input ids
# by check if pad_token let's make sure no padding is in input ids
not_pad_token_id = pad_token_id + 1 if max(0, pad_token_id - 1) == 0 else pad_token_id - 1
input_ids[input_ids == pad_token_id] = not_pad_token_id
del inputs["input_ids"]
inputs_embeds = wte(input_ids)
out_ids = model.generate(input_ids=input_ids, **inputs, max_new_tokens=2)
out_embeds = model.generate(input_ids=input_ids, inputs_embeds=inputs_embeds, **inputs, max_new_tokens=2)
torch.testing.assert_close(out_embeds, out_ids)
@require_torch
| Idefics2ForConditionalGenerationModelTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/events.py | {
"start": 25946,
"end": 26484
} | class ____(_EventsHold[_ET]):
all_holds: weakref.WeakKeyDictionary[Any, Any] = (
weakref.WeakKeyDictionary()
)
def resolve(self, class_: Type[_O]) -> Optional[ClassManager[_O]]:
return instrumentation.opt_manager_of_class(class_)
# this fails on pyright if you use Any. Fails on mypy if you use _ET
class HoldInstanceEvents(_EventsHold.HoldEvents[_ET], InstanceEvents): # type: ignore[valid-type,misc] # noqa: E501
pass
dispatch = event.dispatcher(HoldInstanceEvents)
| _InstanceEventsHold |
python | pytorch__pytorch | test/inductor/test_compile.py | {
"start": 1414,
"end": 1567
} | class ____(MyModule):
def forward(self, x): # takes a dict of list
a, b = x["key"]
return {"result": super().forward(a) + b}
| MyModule2 |
python | huggingface__transformers | src/transformers/generation/logits_process.py | {
"start": 53556,
"end": 56574
} | class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] that works similarly to [`NoRepeatNGramLogitsProcessor`], but applied exclusively to prevent
the repetition of n-grams present in the prompt.
It was designed to promote chattiness in a language model, by preventing the generation of n-grams present in
previous conversation rounds.
Args:
encoder_ngram_size (`int`):
All ngrams of size `ngram_size` can only occur within the encoder input ids.
encoder_input_ids (`int`):
The encoder_input_ids that should not be repeated within the decoder ids.
Examples:
```py
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
>>> inputs = tokenizer("Alice: I love cats. What do you love?\nBob:", return_tensors="pt")
>>> # With greedy decoding, we see Bob repeating Alice's opinion. If Bob was a chatbot, it would be a poor one.
>>> outputs = model.generate(**inputs)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
Alice: I love cats. What do you love?
Bob: I love cats. What do you
>>> # With this logits processor, we can prevent Bob from repeating Alice's opinion.
>>> outputs = model.generate(**inputs, encoder_no_repeat_ngram_size=2)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
Alice: I love cats. What do you love?
Bob: My cats are very cute.
```
"""
def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor):
if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:
raise ValueError(
f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}"
)
self.ngram_size = encoder_ngram_size
if len(encoder_input_ids.shape) == 1:
encoder_input_ids = encoder_input_ids.unsqueeze(0)
self.batch_size = encoder_input_ids.shape[0]
self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# B x num_beams
num_hypos = scores.shape[0]
num_beams = num_hypos // self.batch_size
cur_len = input_ids.shape[-1]
scores_processed = scores.clone()
banned_batch_tokens = [
_get_generated_ngrams(
self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len
)
for hypo_idx in range(num_hypos)
]
for i, banned_tokens in enumerate(banned_batch_tokens):
scores_processed[i, banned_tokens] = -float("inf")
return scores_processed
| EncoderNoRepeatNGramLogitsProcessor |
python | openai__openai-python | tests/lib/test_pydantic.py | {
"start": 7801,
"end": 10381
} | class ____(BaseModel):
color: Color = Field(description="The detected color")
hex_color_code: str = Field(description="The hex color code of the detected color")
def test_enums() -> None:
if not PYDANTIC_V1:
assert openai.pydantic_function_tool(ColorDetection)["function"] == snapshot(
{
"name": "ColorDetection",
"strict": True,
"parameters": {
"$defs": {"Color": {"enum": ["red", "blue", "green"], "title": "Color", "type": "string"}},
"properties": {
"color": {
"description": "The detected color",
"enum": ["red", "blue", "green"],
"title": "Color",
"type": "string",
},
"hex_color_code": {
"description": "The hex color code of the detected color",
"title": "Hex Color Code",
"type": "string",
},
},
"required": ["color", "hex_color_code"],
"title": "ColorDetection",
"type": "object",
"additionalProperties": False,
},
}
)
else:
assert openai.pydantic_function_tool(ColorDetection)["function"] == snapshot(
{
"name": "ColorDetection",
"strict": True,
"parameters": {
"properties": {
"color": {
"description": "The detected color",
"title": "Color",
"enum": ["red", "blue", "green"],
},
"hex_color_code": {
"description": "The hex color code of the detected color",
"title": "Hex Color Code",
"type": "string",
},
},
"required": ["color", "hex_color_code"],
"title": "ColorDetection",
"definitions": {
"Color": {"title": "Color", "description": "An enumeration.", "enum": ["red", "blue", "green"]}
},
"type": "object",
"additionalProperties": False,
},
}
)
| ColorDetection |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict18.py | {
"start": 1203,
"end": 1404
} | class ____(TD5[Literal[1]]):
z: str
def func4(a: TD6) -> Literal[1]: ...
func4({"x": 1, "y": 1, "z": "a"})
f2: TD6 = {"x": 1, "y": 1, "z": "a"}
reveal_type(func4({"x": 1, "y": 1, "z": "a"}))
| TD6 |
python | PrefectHQ__prefect | src/prefect/settings/profiles.py | {
"start": 3378,
"end": 12795
} | class ____:
""" "
A utility class for working with a collection of profiles.
Profiles in the collection must have unique names.
The collection may store the name of the active profile.
"""
def __init__(self, profiles: Iterable[Profile], active: str | None = None) -> None:
self.profiles_by_name: dict[str, Profile] = {
profile.name: profile for profile in profiles
}
self.active_name = active
@property
def names(self) -> set[str]:
"""
Return a set of profile names in this collection.
"""
return set(self.profiles_by_name.keys())
@property
def active_profile(self) -> Profile | None:
"""
Retrieve the active profile in this collection.
"""
if self.active_name is None:
return None
return self[self.active_name]
def set_active(self, name: str | None, check: bool = True) -> None:
"""
Set the active profile name in the collection.
A null value may be passed to indicate that this collection does not determine
the active profile.
"""
if check and name is not None and name not in self.names:
raise ValueError(f"Unknown profile name {name!r}.")
self.active_name = name
def update_profile(
self,
name: str,
settings: dict[Setting, Any],
source: Path | None = None,
) -> Profile:
"""
Add a profile to the collection or update the existing on if the name is already
present in this collection.
If updating an existing profile, the settings will be merged. Settings can
be dropped from the existing profile by setting them to `None` in the new
profile.
Returns the new profile object.
"""
existing = self.profiles_by_name.get(name)
# Convert the input to a `Profile` to cast settings to the correct type
profile = Profile(name=name, settings=settings, source=source)
if existing:
new_settings = {**existing.settings, **profile.settings}
# Drop null keys to restore to default
for key, value in tuple(new_settings.items()):
if value is None:
new_settings.pop(key)
new_profile = Profile(
name=profile.name,
settings=new_settings,
source=source or profile.source,
)
else:
new_profile = profile
self.profiles_by_name[new_profile.name] = new_profile
return new_profile
def add_profile(self, profile: Profile) -> None:
"""
Add a profile to the collection.
If the profile name already exists, an exception will be raised.
"""
if profile.name in self.profiles_by_name:
raise ValueError(
f"Profile name {profile.name!r} already exists in collection."
)
self.profiles_by_name[profile.name] = profile
def remove_profile(self, name: str) -> None:
"""
Remove a profile from the collection.
"""
self.profiles_by_name.pop(name)
def without_profile_source(self, path: Path | None) -> "ProfilesCollection":
"""
Remove profiles that were loaded from a given path.
Returns a new collection.
"""
return ProfilesCollection(
[
profile
for profile in self.profiles_by_name.values()
if profile.source != path
],
active=self.active_name,
)
def to_dict(self) -> dict[str, Any]:
"""
Convert to a dictionary suitable for writing to disk.
"""
return {
"active": self.active_name,
"profiles": {
profile.name: profile.to_environment_variables()
for profile in self.profiles_by_name.values()
},
}
def __getitem__(self, name: str) -> Profile:
return self.profiles_by_name[name]
def __iter__(self) -> Iterator[str]:
return self.profiles_by_name.__iter__()
def items(self) -> list[tuple[str, Profile]]:
return list(self.profiles_by_name.items())
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, ProfilesCollection):
return False
return (
self.profiles_by_name == __o.profiles_by_name
and self.active_name == __o.active_name
)
def __repr__(self) -> str:
return (
f"ProfilesCollection(profiles={list(self.profiles_by_name.values())!r},"
f" active={self.active_name!r})>"
)
def _read_profiles_from(path: Path) -> ProfilesCollection:
"""
Read profiles from a path into a new `ProfilesCollection`.
Profiles are expected to be written in TOML with the following schema:
```
active = <name: Optional[str]>
[profiles.<name: str>]
<SETTING: str> = <value: Any>
```
"""
contents = toml.loads(path.read_text())
active_profile = contents.get("active")
raw_profiles = contents.get("profiles", {})
profiles = []
for name, settings in raw_profiles.items():
profiles.append(Profile(name=name, settings=settings, source=path))
return ProfilesCollection(profiles, active=active_profile)
def _write_profiles_to(path: Path, profiles: ProfilesCollection) -> None:
"""
Write profiles in the given collection to a path as TOML.
Any existing data not present in the given `profiles` will be deleted.
"""
if not path.exists():
path.parent.mkdir(parents=True, exist_ok=True)
path.touch(mode=0o600)
path.write_text(toml.dumps(profiles.to_dict()))
def load_profiles(include_defaults: bool = True) -> ProfilesCollection:
"""
Load profiles from the current profile path. Optionally include profiles from the
default profile path.
"""
current_settings = get_current_settings()
default_profiles = _read_profiles_from(DEFAULT_PROFILES_PATH)
if current_settings.profiles_path is None:
raise RuntimeError(
"No profiles path set; please ensure `PREFECT_PROFILES_PATH` is set."
)
if not include_defaults:
if not current_settings.profiles_path.exists():
return ProfilesCollection([])
return _read_profiles_from(current_settings.profiles_path)
user_profiles_path = current_settings.profiles_path
profiles = default_profiles
if user_profiles_path.exists():
user_profiles = _read_profiles_from(user_profiles_path)
# Merge all of the user profiles with the defaults
for name in user_profiles:
if not (source := user_profiles[name].source):
raise ValueError(f"Profile {name!r} has no source.")
profiles.update_profile(
name,
settings=user_profiles[name].settings,
source=source,
)
if user_profiles.active_name:
profiles.set_active(user_profiles.active_name, check=False)
return profiles
def load_current_profile() -> Profile:
"""
Load the current profile from the default and current profile paths.
This will _not_ include settings from the current settings context. Only settings
that have been persisted to the profiles file will be saved.
"""
import prefect.context
profiles = load_profiles()
context = prefect.context.get_settings_context()
if context:
profiles.set_active(context.profile.name)
return profiles.active_profile
def save_profiles(profiles: ProfilesCollection) -> None:
"""
Writes all non-default profiles to the current profiles path.
"""
profiles_path = get_current_settings().profiles_path
assert profiles_path is not None, "Profiles path is not set."
profiles = profiles.without_profile_source(DEFAULT_PROFILES_PATH)
return _write_profiles_to(profiles_path, profiles)
def load_profile(name: str) -> Profile:
"""
Load a single profile by name.
"""
profiles = load_profiles()
try:
return profiles[name]
except KeyError:
raise ValueError(f"Profile {name!r} not found.")
def update_current_profile(
settings: dict[str | Setting, Any],
) -> Profile:
"""
Update the persisted data for the profile currently in-use.
If the profile does not exist in the profiles file, it will be created.
Given settings will be merged with the existing settings as described in
`ProfilesCollection.update_profile`.
Returns:
The new profile.
"""
import prefect.context
current_profile = prefect.context.get_settings_context().profile
if not current_profile:
from prefect.exceptions import MissingProfileError
raise MissingProfileError("No profile is currently in use.")
profiles = load_profiles()
# Ensure the current profile's settings are present
profiles.update_profile(current_profile.name, current_profile.settings)
# Then merge the new settings in
new_profile = profiles.update_profile(
current_profile.name, _cast_settings(settings)
)
new_profile.validate_settings()
save_profiles(profiles)
return profiles[current_profile.name]
| ProfilesCollection |
python | nedbat__coveragepy | coverage/files.py | {
"start": 8604,
"end": 12245
} | class ____:
"""A matcher for files by file name pattern."""
def __init__(self, pats: Iterable[str], name: str = "unknown") -> None:
self.pats = list(pats)
self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS)
self.name = name
def __repr__(self) -> str:
return f"<GlobMatcher {self.name} {self.pats!r}>"
def info(self) -> list[str]:
"""A list of strings for displaying when dumping state."""
return self.pats
def match(self, fpath: str) -> bool:
"""Does `fpath` match one of our file name patterns?"""
return self.re.match(fpath) is not None
def sep(s: str) -> str:
"""Find the path separator used in this string, or os.sep if none."""
if sep_match := re.search(r"[\\/]", s):
the_sep = sep_match[0]
else:
the_sep = os.sep
return the_sep
# Tokenizer for _glob_to_regex.
# None as a sub means disallowed.
# fmt: off
G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [
(r"\*\*\*+", None), # Can't have ***
(r"[^/]+\*\*+", None), # Can't have x**
(r"\*\*+[^/]+", None), # Can't have **x
(r"\*\*/\*\*", None), # Can't have **/**
(r"^\*+/", r"(.*[/\\\\])?"), # ^*/ matches any prefix-slash, or nothing.
(r"/\*+$", r"[/\\\\].*"), # /*$ matches any slash-suffix.
(r"\*\*/", r"(.*[/\\\\])?"), # **/ matches any subdirs, including none
(r"/", r"[/\\\\]"), # / matches either slash or backslash
(r"\*", r"[^/\\\\]*"), # * matches any number of non slash-likes
(r"\?", r"[^/\\\\]"), # ? matches one non slash-like
(r"\[.*?\]", r"\g<0>"), # [a-f] matches [a-f]
(r"[a-zA-Z0-9_-]+", r"\g<0>"), # word chars match themselves
(r"[\[\]]", None), # Can't have single square brackets
(r".", r"\\\g<0>"), # Anything else is escaped to be safe
]]
# fmt: on
def _glob_to_regex(pattern: str) -> str:
"""Convert a file-path glob pattern into a regex."""
# Turn all backslashes into slashes to simplify the tokenizer.
pattern = pattern.replace("\\", "/")
if "/" not in pattern:
pattern = f"**/{pattern}"
path_rx = []
pos = 0
while pos < len(pattern):
for rx, sub in G2RX_TOKENS: # pragma: always breaks
if m := rx.match(pattern, pos=pos):
if sub is None:
raise ConfigError(f"File pattern can't include {m[0]!r}")
path_rx.append(m.expand(sub))
pos = m.end()
break
return "".join(path_rx)
def globs_to_regex(
patterns: Iterable[str],
case_insensitive: bool = False,
partial: bool = False,
) -> re.Pattern[str]:
"""Convert glob patterns to a compiled regex that matches any of them.
Slashes are always converted to match either slash or backslash, for
Windows support, even when running elsewhere.
If the pattern has no slash or backslash, then it is interpreted as
matching a file name anywhere it appears in the tree. Otherwise, the glob
pattern must match the whole file path.
If `partial` is true, then the pattern will match if the target string
starts with the pattern. Otherwise, it must match the entire string.
Returns: a compiled regex object. Use the .match method to compare target
strings.
"""
flags = 0
if case_insensitive:
flags |= re.IGNORECASE
rx = join_regex(map(_glob_to_regex, patterns))
if not partial:
rx = rf"(?:{rx})\Z"
compiled = re.compile(rx, flags=flags)
return compiled
| GlobMatcher |
python | Lightning-AI__lightning | src/lightning/pytorch/profilers/profiler.py | {
"start": 970,
"end": 5531
} | class ____(ABC):
"""If you wish to write a custom profiler, you should inherit from this class."""
def __init__(
self,
dirpath: Optional[Union[str, Path]] = None,
filename: Optional[str] = None,
) -> None:
self.dirpath = dirpath
self.filename = filename
self._output_file: Optional[TextIO] = None
self._write_stream: Optional[Callable] = None
self._local_rank: Optional[int] = None
self._stage: Optional[str] = None
@abstractmethod
def start(self, action_name: str) -> None:
"""Defines how to start recording an action."""
@abstractmethod
def stop(self, action_name: str) -> None:
"""Defines how to record the duration once an action is complete."""
def summary(self) -> str:
return ""
@contextmanager
def profile(self, action_name: str) -> Generator:
"""Yields a context manager to encapsulate the scope of a profiled action.
Example::
with self.profile('load training data'):
# load training data code
The profiler will start once you've entered the context and will automatically
stop once you exit the code block.
"""
try:
self.start(action_name)
yield action_name
finally:
self.stop(action_name)
def _rank_zero_info(self, *args: Any, **kwargs: Any) -> None:
if self._local_rank in (None, 0):
log.info(*args, **kwargs)
def _prepare_filename(
self,
action_name: Optional[str] = None,
extension: str = ".txt",
split_token: str = "-", # noqa: S107
) -> str:
args = []
if self._stage is not None:
args.append(self._stage)
if self.filename:
args.append(self.filename)
if self._local_rank is not None:
args.append(str(self._local_rank))
if action_name is not None:
args.append(action_name)
return split_token.join(args) + extension
def _prepare_streams(self) -> None:
if self._write_stream is not None:
return
if self.filename and self.dirpath:
filepath = os.path.join(self.dirpath, self._prepare_filename())
fs = get_filesystem(filepath)
fs.mkdirs(self.dirpath, exist_ok=True)
file = fs.open(filepath, "a")
self._output_file = file
self._write_stream = file.write
else:
self._write_stream = self._rank_zero_info
def describe(self) -> None:
"""Logs a profile report after the conclusion of run."""
# users might call `describe` directly as the profilers can be used by themselves.
# to allow this, we open and close the files within this function by calling `_prepare_streams` and `teardown`
# manually instead of letting the `Trainer` do it through `setup` and `teardown`
self._prepare_streams()
summary = self.summary()
if summary and self._write_stream is not None:
self._write_stream(summary)
if self._output_file is not None:
self._output_file.flush()
self.teardown(stage=self._stage)
def _stats_to_str(self, stats: dict[str, str]) -> str:
stage = f"{self._stage.upper()} " if self._stage is not None else ""
output = [stage + "Profiler Report"]
for action, value in stats.items():
header = f"Profile stats for: {action}"
if self._local_rank is not None:
header += f" rank: {self._local_rank}"
output.append(header)
output.append(value)
return os.linesep.join(output)
def setup(self, stage: str, local_rank: Optional[int] = None, log_dir: Optional[str] = None) -> None:
"""Execute arbitrary pre-profiling set-up steps."""
self._stage = stage
self._local_rank = local_rank
self.dirpath = self.dirpath or log_dir
def teardown(self, stage: Optional[str]) -> None:
"""Execute arbitrary post-profiling tear-down steps.
Closes the currently open file and stream.
"""
self._write_stream = None
if self._output_file is not None:
self._output_file.close()
self._output_file = None # can't pickle TextIOWrapper
def __del__(self) -> None:
self.teardown(stage=self._stage)
@property
def local_rank(self) -> int:
return 0 if self._local_rank is None else self._local_rank
| Profiler |
python | pytorch__pytorch | torch/utils/_content_store.py | {
"start": 5624,
"end": 7447
} | class ____:
# Structure:
# storages/
# 00/
# 0000..00
# tensors/
# name
def __init__(self, loc: str, stable_hash: bool = False) -> None:
self.loc: str = loc
self.seen_storage_hashes: set[str] = set()
self.stable_hash = stable_hash
# TODO: offer some sort of non-blocking API to speed things up
def write_storage(self, storage: torch.UntypedStorage) -> str:
h = hash_storage(storage, stable_hash=self.stable_hash)
if h in self.seen_storage_hashes:
return h
# TODO: consider not using torch.save for this; we don't actually
# need any metadata for the storage
subfolder = os.path.join(self.loc, "storages")
os.makedirs(subfolder, exist_ok=True)
target = os.path.join(subfolder, h)
if os.path.exists(target):
return h
torch.save(storage, target)
self.seen_storage_hashes.add(h)
return h
def compute_tensor_metadata(self, t: torch.Tensor, h=None):
if h is None:
h = hash_storage(t.untyped_storage(), stable_hash=self.stable_hash)
return (
t.dtype,
h,
t.storage_offset(),
tuple(t.shape),
t.stride(),
torch._utils.get_tensor_metadata(t),
)
def write_tensor(self, name: str, t: torch.Tensor) -> None:
storage = t.untyped_storage()
h = self.write_storage(storage)
# TODO: Support more advanced snapshotting of requires_grad/grad/etc
d, f = os.path.split(name)
payload = self.compute_tensor_metadata(t, h=h)
subfolder = os.path.join(self.loc, "tensors", d)
os.makedirs(subfolder, exist_ok=True)
torch.save(payload, os.path.join(subfolder, f))
| ContentStoreWriter |
python | celery__celery | celery/utils/abstract.py | {
"start": 1225,
"end": 2874
} | class ____(CallableTask): # pragma: no cover
"""Celery Signature interface."""
__required_attributes__ = frozenset({
'clone', 'freeze', 'set', 'link', 'link_error', '__or__',
})
@property
@abstractmethod
def name(self):
pass
@property
@abstractmethod
def type(self):
pass
@property
@abstractmethod
def app(self):
pass
@property
@abstractmethod
def id(self):
pass
@property
@abstractmethod
def task(self):
pass
@property
@abstractmethod
def args(self):
pass
@property
@abstractmethod
def kwargs(self):
pass
@property
@abstractmethod
def options(self):
pass
@property
@abstractmethod
def subtask_type(self):
pass
@property
@abstractmethod
def chord_size(self):
pass
@property
@abstractmethod
def immutable(self):
pass
@abstractmethod
def clone(self, args=None, kwargs=None):
pass
@abstractmethod
def freeze(self, id=None, group_id=None, chord=None, root_id=None,
group_index=None):
pass
@abstractmethod
def set(self, immutable=None, **options):
pass
@abstractmethod
def link(self, callback):
pass
@abstractmethod
def link_error(self, errback):
pass
@abstractmethod
def __or__(self, other):
pass
@abstractmethod
def __invert__(self):
pass
@classmethod
def __subclasshook__(cls, C):
return cls._subclasshook_using(CallableSignature, C)
| CallableSignature |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 20337,
"end": 20444
} | class ____(StringEnum):
iter = "iter"
timestamp = "timestamp"
iso_time = "iso_time"
| ScalarKeyEnum |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-firestore/llama_index/readers/firestore/base.py | {
"start": 340,
"end": 2876
} | class ____(BaseReader):
"""
Simple Firestore reader.
Args:
project_id (str): The Google Cloud Project ID.
*args (Optional[Any]): Additional arguments.
**kwargs (Optional[Any]): Additional keyword arguments.
Returns:
FirestoreReader: A FirestoreReader object.
"""
def __init__(
self,
project_id: str,
database_id: str = DEFAULT_FIRESTORE_DATABASE,
*args: Optional[Any],
**kwargs: Optional[Any],
) -> None:
"""Initialize with parameters."""
try:
from google.cloud import firestore
from google.cloud.firestore_v1.services.firestore.transports.base import (
DEFAULT_CLIENT_INFO,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MSG)
client_info = DEFAULT_CLIENT_INFO
client_info.user_agent = USER_AGENT
self.db = firestore.Client(
project=project_id, database=database_id, client_info=client_info
)
def load_data(self, collection: str) -> List[Document]:
"""
Load data from a Firestore collection, returning a list of Documents.
Args:
collection (str): The name of the Firestore collection to read from.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
col_ref = self.db.collection(collection)
for doc in col_ref.stream():
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
documents.append(Document(text=doc_str))
return documents
def load_document(self, document_url: str) -> Document:
"""
Load a single document from Firestore.
Args:
document_url (str): The absolute path to the Firestore document to read.
Returns:
Document: A Document object.
"""
parts = document_url.split("/")
if len(parts) % 2 != 0:
raise ValueError(f"Invalid document URL: {document_url}")
ref = self.db.collection(parts[0])
for i in range(1, len(parts)):
if i % 2 == 0:
ref = ref.collection(parts[i])
else:
ref = ref.document(parts[i])
doc = ref.get()
if not doc.exists:
raise ValueError(f"No such document: {document_url}")
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
return Document(text=doc_str)
| FirestoreReader |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 31941,
"end": 32393
} | class ____(ExtensionType):
oid = ExtensionOID.PRECERT_POISON
def __eq__(self, other: object) -> bool:
if not isinstance(other, PrecertPoison):
return NotImplemented
return True
def __hash__(self) -> int:
return hash(PrecertPoison)
def __repr__(self) -> str:
return "<PrecertPoison()>"
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
| PrecertPoison |
python | streamlit__streamlit | lib/streamlit/runtime/scriptrunner_utils/script_run_context.py | {
"start": 2136,
"end": 10893
} | class ____:
"""A context object that contains data for a "script run" - that is,
data that's scoped to a single ScriptRunner execution (and therefore also
scoped to a single connected "session").
ScriptRunContext is used internally by virtually every `st.foo()` function.
It is accessed only from the script thread that's created by ScriptRunner,
or from app-created helper threads that have been "attached" to the
ScriptRunContext via `add_script_run_ctx`.
Streamlit code typically retrieves the active ScriptRunContext via the
`get_script_run_ctx` function.
"""
session_id: str
_enqueue: Callable[[ForwardMsg], None]
query_string: str
session_state: SafeSessionState
uploaded_file_mgr: UploadedFileManager
main_script_path: str
user_info: UserInfo
fragment_storage: FragmentStorage
pages_manager: PagesManager
# Hashes of messages that are cached in the client browser:
cached_message_hashes: set[str] = field(default_factory=set)
context_info: ContextInfo | None = None
gather_usage_stats: bool = False
command_tracking_deactivated: bool = False
tracked_commands: list[Command] = field(default_factory=list)
tracked_commands_counter: Counter[str] = field(default_factory=collections.Counter)
_has_script_started: bool = False
widget_ids_this_run: set[str] = field(default_factory=set)
widget_user_keys_this_run: set[str] = field(default_factory=set)
form_ids_this_run: set[str] = field(default_factory=set)
cursors: dict[int, RunningCursor] = field(default_factory=dict)
script_requests: ScriptRequests | None = None
current_fragment_id: str | None = None
fragment_ids_this_run: list[str] | None = None
new_fragment_ids: set[str] = field(default_factory=set)
in_fragment_callback: bool = False
_active_script_hash: str = ""
# we allow only one dialog to be open at the same time
has_dialog_opened: bool = False
# TODO(willhuang1997): Remove this variable when experimental query params are removed
_experimental_query_params_used = False
_production_query_params_used = False
@property
def page_script_hash(self) -> str:
return self.pages_manager.current_page_script_hash
@property
def active_script_hash(self) -> str:
return self._active_script_hash
@property
def main_script_parent(self) -> Path:
return self.pages_manager.main_script_parent
@contextlib.contextmanager
def run_with_active_hash(self, page_hash: str) -> Generator[None, None, None]:
original_page_hash = self._active_script_hash
self._active_script_hash = page_hash
try:
yield
finally:
# in the event of any exception, ensure we set the active hash back
self._active_script_hash = original_page_hash
def set_mpa_v2_page(self, page_script_hash: str) -> None:
self._active_script_hash = self.pages_manager.main_script_hash
self.pages_manager.set_current_page_script_hash(page_script_hash)
def reset(
self,
query_string: str = "",
page_script_hash: str = "",
fragment_ids_this_run: list[str] | None = None,
cached_message_hashes: set[str] | None = None,
context_info: ContextInfo | None = None,
) -> None:
self.cursors = {}
self.widget_ids_this_run = set()
self.widget_user_keys_this_run = set()
self.form_ids_this_run = set()
self.query_string = query_string
self.context_info = context_info
self.pages_manager.set_current_page_script_hash(page_script_hash)
self._active_script_hash = self.pages_manager.main_script_hash
self._has_script_started = False
self.command_tracking_deactivated: bool = False
self.tracked_commands = []
self.tracked_commands_counter = collections.Counter()
self.current_fragment_id = None
self.current_fragment_delta_path: list[int] = []
self.fragment_ids_this_run = fragment_ids_this_run
self.new_fragment_ids = set()
self.has_dialog_opened = False
self.cached_message_hashes = cached_message_hashes or set()
in_cached_function.set(False)
parsed_query_params = parse.parse_qs(query_string, keep_blank_values=True)
with self.session_state.query_params() as qp:
qp.clear_with_no_forward_msg()
for key, val in parsed_query_params.items():
if len(val) == 0:
qp.set_with_no_forward_msg(key, val="")
elif len(val) == 1:
qp.set_with_no_forward_msg(key, val=val[-1])
else:
qp.set_with_no_forward_msg(key, val)
def on_script_start(self) -> None:
self._has_script_started = True
def enqueue(self, msg: ForwardMsg) -> None:
"""Enqueue a ForwardMsg for this context's session."""
msg.metadata.active_script_hash = self.active_script_hash
# We populate the hash and cacheable field for all messages.
# Besides the forward message cache, the hash might also be used
# for other aspects within the frontend.
populate_hash_if_needed(msg)
msg_to_send = msg
if (
msg.metadata.cacheable
and msg.hash
and msg.hash in self.cached_message_hashes
):
_LOGGER.debug("Sending cached message ref (hash=%s)", msg.hash)
msg_to_send = create_reference_msg(msg)
# Pass the message up to our associated ScriptRunner.
self._enqueue(msg_to_send)
def ensure_single_query_api_used(self) -> None:
if self._experimental_query_params_used and self._production_query_params_used:
raise StreamlitAPIException(
"Using `st.query_params` together with either `st.experimental_get_query_params` "
"or `st.experimental_set_query_params` is not supported. Please "
" convert your app to only use `st.query_params`"
)
def mark_experimental_query_params_used(self) -> None:
self._experimental_query_params_used = True
self.ensure_single_query_api_used()
def mark_production_query_params_used(self) -> None:
self._production_query_params_used = True
self.ensure_single_query_api_used()
SCRIPT_RUN_CONTEXT_ATTR_NAME: Final = "streamlit_script_run_ctx"
def add_script_run_ctx(
thread: threading.Thread | None = None, ctx: ScriptRunContext | None = None
) -> threading.Thread:
"""Adds the current ScriptRunContext to a newly-created thread.
This should be called from this thread's parent thread,
before the new thread starts.
Parameters
----------
thread : threading.Thread
The thread to attach the current ScriptRunContext to.
ctx : ScriptRunContext or None
The ScriptRunContext to add, or None to use the current thread's
ScriptRunContext.
Returns
-------
threading.Thread
The same thread that was passed in, for chaining.
"""
if thread is None:
thread = threading.current_thread()
if ctx is None:
ctx = get_script_run_ctx()
if ctx is not None:
setattr(thread, SCRIPT_RUN_CONTEXT_ATTR_NAME, ctx)
return thread
def get_script_run_ctx(suppress_warning: bool = False) -> ScriptRunContext | None:
"""
Parameters
----------
suppress_warning : bool
If True, don't log a warning if there's no ScriptRunContext.
Returns
-------
ScriptRunContext | None
The current thread's ScriptRunContext, or None if it doesn't have one.
"""
thread = threading.current_thread()
ctx: ScriptRunContext | None = getattr(thread, SCRIPT_RUN_CONTEXT_ATTR_NAME, None)
if ctx is None and not suppress_warning:
# Only warn about a missing ScriptRunContext if suppress_warning is False, and
# we were started via `streamlit run`. Otherwise, the user is likely running a
# script "bare", and doesn't need to be warned about streamlit
# bits that are irrelevant when not connected to a session.
_LOGGER.warning(
"Thread '%s': missing ScriptRunContext! This warning can be ignored when "
"running in bare mode.",
thread.name,
)
return ctx
def enqueue_message(msg: ForwardMsg) -> None:
"""Enqueues a ForwardMsg proto to send to the app."""
ctx = get_script_run_ctx()
if ctx is None:
raise NoSessionContext()
if ctx.current_fragment_id and msg.WhichOneof("type") == "delta":
msg.delta.fragment_id = ctx.current_fragment_id
ctx.enqueue(msg)
| ScriptRunContext |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 1035,
"end": 1093
} | class ____(BaseModel, extra=1):
pass
| KwargsBadExtraModel |
python | django__django | django/contrib/gis/db/models/aggregates.py | {
"start": 2205,
"end": 2306
} | class ____(GeoAggregate):
name = "Collect"
output_field_class = GeometryCollectionField
| Collect |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 44508,
"end": 46082
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 4]"):
l_x_ = L_x_
add_: "f32[3, 4]" = l_x_.add_(1.0)
relu_: "f32[3, 4]" = torch.relu_(l_x_); l_x_ = None
add: "f32[3, 4]" = add_ + relu_; add_ = relu_ = None
return (add,)
""",
)
self.assertTrue(torch._is_functional_tensor(backend.example_inputs[1][0]))
# Cannot reuse the version from AOTAutograd, since that uses python functional tensors.
def to_fun(x):
x_functional = torch._to_functional_tensor(x)
torch._mirror_autograd_meta_to(x, x_functional)
return x_functional
def aot_f_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
torch._enable_functionalization(reapply_views=False)
try:
func_args = pytree.tree_map(to_fun, args)
func_kwargs = pytree.tree_map(to_fun, kwargs)
return func(*func_args, **func_kwargs)
finally:
torch._disable_functionalization()
return wrapper
aot_ff = aot_f_wrapper(f)
aot_ff_out = aot_ff(x_clone2)
self.assertEqual(cnt.frame_count, 3)
self.assertEqual(cnt.op_count, 9)
self.assertEqual(len(backend.graphs), 3)
self.assertEqual(len(backend.example_inputs), 3)
actual = normalize_gm(backend.graphs[2].print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | joke2k__faker | faker/providers/phone_number/de_AT/__init__.py | {
"start": 49,
"end": 2505
} | class ____(PhoneNumberProvider):
"""Phone number provider for `de_AT` locale.
Sources:
- https://de.wikipedia.org/wiki/Telefonvorwahl_(%C3%96sterreich)
"""
dialing_codes = (
"650",
"655",
"660",
"661",
"663",
"664",
"665",
"667",
"670",
"676",
"677",
"678",
"680",
"681",
"688",
"690",
"699",
)
area_code_formats = (
"1", # Wien
"316", # Graz
"463", # Klagenfurt
"512", # Innsbruck
"662", # Salzburg
"732", # Linz
"21##",
"22##",
"25##",
"26##",
"27##",
"28##",
"29##",
"31##",
"33##",
"34##",
"35##",
"36##",
"38##",
"42##",
"43##",
"47##",
"48##",
"52##",
"53##",
"54##",
"55##",
"56##",
"61##",
"62##",
"64##",
"65##",
"72##",
"73##",
"74##",
"75##",
"76##",
"77##",
"79##",
)
cellphone_formats = (
"+43 (0) {{dialing_code}} ########",
"+43 {{dialing_code}} ### ### ##",
"+43 {{dialing_code}}########",
"0{{dialing_code}} ### ### ##",
"0{{dialing_code}}/########",
)
landline_formats = (
"+43 (0) {{area_code}} ########",
"+43 {{area_code}} ##### ###",
"+43 {{area_code}}########",
"0{{area_code}} ##### ###",
"(0{{area_code}}) ##### ###",
"0{{area_code}}/########",
)
"""
Get dialing code for cellphone numbers.
"""
def dialing_code(self) -> str:
return self.random_element(self.dialing_codes)
"""
Get area code for landlines.
"""
def area_code(self) -> str:
area_code: str = self.random_element(self.area_code_formats)
return self.numerify(area_code)
"""
Get a landline phone number.
"""
def phone_number(self) -> str:
pattern: str = self.random_element(self.landline_formats)
return self.numerify(self.generator.parse(pattern))
"""
Get a cellphone number.
"""
def cellphone_number(self) -> str:
pattern: str = self.random_element(self.cellphone_formats)
return self.numerify(self.generator.parse(pattern))
| Provider |
python | django-crispy-forms__django-crispy-forms | crispy_forms/exceptions.py | {
"start": 41,
"end": 288
} | class ____(CrispyError):
"""
This is raised when building a form via helpers throws an error.
We want to catch form helper errors as soon as possible because
debugging templatetags is never fun.
"""
pass
| FormHelpersException |
python | modin-project__modin | modin/core/dataframe/pandas/interchange/dataframe_protocol/dataframe.py | {
"start": 1671,
"end": 7859
} | class ____(ProtocolDataframe):
"""
A data frame class, with only the methods required by the interchange protocol defined.
Instances of this (private) class are returned from ``modin.pandas.DataFrame.__dataframe__``
as objects with the methods and attributes defined on this class.
A "data frame" represents an ordered collection of named columns.
A column's "name" must be a unique string. Columns may be accessed by name or by position.
This could be a public data frame class, or an object with the methods and
attributes defined on this DataFrame class could be returned from the
``__dataframe__`` method of a public data frame class in a library adhering
to the dataframe interchange protocol specification.
Parameters
----------
df : PandasDataframe
A ``PandasDataframe`` object.
nan_as_null : bool, default:False
A keyword intended for the consumer to tell the producer
to overwrite null values in the data with ``NaN`` (or ``NaT``).
This currently has no effect; once support for nullable extension
dtypes is added, this value should be propagated to columns.
allow_copy : bool, default: True
A keyword that defines whether or not the library is allowed
to make a copy of the data. For example, copying data would be necessary
if a library supports strided buffers, given that this protocol
specifies contiguous buffers. Currently, if the flag is set to ``False``
and a copy is needed, a ``RuntimeError`` will be raised.
"""
def __init__(
self,
df: PandasDataframe,
nan_as_null: bool = False,
allow_copy: bool = True,
) -> None:
self._df = df
self._nan_as_null = nan_as_null
self._allow_copy = allow_copy
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
return PandasProtocolDataframe(
self._df, nan_as_null=nan_as_null, allow_copy=allow_copy
)
@property
def metadata(self) -> Dict[str, Any]:
return {"modin.index": self._df.index}
def num_columns(self) -> int:
return len(self._df.columns)
def num_rows(self) -> int:
return len(self._df.index)
def num_chunks(self) -> int:
return self._df._partitions.shape[0]
def column_names(self) -> Iterable[str]:
for col in self._df.columns:
yield col
def get_column(self, i: int) -> PandasProtocolColumn:
return PandasProtocolColumn(
self._df.take_2d_labels_or_positional(
row_positions=None, col_positions=[i]
),
allow_copy=self._allow_copy,
)
def get_column_by_name(self, name: str) -> PandasProtocolColumn:
return PandasProtocolColumn(
self._df.take_2d_labels_or_positional(
row_positions=None, col_labels=[name]
),
allow_copy=self._allow_copy,
)
def get_columns(self) -> Iterable[PandasProtocolColumn]:
for name in self._df.columns:
yield PandasProtocolColumn(
self._df.take_2d_labels_or_positional(
row_positions=None, col_labels=[name]
),
allow_copy=self._allow_copy,
)
def select_columns(self, indices: Sequence[int]) -> "PandasProtocolDataframe":
if not isinstance(indices, collections.abc.Sequence):
raise ValueError("`indices` is not a sequence")
return PandasProtocolDataframe(
self._df.take_2d_labels_or_positional(
row_positions=None, col_positions=indices
),
allow_copy=self._allow_copy,
)
def select_columns_by_name(self, names: Sequence[str]) -> "PandasProtocolDataframe":
if not isinstance(names, collections.abc.Sequence):
raise ValueError("`names` is not a sequence")
return PandasProtocolDataframe(
self._df.take_2d_labels_or_positional(row_positions=None, col_labels=names),
allow_copy=self._allow_copy,
)
def get_chunks(
self, n_chunks: Optional[int] = None
) -> Iterable["PandasProtocolDataframe"]:
cur_n_chunks = self.num_chunks()
n_rows = self.num_rows()
if n_chunks is None or n_chunks == cur_n_chunks:
cum_row_lengths = np.cumsum([0] + self._df.row_lengths)
for i in range(len(cum_row_lengths) - 1):
yield PandasProtocolDataframe(
self._df.take_2d_labels_or_positional(
row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
col_positions=None,
),
allow_copy=self._allow_copy,
)
return
if n_chunks % cur_n_chunks != 0:
raise RuntimeError(
"The passed `n_chunks` must be a multiple of `self.num_chunks()`."
)
if n_chunks > n_rows:
raise RuntimeError(
"The passed `n_chunks` value is bigger than `self.num_rows()`."
)
chunksize = n_rows // n_chunks
new_lengths = [chunksize] * n_chunks
new_lengths[-1] = n_rows % n_chunks + new_lengths[-1]
new_partitions = self._df._partition_mgr_cls.map_axis_partitions(
0,
self._df._partitions,
lambda df: df,
keep_partitioning=False,
lengths=new_lengths,
)
new_df = self._df.__constructor__(
new_partitions,
self._df.index,
self._df.columns,
new_lengths,
self._df.column_widths,
)
cum_row_lengths = np.cumsum([0] + new_df.row_lengths)
for i in range(len(cum_row_lengths) - 1):
yield PandasProtocolDataframe(
new_df.take_2d_labels_or_positional(
row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
col_positions=None,
),
allow_copy=self._allow_copy,
)
| PandasProtocolDataframe |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/tests/test_gh_base_url.py | {
"start": 78,
"end": 1230
} | class ____:
pass
@pytest.fixture()
def github_reader():
return GithubRepositoryReader(
github_client=MockGithubClient(), owner="owner", repo="repo"
)
@pytest.mark.parametrize(
("blob_url", "expected_base_url"),
[
("https://github.com/owner/repo/blob/main/file.py", "https://github.com/"),
(
"https://github-enterprise.com/owner/repo/blob/main/file.py",
"https://github-enterprise.com/",
),
(
"https://custom-domain.com/owner/repo/blob/main/file.py",
"https://custom-domain.com/",
),
(
"https://subdomain.github.com/owner/repo/blob/main/file.py",
"https://subdomain.github.com/",
),
(
"https://something.org/owner/repo/blob/main/file.py",
"https://github.com/",
),
("", "https://github.com/"),
],
)
def test_get_base_url(github_reader, blob_url, expected_base_url):
base_url = github_reader._get_base_url(blob_url)
assert base_url == expected_base_url, (
f"Expected {expected_base_url}, but got {base_url}"
)
| MockGithubClient |
python | sphinx-doc__sphinx | sphinx/ext/napoleon/__init__.py | {
"start": 502,
"end": 18649
} | class ____:
"""Sphinx napoleon extension settings in `conf.py`.
Listed below are all the settings used by napoleon and their default
values. These settings can be changed in the Sphinx `conf.py` file. Make
sure that "sphinx.ext.napoleon" is enabled in `conf.py`::
# conf.py
# Add any Sphinx extension module names here, as strings
extensions = ['sphinx.ext.napoleon']
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_preprocess_types = False
napoleon_type_aliases = None
napoleon_custom_sections = None
napoleon_attr_annotations = True
.. _Google style:
https://google.github.io/styleguide/pyguide.html
.. _NumPy style:
https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
Attributes
----------
napoleon_google_docstring : :obj:`bool` (Defaults to True)
True to parse `Google style`_ docstrings. False to disable support
for Google style docstrings.
napoleon_numpy_docstring : :obj:`bool` (Defaults to True)
True to parse `NumPy style`_ docstrings. False to disable support
for NumPy style docstrings.
napoleon_include_init_with_doc : :obj:`bool` (Defaults to False)
True to list ``__init___`` docstrings separately from the class
docstring. False to fall back to Sphinx's default behavior, which
considers the ``__init___`` docstring as part of the class
documentation.
**If True**::
def __init__(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
def __init__(self):
# This will NOT be included in the docs
napoleon_include_private_with_doc : :obj:`bool` (Defaults to False)
True to include private members (like ``_membername``) with docstrings
in the documentation. False to fall back to Sphinx's default behavior.
**If True**::
def _included(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
pass
def _skipped(self):
# This will NOT be included in the docs
pass
napoleon_include_special_with_doc : :obj:`bool` (Defaults to False)
True to include special members (like ``__membername__``) with
docstrings in the documentation. False to fall back to Sphinx's
default behavior.
**If True**::
def __str__(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
return unicode(self).encode('utf-8')
def __unicode__(self):
# This will NOT be included in the docs
return unicode(self.__class__.__name__)
napoleon_use_admonition_for_examples : :obj:`bool` (Defaults to False)
True to use the ``.. admonition::`` directive for the **Example** and
**Examples** sections. False to use the ``.. rubric::`` directive
instead. One may look better than the other depending on what HTML
theme is used.
This `NumPy style`_ snippet will be converted as follows::
Example
-------
This is just a quick example
**If True**::
.. admonition:: Example
This is just a quick example
**If False**::
.. rubric:: Example
This is just a quick example
napoleon_use_admonition_for_notes : :obj:`bool` (Defaults to False)
True to use the ``.. admonition::`` directive for **Notes** sections.
False to use the ``.. rubric::`` directive instead.
Note
----
The singular **Note** section will always be converted to a
``.. note::`` directive.
See Also
--------
:confval:`napoleon_use_admonition_for_examples`
napoleon_use_admonition_for_references : :obj:`bool` (Defaults to False)
True to use the ``.. admonition::`` directive for **References**
sections. False to use the ``.. rubric::`` directive instead.
See Also
--------
:confval:`napoleon_use_admonition_for_examples`
napoleon_use_ivar : :obj:`bool` (Defaults to False)
True to use the ``:ivar:`` role for instance variables. False to use
the ``.. attribute::`` directive instead.
This `NumPy style`_ snippet will be converted as follows::
Attributes
----------
attr1 : int
Description of `attr1`
**If True**::
:ivar attr1: Description of `attr1`
:vartype attr1: int
**If False**::
.. attribute:: attr1
Description of `attr1`
:type: int
napoleon_use_param : :obj:`bool` (Defaults to True)
True to use a ``:param:`` role for each function parameter. False to
use a single ``:parameters:`` role for all the parameters.
This `NumPy style`_ snippet will be converted as follows::
Parameters
----------
arg1 : str
Description of `arg1`
arg2 : int, optional
Description of `arg2`, defaults to 0
**If True**::
:param arg1: Description of `arg1`
:type arg1: str
:param arg2: Description of `arg2`, defaults to 0
:type arg2: int, optional
**If False**::
:parameters: * **arg1** (*str*) --
Description of `arg1`
* **arg2** (*int, optional*) --
Description of `arg2`, defaults to 0
napoleon_use_keyword : :obj:`bool` (Defaults to True)
True to use a ``:keyword:`` role for each function keyword argument.
False to use a single ``:keyword arguments:`` role for all the
keywords.
This behaves similarly to :confval:`napoleon_use_param`. Note unlike
docutils, ``:keyword:`` and ``:param:`` will not be treated the same
way - there will be a separate "Keyword Arguments" section, rendered
in the same fashion as "Parameters" section (type links created if
possible)
See Also
--------
:confval:`napoleon_use_param`
napoleon_use_rtype : :obj:`bool` (Defaults to True)
True to use the ``:rtype:`` role for the return type. False to output
the return type inline with the description.
This `NumPy style`_ snippet will be converted as follows::
Returns
-------
bool
True if successful, False otherwise
**If True**::
:returns: True if successful, False otherwise
:rtype: bool
**If False**::
:returns: *bool* -- True if successful, False otherwise
napoleon_preprocess_types : :obj:`bool` (Defaults to False)
Enable the type preprocessor.
napoleon_type_aliases : :obj:`dict` (Defaults to None)
Add a mapping of strings to string, translating types in numpy
style docstrings. Only works if ``napoleon_preprocess_types = True``.
napoleon_custom_sections : :obj:`list` (Defaults to None)
Add a list of custom sections to include, expanding the list of parsed sections.
The entries can either be strings or tuples, depending on the intention:
* To create a custom "generic" section, just pass a string.
* To create an alias for an existing section, pass a tuple containing the
alias name and the original, in that order.
* To create a custom section that displays like the parameters or returns
section, pass a tuple containing the custom section name and a string
value, "params_style" or "returns_style".
If an entry is just a string, it is interpreted as a header for a generic
section. If the entry is a tuple/list/indexed container, the first entry
is the name of the section, the second is the section key to emulate. If the
second entry value is "params_style" or "returns_style", the custom section
will be displayed like the parameters section or returns section.
napoleon_attr_annotations : :obj:`bool` (Defaults to True)
Use the type annotations of class attributes that are documented in the docstring
but do not have a type in the docstring.
"""
_config_values: Sequence[tuple[str, bool | None, _ConfigRebuild, Set[type]]] = (
('napoleon_google_docstring', True, 'env', frozenset({bool})),
('napoleon_numpy_docstring', True, 'env', frozenset({bool})),
('napoleon_include_init_with_doc', False, 'env', frozenset({bool})),
('napoleon_include_private_with_doc', False, 'env', frozenset({bool})),
('napoleon_include_special_with_doc', False, 'env', frozenset({bool})),
('napoleon_use_admonition_for_examples', False, 'env', frozenset({bool})),
('napoleon_use_admonition_for_notes', False, 'env', frozenset({bool})),
('napoleon_use_admonition_for_references', False, 'env', frozenset({bool})),
('napoleon_use_ivar', False, 'env', frozenset({bool})),
('napoleon_use_param', True, 'env', frozenset({bool})),
('napoleon_use_rtype', True, 'env', frozenset({bool})),
('napoleon_use_keyword', True, 'env', frozenset({bool})),
('napoleon_preprocess_types', False, 'env', frozenset({bool})),
('napoleon_type_aliases', None, 'env', frozenset({dict, NoneType})),
('napoleon_custom_sections', None, 'env', frozenset({list, tuple, NoneType})),
('napoleon_attr_annotations', True, 'env', frozenset({bool})),
)
def __init__(self, **settings: Any) -> None:
for name, default, _rebuild, _types in self._config_values:
setattr(self, name, default)
for name, value in settings.items():
setattr(self, name, value)
def setup(app: Sphinx) -> ExtensionMetadata:
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
the ``setup()`` function, which in turn notifies Sphinx of everything
the extension offers.
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
See Also
--------
`The Sphinx documentation on Extensions
<https://www.sphinx-doc.org/extensions.html>`_
`The Extension Tutorial <https://www.sphinx-doc.org/extdev/tutorial.html>`_
`The Extension API <https://www.sphinx-doc.org/extdev/appapi.html>`_
"""
if not isinstance(app, Sphinx):
# probably called by tests
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
_patch_python_domain()
app.setup_extension('sphinx.ext.autodoc')
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
for name, default, rebuild, types in Config._config_values:
app.add_config_value(name, default, rebuild, types=types)
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
def _patch_python_domain() -> None:
from sphinx.domains.python._object import PyObject, PyTypedField
from sphinx.locale import _
for doc_field in PyObject.doc_field_types:
if doc_field.name == 'parameter':
doc_field.names = ('param', 'parameter', 'arg', 'argument')
break
PyObject.doc_field_types.append(
PyTypedField(
'keyword',
label=_('Keyword Arguments'),
names=('keyword', 'kwarg', 'kwparam'),
typerolename='class',
typenames=('paramtype', 'kwtype'),
can_collapse=True,
)
)
def _process_docstring(
app: Sphinx, what: str, name: str, obj: Any, options: Any, lines: list[str]
) -> None:
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
of docstring lines that `_process_docstring` modifies in place to change
what Sphinx outputs.
The following settings in conf.py control what styles of docstrings will
be parsed:
* ``napoleon_google_docstring`` -- parse Google style docstrings
* ``napoleon_numpy_docstring`` -- parse NumPy style docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process.
what : str
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and no_index that
are True if the flag option of same name was given to the auto
directive.
lines : list of str
The lines of the docstring, see above.
.. note:: `lines` is modified *in place*
"""
result_lines = lines
docstring: GoogleDocstring
if app.config.napoleon_numpy_docstring:
docstring = NumpyDocstring(
result_lines, app.config, app, what, name, obj, options
)
result_lines = docstring.lines()
if app.config.napoleon_google_docstring:
docstring = GoogleDocstring(
result_lines, app.config, app, what, name, obj, options
)
result_lines = docstring.lines()
lines[:] = result_lines.copy()
def _skip_member(
app: Sphinx, what: str, name: str, obj: Any, skip: bool, options: Any
) -> bool | None:
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
members or init methods are included in the generated documentation:
* ``napoleon_include_init_with_doc`` --
include init methods if they have docstrings
* ``napoleon_include_private_with_doc`` --
include private members if they have docstrings
* ``napoleon_include_special_with_doc`` --
include special members if they have docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
what : str
A string specifying the type of the object to which the member
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The name of the member.
obj : module, class, exception, function, method, or attribute.
For example, if the member is the __init__ method of class A, then
`obj` will be `A.__init__`.
skip : bool
A boolean indicating if autodoc will skip this member if `_skip_member`
does not override the decision
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and no_index that
are True if the flag option of same name was given to the auto
directive.
Returns
-------
bool
True if the member should be skipped during creation of the docs,
False if it should be included in the docs.
"""
has_doc = getattr(obj, '__doc__', False)
is_member = what in {'class', 'exception', 'module'}
if name != '__weakref__' and has_doc and is_member:
cls_is_owner = False
if what in {'class', 'exception'}:
qualname = getattr(obj, '__qualname__', '')
cls_path, _, _ = qualname.rpartition('.')
if cls_path:
try:
if '.' in cls_path:
import functools
import importlib
mod = importlib.import_module(obj.__module__)
mod_path = cls_path.split('.')
cls = functools.reduce(getattr, mod_path, mod)
else:
cls = inspect.unwrap(obj).__globals__[cls_path]
except Exception:
cls_is_owner = False
else:
cls_is_owner = (
cls # type: ignore[assignment]
and hasattr(cls, name)
and name in cls.__dict__
)
else:
cls_is_owner = False
if what == 'module' or cls_is_owner:
is_init = name == '__init__'
is_special = not is_init and name.startswith('__') and name.endswith('__')
is_private = not is_init and not is_special and name.startswith('_')
inc_init = app.config.napoleon_include_init_with_doc
inc_special = app.config.napoleon_include_special_with_doc
inc_private = app.config.napoleon_include_private_with_doc
if (
(is_special and inc_special)
or (is_private and inc_private)
or (is_init and inc_init)
):
return False
return None
| Config |
python | python__mypy | mypyc/test/test_typeops.py | {
"start": 3003,
"end": 3935
} | class ____(unittest.TestCase):
def test_simple_type_result(self) -> None:
assert RUnion.make_simplified_union([int_rprimitive]) == int_rprimitive
def test_remove_duplicate(self) -> None:
assert RUnion.make_simplified_union([int_rprimitive, int_rprimitive]) == int_rprimitive
def test_cannot_simplify(self) -> None:
assert RUnion.make_simplified_union(
[int_rprimitive, str_rprimitive, object_rprimitive]
) == RUnion([int_rprimitive, str_rprimitive, object_rprimitive])
def test_nested(self) -> None:
assert RUnion.make_simplified_union(
[int_rprimitive, RUnion([str_rprimitive, int_rprimitive])]
) == RUnion([int_rprimitive, str_rprimitive])
assert RUnion.make_simplified_union(
[int_rprimitive, RUnion([str_rprimitive, RUnion([int_rprimitive])])]
) == RUnion([int_rprimitive, str_rprimitive])
| TestUnionSimplification |
python | justquick__django-activity-stream | actstream/managers.py | {
"start": 315,
"end": 4052
} | class ____(GFKManager):
"""
Default manager for Actions, accessed through Action.objects
"""
def public(self, *args, **kwargs):
"""
Only return public actions
"""
kwargs['public'] = True
return self.filter(*args, **kwargs)
@stream
def actor(self, obj: Model, **kwargs):
"""
Stream of most recent actions where obj is the actor.
Keyword arguments will be passed to Action.objects.filter
"""
check(obj)
return obj.actor_actions.public(**kwargs)
@stream
def target(self, obj: Model, **kwargs):
"""
Stream of most recent actions where obj is the target.
Keyword arguments will be passed to Action.objects.filter
"""
check(obj)
return obj.target_actions.public(**kwargs)
@stream
def action_object(self, obj: Model, **kwargs):
"""
Stream of most recent actions where obj is the action_object.
Keyword arguments will be passed to Action.objects.filter
"""
check(obj)
return obj.action_object_actions.public(**kwargs)
@stream
def model_actions(self, model: Type[Model], **kwargs):
"""
Stream of most recent actions by any particular model
"""
check(model)
ctype = ContentType.objects.get_for_model(model)
return self.public(
(Q(target_content_type=ctype)
| Q(action_object_content_type=ctype)
| Q(actor_content_type=ctype)),
**kwargs
)
@stream
def any(self, obj: Model, **kwargs):
"""
Stream of most recent actions where obj is the actor OR target OR action_object.
"""
check(obj)
ctype = ContentType.objects.get_for_model(obj)
return self.public(
Q(
actor_content_type=ctype,
actor_object_id=obj.pk,
) | Q(
target_content_type=ctype,
target_object_id=obj.pk,
) | Q(
action_object_content_type=ctype,
action_object_object_id=obj.pk,
), **kwargs)
@stream
def user(self, obj: Model, with_user_activity=False, follow_flag=None, **kwargs):
"""Create a stream of the most recent actions by objects that the user is following."""
q = Q()
qs = self.public()
if not obj:
return qs.none()
check(obj)
if with_user_activity:
q = q | Q(
actor_content_type=ContentType.objects.get_for_model(obj),
actor_object_id=obj.pk
)
follows = apps.get_model('actstream', 'follow').objects.filter(user=obj)
if follow_flag:
follows = follows.filter(flag=follow_flag)
content_types = ContentType.objects.filter(
pk__in=follows.values('content_type_id')
)
if not (content_types.exists() or with_user_activity):
return qs.none()
for content_type in content_types:
object_ids = follows.filter(content_type=content_type)
q = q | Q(
actor_content_type=content_type,
actor_object_id__in=object_ids.values('object_id')
) | Q(
target_content_type=content_type,
target_object_id__in=object_ids.filter(
actor_only=False).values('object_id')
) | Q(
action_object_content_type=content_type,
action_object_object_id__in=object_ids.filter(
actor_only=False).values('object_id')
)
return qs.filter(q, **kwargs)
| ActionManager |
python | pandas-dev__pandas | pandas/tests/tools/test_to_datetime.py | {
"start": 84751,
"end": 102676
} | class ____:
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "^Out of bounds nanosecond timestamp: .*"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(arr)
@pytest.mark.parametrize(
"arg, exp_str",
[
["2012-01-01 00:00:00", "2012-01-01 00:00:00"],
["20121001", "2012-10-01"], # bad iso 8601
],
)
def test_to_datetime_iso8601(self, cache, arg, exp_str):
result = to_datetime([arg], cache=cache)
exp = Timestamp(exp_str)
assert result[0] == exp
@pytest.mark.parametrize(
"input, format",
[
("2012", "%Y-%m"),
("2012-01", "%Y-%m-%d"),
("2012-01-01", "%Y-%m-%d %H"),
("2012-01-01 10", "%Y-%m-%d %H:%M"),
("2012-01-01 10:00", "%Y-%m-%d %H:%M:%S"),
("2012-01-01 10:00:00", "%Y-%m-%d %H:%M:%S.%f"),
("2012-01-01 10:00:00.123", "%Y-%m-%d %H:%M:%S.%f%z"),
(0, "%Y-%m-%d"),
],
)
@pytest.mark.parametrize("exact", [True, False])
def test_to_datetime_iso8601_fails(self, input, format, exact):
# https://github.com/pandas-dev/pandas/issues/12649
# `format` is longer than the string, so this fails regardless of `exact`
with pytest.raises(
ValueError,
match=(rf"time data \"{input}\" doesn't match format " rf"\"{format}\""),
):
to_datetime(input, format=format, exact=exact)
@pytest.mark.parametrize(
"input, format",
[
("2012-01-01", "%Y-%m"),
("2012-01-01 10", "%Y-%m-%d"),
("2012-01-01 10:00", "%Y-%m-%d %H"),
("2012-01-01 10:00:00", "%Y-%m-%d %H:%M"),
(0, "%Y-%m-%d"),
],
)
def test_to_datetime_iso8601_exact_fails(self, input, format):
# https://github.com/pandas-dev/pandas/issues/12649
# `format` is shorter than the date string, so only fails with `exact=True`
msg = "|".join(
[
'^unconverted data remains when parsing with format ".*": ".*". '
f"{PARSING_ERR_MSG}$",
f'^time data ".*" doesn\'t match format ".*". {PARSING_ERR_MSG}$',
]
)
with pytest.raises(
ValueError,
match=(msg),
):
to_datetime(input, format=format)
@pytest.mark.parametrize(
"input, format",
[
("2012-01-01", "%Y-%m"),
("2012-01-01 00", "%Y-%m-%d"),
("2012-01-01 00:00", "%Y-%m-%d %H"),
("2012-01-01 00:00:00", "%Y-%m-%d %H:%M"),
],
)
def test_to_datetime_iso8601_non_exact(self, input, format):
# https://github.com/pandas-dev/pandas/issues/12649
expected = Timestamp(2012, 1, 1)
result = to_datetime(input, format=format, exact=False)
assert result == expected
@pytest.mark.parametrize(
"input, format",
[
("2020-01", "%Y/%m"),
("2020-01-01", "%Y/%m/%d"),
("2020-01-01 00", "%Y/%m/%dT%H"),
("2020-01-01T00", "%Y/%m/%d %H"),
("2020-01-01 00:00", "%Y/%m/%dT%H:%M"),
("2020-01-01T00:00", "%Y/%m/%d %H:%M"),
("2020-01-01 00:00:00", "%Y/%m/%dT%H:%M:%S"),
("2020-01-01T00:00:00", "%Y/%m/%d %H:%M:%S"),
],
)
def test_to_datetime_iso8601_separator(self, input, format):
# https://github.com/pandas-dev/pandas/issues/12649
with pytest.raises(
ValueError,
match=(rf"time data \"{input}\" doesn\'t match format " rf"\"{format}\""),
):
to_datetime(input, format=format)
@pytest.mark.parametrize(
"input, format",
[
("2020-01", "%Y-%m"),
("2020-01-01", "%Y-%m-%d"),
("2020-01-01 00", "%Y-%m-%d %H"),
("2020-01-01T00", "%Y-%m-%dT%H"),
("2020-01-01 00:00", "%Y-%m-%d %H:%M"),
("2020-01-01T00:00", "%Y-%m-%dT%H:%M"),
("2020-01-01 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2020-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S"),
("2020-01-01T00:00:00.000", "%Y-%m-%dT%H:%M:%S.%f"),
("2020-01-01T00:00:00.000000", "%Y-%m-%dT%H:%M:%S.%f"),
("2020-01-01T00:00:00.000000000", "%Y-%m-%dT%H:%M:%S.%f"),
],
)
def test_to_datetime_iso8601_valid(self, input, format):
# https://github.com/pandas-dev/pandas/issues/12649
expected = Timestamp(2020, 1, 1)
result = to_datetime(input, format=format)
assert result == expected
@pytest.mark.parametrize(
"input, format",
[
("2020-1", "%Y-%m"),
("2020-1-1", "%Y-%m-%d"),
("2020-1-1 0", "%Y-%m-%d %H"),
("2020-1-1T0", "%Y-%m-%dT%H"),
("2020-1-1 0:0", "%Y-%m-%d %H:%M"),
("2020-1-1T0:0", "%Y-%m-%dT%H:%M"),
("2020-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
("2020-1-1T0:0:0", "%Y-%m-%dT%H:%M:%S"),
("2020-1-1T0:0:0.000", "%Y-%m-%dT%H:%M:%S.%f"),
("2020-1-1T0:0:0.000000", "%Y-%m-%dT%H:%M:%S.%f"),
("2020-1-1T0:0:0.000000000", "%Y-%m-%dT%H:%M:%S.%f"),
],
)
def test_to_datetime_iso8601_non_padded(self, input, format):
# https://github.com/pandas-dev/pandas/issues/21422
expected = Timestamp(2020, 1, 1)
result = to_datetime(input, format=format)
assert result == expected
@pytest.mark.parametrize(
"input, format",
[
("2020-01-01T00:00:00.000000000+00:00", "%Y-%m-%dT%H:%M:%S.%f%z"),
("2020-01-01T00:00:00+00:00", "%Y-%m-%dT%H:%M:%S%z"),
("2020-01-01T00:00:00Z", "%Y-%m-%dT%H:%M:%S%z"),
],
)
def test_to_datetime_iso8601_with_timezone_valid(self, input, format):
# https://github.com/pandas-dev/pandas/issues/12649
expected = Timestamp(2020, 1, 1, tzinfo=timezone.utc)
result = to_datetime(input, format=format)
assert result == expected
def test_to_datetime_default(self, cache):
rs = to_datetime("2001", cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
@pytest.mark.xfail(reason="fails to enforce dayfirst=True, which would raise")
def test_to_datetime_respects_dayfirst(self, cache):
# dayfirst is essentially broken
# The msg here is not important since it isn't actually raised yet.
msg = "Invalid date specified"
with pytest.raises(ValueError, match=msg):
# if dayfirst is respected, then this would parse as month=13, which
# would raise
with tm.assert_produces_warning(UserWarning, match="Provide format"):
to_datetime("01-13-2012", dayfirst=True, cache=cache)
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
ser = Series(date_range("1/1/2000", periods=10))
result = to_datetime(ser, cache=cache)
assert result[0] == ser[0]
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
ser = Series(["10/18/2006", "10/18/2008", " "])
msg = (
r'^time data " " doesn\'t match format "%m/%d/%Y". ' rf"{PARSING_ERR_MSG}$"
)
with pytest.raises(ValueError, match=msg):
to_datetime(ser, errors="raise", cache=cache)
result_coerce = to_datetime(ser, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
@td.skip_if_not_us_locale
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3])
expected = to_datetime(td, format="%b %y", cache=cache)
result = td.apply(to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
def test_to_datetime_timezone_name(self):
# https://github.com/pandas-dev/pandas/issues/49748
result = to_datetime("2020-01-01 00:00:00UTC", format="%Y-%m-%d %H:%M:%S%Z")
expected = Timestamp(2020, 1, 1).tz_localize("UTC")
assert result == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize("errors", ["raise", "coerce"])
def test_to_datetime_with_apply_with_empty_str(self, cache, errors):
# this is only locale tested with US/None locales
# GH 5195, GH50251
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
expected = to_datetime(td, format="%b %y", errors=errors, cache=cache)
result = td.apply(
lambda x: to_datetime(x, format="%b %y", errors="coerce", cache=cache)
)
tm.assert_series_equal(result, expected)
def test_to_datetime_empty_stt(self, cache):
# empty string
result = to_datetime("", cache=cache)
assert result is NaT
def test_to_datetime_empty_str_list(self, cache):
result = to_datetime(["", ""], cache=cache)
assert isna(result).all()
def test_to_datetime_zero(self, cache):
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
def test_to_datetime_strings(self, cache):
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
def test_to_datetime_strings_variation(self, cache):
array = ["2012", "20120101", "20120101 12:01:01"]
expected = [to_datetime(dt_str, cache=cache) for dt_str in array]
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("result", [Timestamp("2012"), to_datetime("2012")])
def test_to_datetime_strings_vs_constructor(self, result):
expected = Timestamp(2012, 1, 1)
assert result == expected
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
# GH 21864
msg = '^Given date string "1" not likely a datetime$'
with pytest.raises(ValueError, match=msg):
to_datetime([1, "1"], errors="raise", cache=cache)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view("M8[us]")
as_obj = scalar.astype("O")
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype("O")
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range("1/1/2000", periods=20, unit="ns")
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
msg = "Cannot cast 139999 days 00:00:00 to unit='ns' without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000, unit="ns")
def test_to_datetime_float_with_nans_floating_point_error(self):
# GH#58419
ser = Series([np.nan] * 1000 + [1712219033.0], dtype=np.float64)
result = to_datetime(ser, unit="s", errors="coerce")
expected = Series(
[NaT] * 1000 + [Timestamp("2024-04-04 08:23:53")], dtype="datetime64[ns]"
)
tm.assert_series_equal(result, expected)
def test_string_invalid_operation(self, cache):
invalid = np.array(["87156549591102612381000001219H5"], dtype=object)
# GH #51084
with pytest.raises(ValueError, match="Unknown datetime string format"):
to_datetime(invalid, errors="raise", cache=cache)
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
strings = np.array(["1/1/2000", "1/2/2000", np.nan, "1/4/2000"], dtype=object)
expected = np.empty(4, dtype="M8[us]")
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
def test_string_na_nat_conversion_malformed(self, cache):
malformed = np.array(["1/100/2000", np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = r"Unknown datetime string format"
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
def test_string_na_nat_conversion_with_name(self, cache):
idx = ["a", "b", "c", "d", "e"]
series = Series(
["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo"
)
dseries = Series(
[
to_datetime("1/1/2000", cache=cache),
np.nan,
to_datetime("1/3/2000", cache=cache),
np.nan,
to_datetime("1/5/2000", cache=cache),
],
index=idx,
name="foo",
)
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype="M8[us]"), index=idx)
for i in range(5):
x = series.iloc[i]
if isna(x):
expected.iloc[i] = NaT
else:
expected.iloc[i] = to_datetime(x, cache=cache)
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "foo"
tm.assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == "foo"
@pytest.mark.parametrize(
"unit",
["h", "m", "s", "ms", "us", "ns"],
)
def test_dti_constructor_numpy_timeunits(self, cache, unit):
# GH 9114
dtype = np.dtype(f"M8[{unit}]")
base = to_datetime(["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache)
values = base.values.astype(dtype)
if unit in ["h", "m"]:
# we cast to closest supported unit
unit = "s"
exp_dtype = np.dtype(f"M8[{unit}]")
expected = DatetimeIndex(base.astype(exp_dtype))
assert expected.dtype == exp_dtype
tm.assert_index_equal(DatetimeIndex(values), expected)
tm.assert_index_equal(to_datetime(values, cache=cache), expected)
def test_dayfirst(self, cache):
# GH 5917
arr = ["10/02/2014", "11/02/2014", "12/02/2014"]
expected = DatetimeIndex(
[datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]
)
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
def test_dayfirst_warnings_valid_input(self):
# GH 12585
warning_msg = (
"Parsing dates in .* format when dayfirst=.* was specified. "
"Pass `dayfirst=.*` or specify a format to silence this warning."
)
# CASE 1: valid input
arr = ["31/12/2014", "10/03/2011"]
expected = DatetimeIndex(
["2014-12-31", "2011-03-10"], dtype="datetime64[us]", freq=None
)
# A. dayfirst arg correct, no warning
res1 = to_datetime(arr, dayfirst=True)
tm.assert_index_equal(expected, res1)
# B. dayfirst arg incorrect, warning
with tm.assert_produces_warning(UserWarning, match=warning_msg):
res2 = to_datetime(arr, dayfirst=False)
tm.assert_index_equal(expected, res2)
def test_dayfirst_warnings_invalid_input(self):
# CASE 2: invalid input
# cannot consistently process with single format
# ValueError *always* raised
# first in DD/MM/YYYY, second in MM/DD/YYYY
arr = ["31/12/2014", "03/30/2011"]
with pytest.raises(
ValueError,
match=(
r'^time data "03/30/2011" doesn\'t match format '
rf'"%d/%m/%Y". {PARSING_ERR_MSG}$'
),
):
to_datetime(arr, dayfirst=True)
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray._from_sequence])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
dti = date_range("2015-04-05", periods=3).rename("foo")
expected = dti.tz_localize("UTC")
obj = klass(dti)
expected = klass(expected)
result = to_datetime(obj, utc=True)
tm.assert_equal(result, expected)
| TestToDatetimeMisc |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 69328,
"end": 71709
} | class ____(GoogleCloudBaseOperator):
"""
Retrieve the list of tables in the specified dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryGetDatasetTablesOperator`
:param dataset_id: the dataset ID of the requested dataset.
:param project_id: (Optional) the project of the requested dataset. If None,
self.project_id will be used.
:param max_results: (Optional) the maximum number of tables to return.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
ui_color = BigQueryUIColors.DATASET.value
def __init__(
self,
*,
dataset_id: str,
project_id: str = PROVIDE_PROJECT_ID,
max_results: int | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.max_results = max_results
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return bq_hook.get_dataset_tables(
dataset_id=self.dataset_id,
project_id=self.project_id,
max_results=self.max_results,
)
| BigQueryGetDatasetTablesOperator |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 69561,
"end": 70132
} | class ____(BaseModel):
active_bytes: int = Field(..., description="Total number of bytes in active pages allocated by the application")
allocated_bytes: int = Field(..., description="Total number of bytes allocated by the application")
metadata_bytes: int = Field(..., description="Total number of bytes dedicated to metadata")
resident_bytes: int = Field(..., description="Maximum number of bytes in physically resident data pages mapped")
retained_bytes: int = Field(..., description="Total number of bytes in virtual memory mappings")
| MemoryTelemetry |
python | python-openxml__python-docx | src/docx/text/run.py | {
"start": 746,
"end": 10117
} | class ____(StoryChild):
"""Proxy object wrapping `<w:r>` element.
Several of the properties on Run take a tri-state value, |True|, |False|, or |None|.
|True| and |False| correspond to on and off respectively. |None| indicates the
property is not specified directly on the run and its effective value is taken from
the style hierarchy.
"""
def __init__(self, r: CT_R, parent: t.ProvidesStoryPart):
super().__init__(parent)
self._r = self._element = self.element = r
def add_break(self, break_type: WD_BREAK = WD_BREAK.LINE):
"""Add a break element of `break_type` to this run.
`break_type` can take the values `WD_BREAK.LINE`, `WD_BREAK.PAGE`, and
`WD_BREAK.COLUMN` where `WD_BREAK` is imported from `docx.enum.text`.
`break_type` defaults to `WD_BREAK.LINE`.
"""
type_, clear = {
WD_BREAK.LINE: (None, None),
WD_BREAK.PAGE: ("page", None),
WD_BREAK.COLUMN: ("column", None),
WD_BREAK.LINE_CLEAR_LEFT: ("textWrapping", "left"),
WD_BREAK.LINE_CLEAR_RIGHT: ("textWrapping", "right"),
WD_BREAK.LINE_CLEAR_ALL: ("textWrapping", "all"),
}[break_type]
br = self._r.add_br()
if type_ is not None:
br.type = type_
if clear is not None:
br.clear = clear
def add_picture(
self,
image_path_or_stream: str | IO[bytes],
width: int | Length | None = None,
height: int | Length | None = None,
) -> InlineShape:
"""Return |InlineShape| containing image identified by `image_path_or_stream`.
The picture is added to the end of this run.
`image_path_or_stream` can be a path (a string) or a file-like object containing
a binary image.
If neither width nor height is specified, the picture appears at
its native size. If only one is specified, it is used to compute a scaling
factor that is then applied to the unspecified dimension, preserving the aspect
ratio of the image. The native size of the picture is calculated using the dots-
per-inch (dpi) value specified in the image file, defaulting to 72 dpi if no
value is specified, as is often the case.
"""
inline = self.part.new_pic_inline(image_path_or_stream, width, height)
self._r.add_drawing(inline)
return InlineShape(inline)
def add_tab(self) -> None:
"""Add a ``<w:tab/>`` element at the end of the run, which Word interprets as a
tab character."""
self._r.add_tab()
def add_text(self, text: str):
"""Returns a newly appended |_Text| object (corresponding to a new ``<w:t>``
child element) to the run, containing `text`.
Compare with the possibly more friendly approach of assigning text to the
:attr:`Run.text` property.
"""
t = self._r.add_t(text)
return _Text(t)
@property
def bold(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text of the run to appear in bold face. When |False|,
the text unconditionally appears non-bold. When |None| the bold setting for this
run is inherited from the style hierarchy.
"""
return self.font.bold
@bold.setter
def bold(self, value: bool | None):
self.font.bold = value
def clear(self):
"""Return reference to this run after removing all its content.
All run formatting is preserved.
"""
self._r.clear_content()
return self
@property
def contains_page_break(self) -> bool:
"""`True` when one or more rendered page-breaks occur in this run.
Note that "hard" page-breaks inserted by the author are not included. A hard
page-break gives rise to a rendered page-break in the right position so if those
were included that page-break would be "double-counted".
It would be very rare for multiple rendered page-breaks to occur in a single
run, but it is possible.
"""
return bool(self._r.lastRenderedPageBreaks)
@property
def font(self) -> Font:
"""The |Font| object providing access to the character formatting properties for
this run, such as font name and size."""
return Font(self._element)
@property
def italic(self) -> bool | None:
"""Read/write tri-state value.
When |True|, causes the text of the run to appear in italics. When |False|, the
text unconditionally appears non-italic. When |None| the italic setting for this
run is inherited from the style hierarchy.
"""
return self.font.italic
@italic.setter
def italic(self, value: bool | None):
self.font.italic = value
def iter_inner_content(self) -> Iterator[str | Drawing | RenderedPageBreak]:
"""Generate the content-items in this run in the order they appear.
NOTE: only content-types currently supported by `python-docx` are generated. In
this version, that is text and rendered page-breaks. Drawing is included but
currently only provides access to its XML element (CT_Drawing) on its
`._drawing` attribute. `Drawing` attributes and methods may be expanded in
future releases.
There are a number of element-types that can appear inside a run, but most of
those (w:br, w:cr, w:noBreakHyphen, w:t, w:tab) have a clear plain-text
equivalent. Any contiguous range of such elements is generated as a single
`str`. Rendered page-break and drawing elements are generated individually. Any
other elements are ignored.
"""
for item in self._r.inner_content_items:
if isinstance(item, str):
yield item
elif isinstance(item, CT_LastRenderedPageBreak):
yield RenderedPageBreak(item, self)
elif isinstance(item, CT_Drawing): # pyright: ignore[reportUnnecessaryIsInstance]
yield Drawing(item, self)
def mark_comment_range(self, last_run: Run, comment_id: int) -> None:
"""Mark the range of runs from this run to `last_run` (inclusive) as belonging to a comment.
`comment_id` identfies the comment that references this range.
"""
# -- insert `w:commentRangeStart` with `comment_id` before this (first) run --
self._r.insert_comment_range_start_above(comment_id)
# -- insert `w:commentRangeEnd` and `w:commentReference` run with `comment_id` after
# -- `last_run`
last_run._r.insert_comment_range_end_and_reference_below(comment_id)
@property
def style(self) -> CharacterStyle:
"""Read/write.
A |CharacterStyle| object representing the character style applied to this run.
The default character style for the document (often `Default Character Font`) is
returned if the run has no directly-applied character style. Setting this
property to |None| removes any directly-applied character style.
"""
style_id = self._r.style
return cast(CharacterStyle, self.part.get_style(style_id, WD_STYLE_TYPE.CHARACTER))
@style.setter
def style(self, style_or_name: str | CharacterStyle | None):
style_id = self.part.get_style_id(style_or_name, WD_STYLE_TYPE.CHARACTER)
self._r.style = style_id
@property
def text(self) -> str:
"""String formed by concatenating the text equivalent of each run.
Each `<w:t>` element adds the text characters it contains. A `<w:tab/>` element
adds a `\\t` character. A `<w:cr/>` or `<w:br>` element each add a `\\n`
character. Note that a `<w:br>` element can indicate a page break or column
break as well as a line break. Only line-break `<w:br>` elements translate to
a `\\n` character. Others are ignored. All other content child elements, such as
`<w:drawing>`, are ignored.
Assigning text to this property has the reverse effect, translating each `\\t`
character to a `<w:tab/>` element and each `\\n` or `\\r` character to a
`<w:cr/>` element. Any existing run content is replaced. Run formatting is
preserved.
"""
return self._r.text
@text.setter
def text(self, text: str):
self._r.text = text
@property
def underline(self) -> bool | WD_UNDERLINE | None:
"""The underline style for this |Run|.
Value is one of |None|, |True|, |False|, or a member of :ref:`WdUnderline`.
A value of |None| indicates the run has no directly-applied underline value and
so will inherit the underline value of its containing paragraph. Assigning
|None| to this property removes any directly-applied underline value.
A value of |False| indicates a directly-applied setting of no underline,
overriding any inherited value.
A value of |True| indicates single underline.
The values from :ref:`WdUnderline` are used to specify other outline styles such
as double, wavy, and dotted.
"""
return self.font.underline
@underline.setter
def underline(self, value: bool | WD_UNDERLINE | None):
self.font.underline = value
| Run |
python | huggingface__transformers | tests/models/marian/test_modeling_marian.py | {
"start": 20952,
"end": 21512
} | class ____(MarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks without adjust_logits_generation overwritten"""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
| TestMarian_MT_EN |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_created_by.py | {
"start": 314,
"end": 402
} | class ____(GQLResult):
artifact: Optional[ArtifactCreatedByArtifact]
| ArtifactCreatedBy |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.