language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/2100-2199/2170.Minimum Operations to Make the Array Alternating/Solution.py | {
"start": 0,
"end": 556
} | class ____:
def minimumOperations(self, nums: List[int]) -> int:
def f(i: int) -> Tuple[int, int, int, int]:
k1 = k2 = 0
cnt = Counter(nums[i::2])
for k, v in cnt.items():
if cnt[k1] < v:
k2, k1 = k1, k
elif cnt[k2] < v:
k2 = k
return k1, cnt[k1], k2, cnt[k2]
a, b = f(0), f(1)
n = len(nums)
if a[0] != b[0]:
return n - (a[1] + b[1])
return n - max(a[1] + b[3], a[3] + b[1])
| Solution |
python | crytic__slither | slither/detectors/statements/unused_import.py | {
"start": 227,
"end": 5147
} | class ____(AbstractDetector):
"""
Detector unused imports.
"""
ARGUMENT = "unused-import"
HELP = "Detects unused imports"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#unused-imports"
WIKI_TITLE = "Unused Imports"
WIKI_DESCRIPTION = "Importing a file that is not used in the contract likely indicates a mistake. The import should be removed until it is needed."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
import {A} from "./A.sol";
contract B {}
```
B either should import from A and it was forgotten or the import is not needed and should be removed.
"""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = (
"Remove the unused import. If the import is needed later, it can be added back."
)
@staticmethod
def _is_import_container(scope: FileScope) -> bool: # pylint: disable=too-many-branches
"""
Returns True if a given file (provided as a `FileScope` object) contains only `import` directives (and pragmas).
Such a file doesn't need the imports it contains, but its purpose is to aggregate certain correlated imports.
"""
for c in scope.contracts.values():
if c.file_scope == scope:
return False
for err in scope.custom_errors:
if err.file_scope == scope:
return False
for en in scope.enums.values():
if en.file_scope == scope:
return False
for f in scope.functions:
if f.file_scope == scope:
return False
for st in scope.structures.values():
if st.file_scope == scope:
return False
for ct in scope.type_aliases.values():
if ct.source_mapping and ct.source_mapping.filename == scope.filename:
return False
for uf in scope.using_for_directives:
if uf.file_scope == scope:
return False
for v in scope.variables.values():
if v.file_scope == scope:
return False
return True
def _detect(self) -> List[Output]: # pylint: disable=too-many-branches
results: List[Output] = []
# This is computed lazily and then memoized so we need to trigger the computation.
self.slither._compute_offsets_to_ref_impl_decl()
for unit in self.slither.compilation_units:
for filename, current_scope in unit.scopes.items():
# Skip files that are dependencies
if unit.crytic_compile.is_dependency(filename.absolute):
continue
unused_list = []
for i in current_scope.imports:
# `scope.imports` contains all transitive imports so we need to filter out imports not explicitly imported in the file.
# Otherwise, we would recommend removing an import that is used by a leaf contract and cause compilation errors.
if i.scope != current_scope:
continue
# If a scope doesn't define any contract, function, etc., it is an import container.
# The second case accounts for importing from an import container as a reference will only be in the definition's file.
if self._is_import_container(i.scope) or self._is_import_container(
unit.get_scope(i.filename)
):
continue
imported_path = self.slither.crytic_compile.filename_lookup(i.filename)
use_found = False
# Search through all references to the imported file
for _, refs_to_imported_path in self.slither._offset_to_references[
imported_path
].items():
for ref in refs_to_imported_path:
# If there is a reference in this file to the imported file, it is used.
if ref.filename == filename:
use_found = True
break
if use_found:
break
if not use_found:
unused_list.append(f"{i.source_mapping.content} ({i.source_mapping})")
if len(unused_list) > 0:
info = [
f"The following unused import(s) in {filename.used} should be removed:",
]
for unused in unused_list:
info += ["\n\t-", unused, "\n"]
results.append(self.generate_result(info))
return results
| UnusedImport |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_assign.py | {
"start": 75,
"end": 2982
} | class ____:
def test_assign(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected["C"] = [4, 2.5, 2]
tm.assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
tm.assert_frame_equal(result, expected)
# original is unmodified
tm.assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
tm.assert_frame_equal(result, expected)
# original is unmodified
tm.assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop("B", axis=1).rename(columns={"C": "B"})
tm.assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected["A"] = [5, 7, 9]
tm.assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
tm.assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=["A", "B"])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame(
[[1, 4, 7, 1, 4], [2, 5, 8, 2, 5], [3, 6, 9, 3, 6]], columns=list("ABCDE")
)
tm.assert_frame_equal(result, expected)
def test_assign_order(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]], columns=list("ABDC"))
tm.assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]], columns=list("ABCD"))
tm.assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
# non-keyword argument
msg = r"assign\(\) takes 1 positional argument but 2 were given"
with pytest.raises(TypeError, match=msg):
df.assign(lambda x: x.A)
msg = "'DataFrame' object has no attribute 'C'"
with pytest.raises(AttributeError, match=msg):
df.assign(C=df.A, D=df.A + df.C)
def test_assign_dependent(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
result = df.assign(C=df.A, D=lambda x: x["A"] + x["C"])
expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], columns=list("ABCD"))
tm.assert_frame_equal(result, expected)
result = df.assign(C=lambda df: df.A, D=lambda df: df["A"] + df["C"])
expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], columns=list("ABCD"))
tm.assert_frame_equal(result, expected)
| TestAssign |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 2793,
"end": 3035
} | class ____(BaseModel):
class Model(BaseModel):
id: str
model: Model
_ = NestedModel.Model
DynamicModel = create_model('DynamicModel', __base__=Model)
dynamic_model = DynamicModel(x=1, y='y')
dynamic_model.x = 2
| NestedModel |
python | facebookresearch__faiss | benchs/distributed_ondisk/search_server.py | {
"start": 3042,
"end": 6943
} | class ____:
"""manages a local index, that does the coarse quantization and a set
of sub_indexes. The sub_indexes search a subset of the inverted
lists. The SplitPerListIndex merges results from the sub-indexes"""
def __init__(self, index, sub_indexes):
self.index = index
self.code_size = faiss.extract_index_ivf(index.index).code_size
self.sub_indexes = sub_indexes
self.ni = len(self.sub_indexes)
# pool of threads. Each thread manages one sub-index.
self.pool = ThreadPool(self.ni)
self.verbose = False
def set_nprobe(self, nprobe):
self.index.set_nprobe(nprobe)
self.pool.map(
lambda i: self.sub_indexes[i].set_nprobe(nprobe),
range(self.ni)
)
def set_omp_num_threads(self, nt):
faiss.omp_set_num_threads(nt)
self.pool.map(
lambda idx: idx.set_omp_num_threads(nt),
self.sub_indexes
)
def set_parallel_mode(self, pm):
self.index.set_parallel_mode(pm)
self.pool.map(
lambda idx: idx.set_parallel_mode(pm),
self.sub_indexes
)
def set_prefetch_nthread(self, nt):
self.index.set_prefetch_nthread(nt)
self.pool.map(
lambda idx: idx.set_prefetch_nthread(nt),
self.sub_indexes
)
def balance_lists(self, list_nos):
big_il = self.index.big_il
weights = np.array([big_il.list_size(int(i))
for i in list_nos.ravel()])
bins, assign = distribute_weights(weights, self.ni)
if self.verbose:
print('bins weight range %d:%d total %d (%.2f MiB)' % (
bins.min(), bins.max(), bins.sum(),
bins.sum() * (self.code_size + 8) / 2 ** 20))
self.nscan = bins.sum()
return assign.reshape(list_nos.shape)
def search(self, x, k):
xqo, list_nos, coarse_dis = self.index.transform_and_assign(x)
assign = self.balance_lists(list_nos)
def do_query(i):
sub_index = self.sub_indexes[i]
list_nos_i = list_nos.copy()
list_nos_i[assign != i] = -1
t0 = time.time()
Di, Ii = sub_index.ivf_search_preassigned(
xqo, list_nos_i, coarse_dis, k)
# print(list_nos_i, Ii)
if self.verbose:
print('client %d: %.3f s' % (i, time.time() - t0))
return Di, Ii
rh = ResultHeap(x.shape[0], k)
for Di, Ii in self.pool.imap(do_query, range(self.ni)):
# print("ADD", Ii, rh.I)
rh.add_batch_result(Di, Ii, 0)
rh.finalize()
return rh.D, rh.I
def range_search(self, x, radius):
xqo, list_nos, coarse_dis = self.index.transform_and_assign(x)
assign = self.balance_lists(list_nos)
nq = len(x)
def do_query(i):
sub_index = self.sub_indexes[i]
list_nos_i = list_nos.copy()
list_nos_i[assign != i] = -1
t0 = time.time()
limi, Di, Ii = sub_index.ivf_range_search_preassigned(
xqo, list_nos_i, coarse_dis, radius)
if self.verbose:
print('slice %d: %.3f s' % (i, time.time() - t0))
return limi, Di, Ii
D = [[] for i in range(nq)]
I = [[] for i in range(nq)]
sizes = np.zeros(nq, dtype=int)
for lims, Di, Ii in self.pool.imap(do_query, range(self.ni)):
for i in range(nq):
l0, l1 = lims[i:i + 2]
D[i].append(Di[l0:l1])
I[i].append(Ii[l0:l1])
sizes[i] += l1 - l0
lims = np.zeros(nq + 1, dtype=int)
lims[1:] = np.cumsum(sizes)
D = np.hstack([j for i in D for j in i])
I = np.hstack([j for i in I for j in i])
return lims, D, I
| SplitPerListIndex |
python | getsentry__sentry | src/sentry/migrations/0925_backfill_open_periods.py | {
"start": 703,
"end": 1168
} | class ____(Enum):
SET_REGRESSION = 6
SET_RESOLVED = 1
SET_RESOLVED_IN_RELEASE = 13
SET_RESOLVED_BY_AGE = 15
SET_RESOLVED_IN_COMMIT = 16
SET_RESOLVED_IN_PULL_REQUEST = 21
RESOLVED_ACTIVITY_TYPES = [
ActivityType.SET_RESOLVED.value,
ActivityType.SET_RESOLVED_IN_RELEASE.value,
ActivityType.SET_RESOLVED_BY_AGE.value,
ActivityType.SET_RESOLVED_IN_COMMIT.value,
ActivityType.SET_RESOLVED_IN_PULL_REQUEST.value,
]
| ActivityType |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 16223,
"end": 16547
} | class ____(BaseModel):
root: list[JsonValue]
type: Literal["XComSequenceSliceResult"] = "XComSequenceSliceResult"
@classmethod
def from_response(cls, response: XComSequenceSliceResponse) -> XComSequenceSliceResult:
return cls(root=response.root, type="XComSequenceSliceResult")
| XComSequenceSliceResult |
python | xlwings__xlwings | xlwings/main.py | {
"start": 145200,
"end": 148217
} | class ____(Collection):
"""
A collection of all :meth:`book <Book>` objects:
>>> import xlwings as xw
>>> xw.books # active app
Books([<Book [Book1]>, <Book [Book2]>])
>>> xw.apps[10559].books # specific app, get the PIDs via xw.apps.keys()
Books([<Book [Book1]>, <Book [Book2]>])
.. versionadded:: 0.9.0
"""
_wrap = Book
@property
def active(self):
"""
Returns the active Book.
"""
return Book(impl=self.impl.active)
def add(self):
"""
Creates a new Book. The new Book becomes the active Book. Returns a Book object.
"""
return Book(impl=self.impl.add())
def open(
self,
fullname=None,
update_links=None,
read_only=None,
format=None,
password=None,
write_res_password=None,
ignore_read_only_recommended=None,
origin=None,
delimiter=None,
editable=None,
notify=None,
converter=None,
add_to_mru=None,
local=None,
corrupt_load=None,
json=None,
):
"""
Opens a Book if it is not open yet and returns it. If it is already open,
it doesn't raise an exception but simply returns the Book object.
Parameters
----------
fullname : str or path-like object
filename or fully qualified filename, e.g. ``r'C:\\path\\to\\file.xlsx'``
or ``'file.xlsm'``. Without a full path, it looks for the file in the
current working directory.
Other Parameters
see: :meth:`xlwings.Book()`
Returns
-------
Book : Book that has been opened.
"""
if self.impl.app.engine.type == "remote":
return Book(impl=self.impl.open(json=json))
fullname = utils.fspath(fullname)
if not os.path.exists(fullname):
raise FileNotFoundError("No such file: '%s'" % fullname)
fullname = os.path.realpath(fullname)
_, name = os.path.split(fullname)
if self.impl.app.engine.type == "reader":
return Book(impl=self.impl.open(filename=fullname))
try:
impl = self.impl(name)
if not os.path.samefile(impl.fullname, fullname):
raise ValueError(
"Cannot open two workbooks named '%s', even if they are saved in"
"different locations." % name
)
except KeyError:
impl = self.impl.open(
fullname,
update_links,
read_only,
format,
password,
write_res_password,
ignore_read_only_recommended,
origin,
delimiter,
editable,
notify,
converter,
add_to_mru,
local,
corrupt_load,
)
return Book(impl=impl)
| Books |
python | django__django | tests/expressions/tests.py | {
"start": 44750,
"end": 52339
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
ceo = Employee.objects.create(firstname="Just", lastname="Doit", salary=30)
# MySQL requires that the values calculated for expressions don't pass
# outside of the field's range, so it's inconvenient to use the values
# in the more general tests.
cls.c5020 = Company.objects.create(
name="5020 Ltd", num_employees=50, num_chairs=20, ceo=ceo
)
cls.c5040 = Company.objects.create(
name="5040 Ltd", num_employees=50, num_chairs=40, ceo=ceo
)
cls.c5050 = Company.objects.create(
name="5050 Ltd", num_employees=50, num_chairs=50, ceo=ceo
)
cls.c5060 = Company.objects.create(
name="5060 Ltd", num_employees=50, num_chairs=60, ceo=ceo
)
cls.c99300 = Company.objects.create(
name="99300 Ltd", num_employees=99, num_chairs=300, ceo=ceo
)
def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
# __in lookups can use F() expressions for integers.
queryset = Company.objects.filter(num_employees__in=([F("num_chairs") - 10]))
self.assertSequenceEqual(queryset, [self.c5060])
self.assertCountEqual(
Company.objects.filter(
num_employees__in=([F("num_chairs") - 10, F("num_chairs") + 10])
),
[self.c5040, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__in=(
[F("num_chairs") - 10, F("num_chairs"), F("num_chairs") + 10]
)
),
[self.c5040, self.c5050, self.c5060],
)
def test_expressions_range_lookups_join_choice(self):
midpoint = datetime.time(13, 0)
t1 = Time.objects.create(time=datetime.time(12, 0))
t2 = Time.objects.create(time=datetime.time(14, 0))
s1 = SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
queryset = SimulationRun.objects.filter(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertSequenceEqual(queryset, [s1])
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.INNER)
queryset = SimulationRun.objects.exclude(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertQuerySetEqual(queryset, [], ordered=False)
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.LOUTER)
def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
# Range lookups can use F() expressions for integers.
Company.objects.filter(num_employees__exact=F("num_chairs"))
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs"), 100)),
[self.c5020, self.c5040, self.c5050],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__range=(F("num_chairs") - 10, F("num_chairs") + 10)
),
[self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs") - 10, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(1, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060, self.c99300],
)
def test_range_lookup_namedtuple(self):
EmployeeRange = namedtuple("EmployeeRange", ["minimum", "maximum"])
qs = Company.objects.filter(
num_employees__range=EmployeeRange(minimum=51, maximum=100),
)
self.assertSequenceEqual(qs, [self.c99300])
@unittest.skipUnless(
connection.vendor == "sqlite",
"This defensive test only works on databases that don't validate parameter "
"types",
)
def test_expressions_not_introduce_sql_injection_via_untrusted_string_inclusion(
self,
):
"""
This tests that SQL injection isn't possible using compilation of
expressions in iterable filters, as their compilation happens before
the main query compilation. It's limited to SQLite, as PostgreSQL,
Oracle and other vendors have defense in depth against this by type
checking. Testing against SQLite (the most permissive of the built-in
databases) demonstrates that the problem doesn't exist while keeping
the test simple.
"""
queryset = Company.objects.filter(name__in=[F("num_chairs") + "1)) OR ((1==1"])
self.assertQuerySetEqual(queryset, [], ordered=False)
def test_range_lookup_allows_F_expressions_and_expressions_for_dates(self):
start = datetime.datetime(2016, 2, 3, 15, 0, 0)
end = datetime.datetime(2016, 2, 5, 15, 0, 0)
experiment_1 = Experiment.objects.create(
name="Integrity testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
experiment_2 = Experiment.objects.create(
name="Taste testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
r1 = Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
)
Result.objects.create(
experiment=experiment_2,
result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
)
tests = [
# Datetimes.
([F("experiment__start"), F("experiment__end")], "result_time__range"),
# Dates.
(
[F("experiment__start__date"), F("experiment__end__date")],
"result_time__date__range",
),
]
for within_experiment_time, lookup in tests:
with self.subTest(lookup=lookup):
queryset = Result.objects.filter(**{lookup: within_experiment_time})
self.assertSequenceEqual(queryset, [r1])
def test_relabeled_clone_rhs(self):
Number.objects.bulk_create([Number(integer=1), Number(integer=2)])
self.assertIs(
Number.objects.filter(
# Ensure iterable of expressions are properly re-labelled on
# subquery pushdown. If the inner query __range right-hand-side
# members are not relabelled they will point at the outer query
# alias and this test will fail.
Exists(
Number.objects.exclude(pk=OuterRef("pk")).filter(
integer__range=(F("integer"), F("integer"))
)
)
).exists(),
True,
)
| IterableLookupInnerExpressionsTests |
python | pypa__setuptools | setuptools/msvc.py | {
"start": 24266,
"end": 24413
} | class ____(TypedDict):
include: str
lib: str
libpath: str
path: str
py_vcruntime_redist: NotRequired[str | None]
| _EnvironmentDict |
python | huggingface__transformers | src/transformers/models/cohere/modeling_cohere.py | {
"start": 17226,
"end": 17772
} | class ____(PreTrainedModel):
config: CohereConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["CohereDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": CohereDecoderLayer,
"attentions": CohereAttention,
}
@auto_docstring
| CoherePreTrainedModel |
python | tiangolo__fastapi | tests/test_union_body.py | {
"start": 244,
"end": 4748
} | class ____(BaseModel):
price: int
@app.post("/items/")
def save_union_body(item: Union[OtherItem, Item]):
return {"item": item}
client = TestClient(app)
def test_post_other_item():
response = client.post("/items/", json={"price": 100})
assert response.status_code == 200, response.text
assert response.json() == {"item": {"price": 100}}
def test_post_item():
response = client.post("/items/", json={"name": "Foo"})
assert response.status_code == 200, response.text
assert response.json() == {"item": {"name": "Foo"}}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Save Union Body",
"operationId": "save_union_body_items__post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"title": "Item",
"anyOf": [
{"$ref": "#/components/schemas/OtherItem"},
{"$ref": "#/components/schemas/Item"},
],
}
}
},
"required": True,
},
}
}
},
"components": {
"schemas": {
"OtherItem": {
"title": "OtherItem",
"required": ["price"],
"type": "object",
"properties": {"price": {"title": "Price", "type": "integer"}},
},
"Item": {
"title": "Item",
"type": "object",
"properties": IsDict(
{
"name": {
"title": "Name",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"name": {"title": "Name", "type": "string"}}
),
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
| OtherItem |
python | pyca__cryptography | src/cryptography/utils.py | {
"start": 475,
"end": 1941
} | class ____(UserWarning):
pass
# Several APIs were deprecated with no specific end-of-life date because of the
# ubiquity of their use. They should not be removed until we agree on when that
# cycle ends.
DeprecatedIn36 = CryptographyDeprecationWarning
DeprecatedIn40 = CryptographyDeprecationWarning
DeprecatedIn41 = CryptographyDeprecationWarning
DeprecatedIn42 = CryptographyDeprecationWarning
DeprecatedIn43 = CryptographyDeprecationWarning
DeprecatedIn47 = CryptographyDeprecationWarning
# If you're wondering why we don't use `Buffer`, it's because `Buffer` would
# be more accurately named: Bufferable. It means something which has an
# `__buffer__`. Which means you can't actually treat the result as a buffer
# (and do things like take a `len()`).
if sys.version_info >= (3, 9):
Buffer = typing.Union[bytes, bytearray, memoryview]
else:
Buffer = typing.ByteString
def _check_bytes(name: str, value: bytes) -> None:
if not isinstance(value, bytes):
raise TypeError(f"{name} must be bytes")
def _check_byteslike(name: str, value: Buffer) -> None:
try:
memoryview(value)
except TypeError:
raise TypeError(f"{name} must be bytes-like")
def int_to_bytes(integer: int, length: int | None = None) -> bytes:
if length == 0:
raise ValueError("length argument can't be 0")
return integer.to_bytes(
length or (integer.bit_length() + 7) // 8 or 1, "big"
)
| CryptographyDeprecationWarning |
python | allegroai__clearml | clearml/backend_api/session/client/client.py | {
"start": 13551,
"end": 15094
} | class ____(object):
"""
Superclass for action-grouping classes.
"""
name = abc.abstractproperty()
__doc__ = abc.abstractproperty()
def __init__(self, session: Session) -> None:
self.session = session
def get_requests(service: Service) -> OrderedDict:
# force load proxy object
# noinspection PyBroadException
try:
service.dummy
except Exception:
pass
# noinspection PyProtectedMember
return OrderedDict(
(key, value)
for key, value in sorted(
vars(service.__wrapped__ if hasattr(service, "__wrapped__") else service).items(),
key=itemgetter(0),
)
if isinstance(value, type) and issubclass(value, APIRequest) and value._action
)
def make_service_class(module: types.ModuleType) -> Type[Service]:
"""
Create a service class from service module.
"""
properties = OrderedDict(
[
("__module__", __name__),
("__doc__", module.__doc__),
("name", module_name(module)),
]
)
properties.update(
(f.__name__, f) for f in (make_action(module, value) for key, value in get_requests(module).items())
)
# noinspection PyTypeChecker
return type(str(module_name(module)), (Service,), properties)
def module_name(module: Any) -> Text:
try:
module = module.__name__
except AttributeError:
pass
base_name = module.split(".")[-1]
return "".join(s.capitalize() for s in base_name.split("_"))
| Service |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 105259,
"end": 105733
} | class ____(BaseModel):
id: int = Field(..., description="")
key: Optional["ShardKey"] = Field(default=None, description="")
local: Optional["LocalShardTelemetry"] = Field(default=None, description="")
remote: List["RemoteShardTelemetry"] = Field(..., description="")
replicate_states: Dict[str, "ReplicaState"] = Field(..., description="")
partial_snapshot: Optional["PartialSnapshotTelemetry"] = Field(default=None, description="")
| ReplicaSetTelemetry |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 21321,
"end": 23222
} | class ____(ExternalRenderer):
def __init__(
self,
config=None,
auto_play=False,
post_script=None,
animation_opts=None,
include_plotlyjs="cdn",
):
self.config = config
self.auto_play = auto_play
self.post_script = post_script
self.animation_opts = animation_opts
self.include_plotlyjs = include_plotlyjs
self._displayHTML = None
@property
def displayHTML(self):
import inspect
if self._displayHTML is None:
for frame in inspect.getouterframes(inspect.currentframe()):
global_names = set(frame.frame.f_globals)
# Check for displayHTML plus a few others to reduce chance of a false
# hit.
if all(v in global_names for v in ["displayHTML", "display", "spark"]):
self._displayHTML = frame.frame.f_globals["displayHTML"]
break
if self._displayHTML is None:
raise EnvironmentError(
"""
Unable to detect the Databricks displayHTML function. The 'databricks' renderer is only
supported when called from within the Databricks notebook environment."""
)
return self._displayHTML
def render(self, fig_dict):
from plotly.io import to_html
html = to_html(
fig_dict,
config=self.config,
auto_play=self.auto_play,
include_plotlyjs=self.include_plotlyjs,
include_mathjax="cdn",
post_script=self.post_script,
full_html=True,
animation_opts=self.animation_opts,
default_width="100%",
default_height="100%",
validate=False,
)
# displayHTML is a Databricks notebook built-in function
self.displayHTML(html)
| DatabricksRenderer |
python | numpy__numpy | numpy/distutils/msvccompiler.py | {
"start": 1038,
"end": 2647
} | class ____(_MSVCCompiler):
def __init__(self, verbose=0, dry_run=0, force=0):
_MSVCCompiler.__init__(self, verbose, dry_run, force)
def initialize(self):
# The 'lib' and 'include' variables may be overwritten
# by MSVCCompiler.initialize, so save them for later merge.
environ_lib = os.getenv('lib', '')
environ_include = os.getenv('include', '')
_MSVCCompiler.initialize(self)
# Merge current and previous values of 'lib' and 'include'
os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
os.environ['include'] = _merge(environ_include, os.environ['include'])
# msvc9 building for 32 bits requires SSE2 to work around a
# compiler bug.
if platform_bits == 32:
self.compile_options += ['/arch:SSE2']
self.compile_options_debug += ['/arch:SSE2']
def lib_opts_if_msvc(build_cmd):
""" Add flags if we are using MSVC compiler
We can't see `build_cmd` in our scope, because we have not initialized
the distutils build command, so use this deferred calculation to run
when we are building the library.
"""
if build_cmd.compiler.compiler_type != 'msvc':
return []
# Explicitly disable whole-program optimization.
flags = ['/GL-']
# Disable voltbl section for vc142 to allow link using mingw-w64; see:
# https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171
if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']):
flags.append('-d2VolatileMetadata-')
return flags
| MSVCCompiler |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/_settings.py | {
"start": 4252,
"end": 6059
} | class ____(Enum):
"""Options for the |settings.phases| argument to |@settings|."""
explicit = "explicit"
"""
Controls whether explicit examples are run.
"""
reuse = "reuse"
"""
Controls whether previous examples will be reused.
"""
generate = "generate"
"""
Controls whether new examples will be generated.
"""
target = "target"
"""
Controls whether examples will be mutated for targeting.
"""
shrink = "shrink"
"""
Controls whether examples will be shrunk.
"""
explain = "explain"
"""
Controls whether Hypothesis attempts to explain test failures.
The explain phase has two parts, each of which is best-effort - if Hypothesis
can't find a useful explanation, we'll just print the minimal failing example.
"""
@classmethod
def _missing_(cls, value):
# deprecation pathway for integer values. Can be removed in Hypothesis 7.
if isinstance(value, int) and not isinstance(value, bool):
int_to_name = {
0: "explicit",
1: "reuse",
2: "generate",
3: "target",
4: "shrink",
5: "explain",
}
if value in int_to_name:
note_deprecation(
f"Passing Phase({value}) as an integer is deprecated. "
"Hypothesis now treats Phase values as strings, not integers. "
f"Use Phase.{int_to_name[value]} instead.",
since="2025-11-05",
has_codemod=False,
stacklevel=2,
)
return cls(int_to_name[value])
return None
def __repr__(self) -> str:
return f"Phase.{self.name}"
| Phase |
python | astropy__astropy | astropy/coordinates/representation/spherical.py | {
"start": 24454,
"end": 33587
} | class ____(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`. If ``copy`` is False, `phi`
will be changed inplace if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.PhysicsSphericalDifferential` instance, or a dictionary of of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"phi": Angle, "theta": Angle, "r": u.Quantity}
def __init__(self, phi, theta=None, r=None, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
# Note that _phi already holds our own copy if copy=True.
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {theta.to(u.degree)}"
)
if self._r.unit.physical_type == "length":
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
The elevation of the point(s).
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=COPY_IF_NEEDED),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=COPY_IF_NEEDED
),
"r": CartesianRepresentation(
sintheta * cosphi, sintheta * sinphi, costheta, copy=COPY_IF_NEEDED
),
}
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi": r * sintheta, "theta": r, "r": l}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if isinstance(other_class, type):
if issubclass(other_class, SphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
distance=self.r,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, RadialRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
distance=self.r,
differentials=diffs,
copy=False,
)
from .cylindrical import CylindricalRepresentation
if issubclass(other_class, CylindricalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
rho=self.r * np.sin(self.theta),
phi=self.phi,
z=self.r * np.cos(self.theta),
differentials=diffs,
copy=False,
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
# apply transformation in unit-spherical coordinates
xyz = erfa_ufunc.s2c(self.phi, 90 * u.deg - self.theta)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r`
# create transformed physics-spherical representation,
# reapplying the distance scaling
rep = self.__class__(phi=lon, theta=90 * u.deg - lat, r=self.r * ur)
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args)
# Also run phi_op on theta to ensure theta remains between 0 and 180:
# any time the scale is negative, we do -theta + 180 degrees.
result = self.__class__(
phi_op(self.phi),
phi_op(adjust_theta_sign(self.theta)),
r_op(self.r),
copy=COPY_IF_NEEDED,
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, adjust_theta_sign, r_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
| PhysicsSphericalRepresentation |
python | readthedocs__readthedocs.org | readthedocs/projects/admin.py | {
"start": 6941,
"end": 12984
} | class ____(ExtraSimpleHistoryAdmin):
"""Project model admin view."""
prepopulated_fields = {"slug": ("name",)}
list_display = ("name", "slug", "repo")
list_filter = tuple()
if "readthedocsext.spamfighting" in settings.INSTALLED_APPS:
list_filter = list_filter + (ProjectSpamThreshold,)
list_filter = list_filter + (
ProjectOwnerBannedFilter,
"is_spam",
"feature__feature_id",
"repo_type",
"privacy_level",
"programming_language",
"documentation_type",
)
search_fields = ("slug", "repo")
inlines = [
ProjectRelationshipInline,
RedirectInline,
DomainInline,
VersionInline,
]
readonly_fields = (
"pub_date",
"modified_date",
"feature_flags",
"matching_spam_rules",
)
raw_id_fields = ("users", "main_language_project", "remote_repository", "latest_build")
actions = [
"ban_owner",
"run_spam_rule_checks",
"build_default_version",
"reindex_active_versions",
"import_tags_from_vcs",
]
def matching_spam_rules(self, obj):
result = []
for spam_rule in obj.spam_rules.filter(enabled=True):
result.append(f"{spam_rule.spam_rule_type} ({spam_rule.value})")
return "\n".join(result) or "No matching spam rules"
def feature_flags(self, obj):
return "\n".join([str(f.get_feature_display()) for f in obj.features])
def run_spam_rule_checks(self, request, queryset):
"""Run all the spam checks on this project."""
if "readthedocsext.spamfighting" not in settings.INSTALLED_APPS:
messages.add_message(
request,
messages.ERROR,
"Spam fighting Django application not installed",
)
return
from readthedocsext.spamfighting.tasks import spam_rules_check # noqa
project_slugs = queryset.values_list("slug", flat=True)
# NOTE: convert queryset to a simple list so Celery can serialize it
spam_rules_check.delay(project_slugs=list(project_slugs))
messages.add_message(
request,
messages.INFO,
"Spam check task triggered for {} projects".format(queryset.count()),
)
@admin.action(description="Ban project owner")
def ban_owner(self, request, queryset):
"""
Ban project owner.
This will only ban single owners, because a malicious user could add a
user as a co-owner of the project. We don't want to induce and
collateral damage when flagging users.
"""
total = 0
for project in queryset:
if project.users.count() == 1:
user = project.users.first()
user.profile.banned = True
set_change_reason(user.profile, self.get_change_reason())
user.profile.save()
total += 1
else:
messages.add_message(
request,
messages.ERROR,
"Project has multiple owners: {}".format(project),
)
if total == 0:
messages.add_message(request, messages.ERROR, "No users banned")
else:
messages.add_message(
request,
messages.INFO,
"Banned {} user(s)".format(total),
)
def delete_selected_and_artifacts(self, request, queryset):
"""Remove HTML/etc artifacts from storage."""
if request.POST.get("post"):
for project in queryset:
clean_project_resources(project)
return delete_selected(self, request, queryset)
@admin.action(description="Build default version")
def build_default_version(self, request, queryset):
"""Trigger a build for the project version."""
total = 0
for project in queryset:
trigger_build(project=project)
total += 1
messages.add_message(
request,
messages.INFO,
"Triggered builds for {} project(s).".format(total),
)
@admin.action(description="Reindex active versions to ES")
def reindex_active_versions(self, request, queryset):
"""Reindex all active versions of the selected projects to ES."""
qs_iterator = queryset.iterator()
for project in qs_iterator:
versions_id_to_reindex = project.versions.for_reindex().values_list("pk", flat=True)
if not versions_id_to_reindex.exists():
self.message_user(
request,
"No versions to be re-indexed for project {}".format(project),
messages.ERROR,
)
else:
for version_id in versions_id_to_reindex.iterator():
reindex_version.delay(version_id)
self.message_user(
request,
"Task initiated successfully for {}".format(project),
messages.SUCCESS,
)
@admin.action(description="Import tags from the version control API")
def import_tags_from_vcs(self, request, queryset):
for project in queryset.iterator():
tags = import_tags(project)
if tags:
self.message_user(
request,
"Imported tags for {}: {}".format(project, tags),
messages.SUCCESS,
)
else:
self.message_user(request, "No tags found for {}".format(project), messages.WARNING)
def get_actions(self, request):
actions = super().get_actions(request)
actions["delete_selected"] = (
self.__class__.delete_selected_and_artifacts,
"delete_selected",
delete_selected.short_description,
)
return actions
@admin.register(HTMLFile, ImportedFile)
| ProjectAdmin |
python | pypa__pipenv | pipenv/environment.py | {
"start": 2031,
"end": 31854
} | class ____:
def __init__(
self,
prefix: str | None = None,
python: str | None = None,
is_venv: bool = False,
base_working_set: list[importlib_metadata.Distribution] = None,
pipfile: tomlkit.toml_document.TOMLDocument | TPipfile | None = None,
sources: list[TSource] | None = None,
project: Project | None = None,
):
super().__init__()
self._modules = {"pipenv": pipenv}
self.base_working_set = base_working_set if base_working_set else BASE_WORKING_SET
prefix = normalize_path(prefix)
self._python = None
if python is not None:
self._python = Path(python).absolute().as_posix()
self.is_venv = is_venv or prefix != normalize_path(sys.prefix)
if not sources:
sources = []
self.project = project
if project and not sources:
sources = project.sources
self.sources = sources
if project and not pipfile:
pipfile = project.parsed_pipfile
self.pipfile = pipfile
self.extra_dists = []
if self.is_venv and prefix is not None and not Path(prefix).exists():
return
self.prefix = Path(prefix if prefix else sys.prefix)
self._base_paths = {}
if self.is_venv:
self._base_paths = self.get_paths()
self.sys_paths = get_paths()
def safe_import(self, name: str) -> ModuleType:
"""Helper utility for reimporting previously imported modules while inside the env"""
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(
iter(dist for dist in self.base_working_set if dist.project_name == name),
None,
)
if dist:
dist.activate()
module = importlib.import_module(name)
return module
@cached_property
def python_version(self) -> str | None:
with self.activated() as active:
if active:
# Extract version parts
version_str = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
python_version = Version(version_str) # Create PEP 440 compliant version
return str(python_version) # Return the string representation
else:
return None
@property
def python_info(self) -> dict[str, str]:
include_dir = self.prefix / "include"
if not include_dir.exists():
include_dirs = self.get_include_path()
if include_dirs:
include_path = include_dirs.get(
"include", include_dirs.get("platinclude")
)
if not include_path:
return {}
include_dir = Path(include_path)
python_path = next(iter(list(include_dir.iterdir())), None)
if python_path and python_path.name.startswith("python"):
python_version = python_path.name.replace("python", "")
py_version_short, abiflags = python_version[:3], python_version[3:]
return {"py_version_short": py_version_short, "abiflags": abiflags}
return {}
def _replace_parent_version(self, path: str, replace_version: str) -> str:
path_obj = Path(path)
if not path_obj.exists():
parent = path_obj.parent
grandparent = parent.parent
leaf = f"{parent.name}/{path_obj.name}"
leaf = leaf.replace(
replace_version,
self.python_info.get("py_version_short", get_python_version()),
)
return str(grandparent / leaf)
return str(path_obj)
@cached_property
def install_scheme(self):
if "venv" in get_scheme_names():
return "venv"
elif os.name == "nt":
return "nt"
else:
return "posix_prefix"
@cached_property
def base_paths(self) -> dict[str, str]:
"""
Returns the context appropriate paths for the environment.
:return: A dictionary of environment specific paths to be used for installation operations
:rtype: dict
.. note:: The implementation of this is borrowed from a combination of pip and
virtualenv and is likely to change at some point in the future.
{'PATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin::/bin:/usr/bin',
'PYTHONPATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'data': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'include': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'libdir': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platinclude': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'platlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platstdlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7',
'prefix': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'purelib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'scripts': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin',
'stdlib': '/home/hawk/.pyenv/versions/3.7.1/lib/python3.7'}
"""
prefix = Path(self.prefix)
paths = {}
if self._base_paths:
paths = self._base_paths.copy()
else:
try:
paths = self.get_paths()
except Exception:
paths = get_paths(
self.install_scheme,
vars={
"base": prefix,
"platbase": prefix,
},
)
current_version = get_python_version()
try:
for k in list(paths.keys()):
if not os.path.exists(paths[k]):
paths[k] = self._replace_parent_version(
paths[k], current_version
)
except OSError:
# Sometimes virtualenvs are made using virtualenv interpreters and there is no
# include directory, which will cause this approach to fail. This failsafe
# will make sure we fall back to the shell execution to find the real include path
paths = self.get_include_path()
paths.update(self.get_lib_paths())
paths["scripts"] = self.script_basedir
if not paths:
paths = get_paths(
self.install_scheme,
vars={
"base": prefix,
"platbase": prefix,
},
)
if not os.path.exists(paths["purelib"]) and not os.path.exists(paths["platlib"]):
lib_paths = self.get_lib_paths()
paths.update(lib_paths)
paths["PATH"] = str(paths["scripts"]) + os.pathsep + os.defpath
if "prefix" not in paths:
paths["prefix"] = prefix
purelib = paths["purelib"] = Path(paths["purelib"])
platlib = paths["platlib"] = Path(paths["platlib"])
if purelib == platlib:
lib_dirs = [purelib]
else:
lib_dirs = [purelib, platlib]
paths["libdir"] = purelib
paths["PYTHONPATH"] = os.pathsep.join(["", ".", str(purelib), str(platlib)])
paths["libdirs"] = lib_dirs
return paths
@cached_property
def script_basedir(self) -> str:
"""Path to the environment scripts dir"""
prefix = Path(self.prefix)
paths = get_paths(
self.install_scheme,
vars={
"base": prefix,
"platbase": prefix,
},
)
return paths["scripts"]
@property
def python(self) -> str:
"""Path to the environment python"""
if self._python is None:
self._python = (
(virtualenv_scripts_dir(self.prefix) / "python").absolute().as_posix()
)
return self._python
@cached_property
def sys_path(self) -> list[str]:
"""
The system path inside the environment
:return: The :data:`sys.path` from the environment
:rtype: list
"""
import json
current_executable = Path(sys.executable).as_posix()
if not self.python or self.python == current_executable:
return sys.path
elif any([sys.prefix == self.prefix, not self.is_venv]):
return sys.path
try:
path = pipenv.utils.shell.load_path(self.python)
except json.decoder.JSONDecodeError:
path = sys.path
return path
def build_command(
self,
python_lib: bool = False,
python_inc: bool = False,
scripts: bool = False,
py_version: bool = False,
) -> str:
"""Build the text for running a command in the given environment
:param python_lib: Whether to include the python lib dir commands, defaults to False
:type python_lib: bool, optional
:param python_inc: Whether to include the python include dir commands, defaults to False
:type python_inc: bool, optional
:param scripts: Whether to include the scripts directory, defaults to False
:type scripts: bool, optional
:param py_version: Whether to include the python version info, defaults to False
:type py_version: bool, optional
:return: A string representing the command to run
"""
pylib_lines = []
pyinc_lines = []
py_command = (
"import sysconfig, json; paths = {%s};"
"value = u'{0}'.format(json.dumps(paths)); print(value)"
)
sysconfig_line = "sysconfig.get_path('{0}')"
if python_lib:
pylib_lines += [
f"u'{key}': u'{{0}}'.format({sysconfig_line.format(key)})"
for key in ("purelib", "platlib", "stdlib", "platstdlib")
]
if python_inc:
pyinc_lines += [
f"u'{key}': u'{{0}}'.format({sysconfig_line.format(key)})"
for key in ("include", "platinclude")
]
lines = pylib_lines + pyinc_lines
if scripts:
lines.append(
"u'scripts': u'{{0}}'.format({})".format(sysconfig_line.format("scripts"))
)
if py_version:
lines.append(
"u'py_version_short': u'{0}'.format(sysconfig.get_python_version()),"
)
lines_as_str = ",".join(lines)
py_command = py_command % lines_as_str
return py_command
def get_paths(self) -> dict[str, str] | None:
"""
Get the paths for the environment by running a subcommand
:return: The python paths for the environment
:rtype: Dict[str, str]
"""
py_command = self.build_command(
python_lib=True, python_inc=True, scripts=True, py_version=True
)
command = [self.python, "-c", py_command]
c = subprocess_run(command)
if c.returncode == 0:
paths = json.loads(c.stdout)
if "purelib" in paths:
paths["libdir"] = paths["purelib"] = Path(paths["purelib"])
for key in (
"platlib",
"scripts",
"platstdlib",
"stdlib",
"include",
"platinclude",
):
if key in paths:
paths[key] = Path(paths[key])
return paths
else:
console.print(f"Failed to load paths: {c.stderr}", style="yellow")
console.print(f"Output: {c.stdout}", style="yellow")
return None
def get_lib_paths(self) -> dict[str, str]:
"""Get the include path for the environment
:return: The python include path for the environment
:rtype: Dict[str, str]
"""
py_command = self.build_command(python_lib=True)
command = [self.python, "-c", py_command]
c = subprocess_run(command)
paths = None
if c.returncode == 0:
paths = json.loads(c.stdout)
if "purelib" in paths:
paths["libdir"] = paths["purelib"] = Path(paths["purelib"])
for key in ("platlib", "platstdlib", "stdlib"):
if key in paths:
paths[key] = Path(paths[key])
return paths
else:
console.print(f"Failed to load paths: {c.stderr}", style="yellow")
console.print(f"Output: {c.stdout}", style="yellow")
if not paths:
if not self.prefix.joinpath("lib").exists():
return {}
stdlib_path = next(
iter(
[
p
for p in self.prefix.joinpath("lib").iterdir()
if p.name.startswith("python")
]
),
None,
)
lib_path = None
if stdlib_path:
lib_path = next(
iter(
[
p.as_posix()
for p in stdlib_path.iterdir()
if p.name == "site-packages"
]
)
)
paths = {"stdlib": stdlib_path.as_posix()}
if lib_path:
paths["purelib"] = lib_path
return paths
return {}
def get_include_path(self) -> dict[str, str] | None:
"""Get the include path for the environment
:return: The python include path for the environment
:rtype: Dict[str, str]
"""
py_command = self.build_command(python_inc=True)
command = [self.python, "-c", py_command]
c = subprocess_run(command)
if c.returncode == 0:
paths = json.loads(c.stdout)
for key in ("include", "platinclude"):
if key in paths:
paths[key] = Path(paths[key])
return paths
else:
console.print(f"Failed to load paths: {c.stderr}", style="yellow")
console.print(f"Output: {c.stdout}", style="yellow")
return None
@cached_property
def sys_prefix(self) -> str:
"""
The prefix run inside the context of the environment
:return: The python prefix inside the environment
:rtype: :data:`sys.prefix`
"""
command = [self.python, "-c", "import sys; print(sys.prefix)"]
c = subprocess_run(command)
sys_prefix = Path(c.stdout.strip()).as_posix()
return sys_prefix
@cached_property
def paths(self) -> dict[str, str]:
paths = {}
with temp_environ(), temp_path():
os.environ["PYTHONIOENCODING"] = "utf-8"
os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
paths = self.base_paths
os.environ["PATH"] = paths["PATH"]
os.environ["PYTHONPATH"] = paths["PYTHONPATH"]
if "headers" not in paths:
paths["headers"] = paths["include"]
return paths
@property
def scripts_dir(self) -> str:
return self.paths["scripts"]
@property
def libdir(self) -> str:
purelib = self.paths.get("purelib", None)
if purelib and os.path.exists(purelib):
return "purelib", purelib
return "platlib", self.paths["platlib"]
def expand_egg_links(self) -> None:
"""
Expand paths specified in egg-link files to prevent pip errors during
reinstall
"""
prefixes = [
Path(prefix)
for prefix in self.base_paths["libdirs"].split(os.pathsep)
if is_in_path(prefix, self.prefix.as_posix())
]
for loc in prefixes:
if not loc.exists():
continue
for pth in loc.iterdir():
if pth.suffix != ".egg-link":
continue
contents = [
normalize_path(line.strip()) for line in pth.read_text().splitlines()
]
pth.write_text("\n".join(contents))
def get_distributions(self) -> Generator[importlib_metadata.Distribution, None, None]:
"""
Retrieves the distributions installed on the library path of the environment
:return: A set of distributions found on the library path
:rtype: iterator
"""
libdirs = self.base_paths["libdirs"]
for libdir in libdirs:
dists = importlib_metadata.distributions(path=[str(libdir)])
yield from dists
def find_egg(self, egg_dist: importlib_metadata.Distribution) -> str:
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = f"{normalized_name(egg_dist)}.egg-link"
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg
def locate_dist(self, dist: importlib_metadata.Distribution) -> str:
"""Given a distribution, try to find a corresponding egg link first.
If the egg - link doesn 't exist, return the supplied distribution."""
location = self.find_egg(dist)
return location or dist._path
def dist_is_in_project(self, dist: importlib_metadata.Distribution) -> bool:
"""Determine whether the supplied distribution is in the environment."""
libdirs = self.base_paths["libdirs"]
location = Path(self.locate_dist(dist))
if not location:
return False
# Since is_relative_to is not available in Python 3.8, we use a workaround
if sys.version_info < (3, 9):
location_str = str(location)
return any(location_str.startswith(str(libdir)) for libdir in libdirs)
else:
return any(location.is_relative_to(libdir) for libdir in libdirs)
def get_installed_packages(self) -> list[importlib_metadata.Distribution]:
"""Returns all of the installed packages in a given environment"""
workingset = self.get_working_set()
packages = [
pkg
for pkg in workingset
if self.dist_is_in_project(pkg) and normalized_name(pkg) != "python"
]
return packages
@contextlib.contextmanager
def get_finder(self, pre: bool = False) -> ContextManager[PackageFinder]:
from .utils.resolver import get_package_finder
pip_command = InstallCommand(
name="InstallCommand", summary="pip Install command."
)
pip_args = prepare_pip_source_args(self.sources)
pip_options, _ = pip_command.parser.parse_args(pip_args)
pip_options.cache_dir = self.project.s.PIPENV_CACHE_DIR
pip_options.pre = self.pipfile.get("pre", pre)
session = pip_command._build_session(pip_options)
finder = get_package_finder(
install_cmd=pip_command, options=pip_options, session=session
)
yield finder
def get_package_info(
self, pre: bool = False
) -> Generator[importlib_metadata.Distribution, None, None]:
packages = self.get_installed_packages()
with self.get_finder() as finder:
for dist in packages:
name = normalized_name(dist)
all_candidates = finder.find_all_candidates(name)
if not self.pipfile.get("pre", finder.allow_all_prereleases):
# Remove prereleases
all_candidates = [
candidate
for candidate in all_candidates
if not candidate.version.is_prerelease
]
if not all_candidates:
continue
candidate_evaluator = finder.make_candidate_evaluator(project_name=name)
best_candidate_result = candidate_evaluator.compute_best_candidate(
all_candidates
)
remote_version = parse_version(
str(best_candidate_result.best_candidate.version)
)
if best_candidate_result.best_candidate.link.is_wheel:
pass
else:
pass
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
yield dist
def get_outdated_packages(
self, pre: bool = False
) -> list[importlib_metadata.Distribution]:
return [
pkg
for pkg in self.get_package_info(pre=pre)
if pkg.latest_version > parse_version(pkg.version)
]
@classmethod
def _get_requirements_for_package(cls, node, key_tree, parent=None, chain=None):
if chain is None:
chain = [node.project_name]
d = node.as_dict()
if parent:
d["required_version"] = node.version_spec if node.version_spec else "Any"
else:
d["required_version"] = d["installed_version"]
get_children = lambda n: key_tree.get(n.key, []) # noqa
d["dependencies"] = [
cls._get_requirements_for_package(
c, key_tree, parent=node, chain=chain + [c.project_name]
)
for c in get_children(node)
if c.project_name not in chain
]
return d
def get_package_requirements(self, pkg=None):
flatten = chain.from_iterable
packages = self.get_installed_packages()
if pkg:
packages = [p for p in packages if normalized_name(p) == pkg]
try:
tree = PackageDAG.from_pkgs(packages)
except InvalidRequirementError as e:
console.print(f"Invalid requirement: {e}", style="yellow")
tree = PackageDAG({})
except UndefinedEnvironmentName:
# Handle the case when 'extra' environment variable is not defined
tree = PackageDAG({})
except Exception as e:
# Handle any other exceptions that may occur during PackageDAG initialization
console.print(f"Failed to create PackageDAG: {e}", style="yellow")
tree = PackageDAG({})
tree = tree.sort()
branch_keys = {r.project_name for r in flatten(tree.values())}
if pkg is None:
nodes = [p for p in tree if p.project_name not in branch_keys]
else:
nodes = [p for p in tree if p.project_name == pkg]
key_tree = {k.project_name: v for k, v in tree.items()}
return [self._get_requirements_for_package(p, key_tree) for p in nodes]
@classmethod
def reverse_dependency(cls, node):
new_node = {
"package_name": node["package_name"],
"installed_version": node["installed_version"],
"required_version": node["required_version"],
}
for dependency in node.get("dependencies", []):
for dep in cls.reverse_dependency(dependency):
new_dep = dep.copy()
new_dep["parent"] = (node["package_name"], node["installed_version"])
yield new_dep
yield new_node
def reverse_dependencies(self):
rdeps = {}
for req in self.get_package_requirements():
for d in self.reverse_dependency(req):
parents = None
name = d["package_name"]
pkg = {
name: {
"installed": d["installed_version"],
"required": d["required_version"],
}
}
parents = tuple(d.get("parent", ()))
pkg[name]["parents"] = parents
if rdeps.get(name):
if not (rdeps[name].get("required") or rdeps[name].get("installed")):
rdeps[name].update(pkg[name])
rdeps[name]["parents"] = rdeps[name].get("parents", ()) + parents
else:
rdeps[name] = pkg[name]
for k in list(rdeps.keys()):
entry = rdeps[k]
if entry.get("parents"):
rdeps[k]["parents"] = {
p for p, version in chunked(2, unnest(entry["parents"]))
}
return rdeps
def get_working_set(self) -> Iterable:
"""Retrieve the working set of installed packages for the environment."""
if not hasattr(self, "sys_path"):
return []
return importlib_metadata.distributions(path=self.sys_path)
def is_installed(self, pkgname):
"""Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
"""
return any(d for d in self.get_distributions() if normalized_name(d) == pkgname)
def is_satisfied(self, req: InstallRequirement):
match = next(
iter(
d
for d in self.get_distributions()
if req.name
and canonicalize_name(normalized_name(d)) == canonicalize_name(req.name)
),
None,
)
if match is not None:
if req.specifier is not None:
return SpecifierSet(str(req.specifier)).contains(
match.version, prereleases=True
)
if req.link is None:
return True
elif req.editable and req.link.is_file:
requested_path = req.link.file_path
if os.path.exists(requested_path):
local_path = requested_path
else:
parsed_url = urlparse(requested_path)
local_path = parsed_url.path
return requested_path and os.path.samefile(local_path, match.location)
elif match.has_metadata("direct_url.json") or (req.link and req.link.is_vcs):
# Direct URL installs and VCS installs we assume are not satisfied
# since due to skip-lock we may be installing from Pipfile we have insufficient
# information to determine if a branch or ref has actually changed.
return False
return True
return False
def run_activate_this(self):
"""Runs the environment's inline activation script"""
if self.is_venv:
activate_this = os.path.join(self.scripts_dir, "activate_this.py")
if not os.path.isfile(activate_this):
raise OSError(f"No such file: {activate_this!s}")
with open(activate_this) as f:
code = compile(f.read(), activate_this, "exec")
exec(code, {"__file__": activate_this})
@contextlib.contextmanager
def activated(self):
"""Helper context manager to activate the environment.
This context manager will set the following variables for the duration
of its activation:
* sys.prefix
* sys.path
* os.environ["VIRTUAL_ENV"]
* os.environ["PATH"]
In addition, it will make any distributions passed into `extra_dists` available
on `sys.path` while inside the context manager, as well as making `passa` itself
available.
The environment's `prefix` as well as `scripts_dir` properties are both prepended
to `os.environ["PATH"]` to ensure that calls to `~Environment.run()` use the
environment's path preferentially.
"""
# Fail if the virtualenv is needed but cannot be found
if self.is_venv and (
hasattr(self, "prefix")
and not self.prefix.exists()
or not hasattr(self, "prefix")
):
yield False
return
original_path = sys.path
original_prefix = sys.prefix
prefix = self.prefix.as_posix()
with temp_environ(), temp_path():
os.environ["PATH"] = os.pathsep.join(
[
self.script_basedir,
self.prefix.as_posix(),
os.environ.get("PATH", ""),
]
)
os.environ["PYTHONIOENCODING"] = "utf-8"
os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
if self.is_venv:
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ["VIRTUAL_ENV"] = prefix
elif not self.project.s.PIPENV_USE_SYSTEM and not os.environ.get(
"VIRTUAL_ENV"
):
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ.pop("PYTHONHOME", None)
sys.path = self.sys_path
sys.prefix = self.sys_prefix
try:
yield True
finally:
sys.path = original_path
sys.prefix = original_prefix
| Environment |
python | django__django | tests/test_client/test_conditional_content_removal.py | {
"start": 186,
"end": 1957
} | class ____(SimpleTestCase):
def test_conditional_content_removal(self):
"""
Content is removed from regular and streaming responses with a
status_code of 100-199, 204, 304, or a method of "HEAD".
"""
req = HttpRequest()
# Do nothing for 200 responses.
res = HttpResponse("abc")
conditional_content_removal(req, res)
self.assertEqual(res.content, b"abc")
res = StreamingHttpResponse(["abc"])
conditional_content_removal(req, res)
self.assertEqual(b"".join(res), b"abc")
# Strip content for some status codes.
for status_code in (100, 150, 199, 204, 304):
res = HttpResponse("abc", status=status_code)
conditional_content_removal(req, res)
self.assertEqual(res.content, b"")
res = StreamingHttpResponse(["abc"], status=status_code)
conditional_content_removal(req, res)
self.assertEqual(b"".join(res), b"")
# Issue #20472
abc = gzip.compress(b"abc")
res = HttpResponse(abc, status=304)
res["Content-Encoding"] = "gzip"
conditional_content_removal(req, res)
self.assertEqual(res.content, b"")
res = StreamingHttpResponse([abc], status=304)
res["Content-Encoding"] = "gzip"
conditional_content_removal(req, res)
self.assertEqual(b"".join(res), b"")
# Strip content for HEAD requests.
req.method = "HEAD"
res = HttpResponse("abc")
conditional_content_removal(req, res)
self.assertEqual(res.content, b"")
res = StreamingHttpResponse(["abc"])
conditional_content_removal(req, res)
self.assertEqual(b"".join(res), b"")
| ConditionalContentTests |
python | Lightning-AI__lightning | src/lightning/pytorch/overrides/distributed.py | {
"start": 9483,
"end": 9983
} | class ____(UnrepeatedDistributedSampler):
"""Equivalent class to ``DistributedSamplerWrapper`` but for the ``UnrepeatedDistributedSampler``."""
def __init__(self, sampler: Union[Sampler, Iterable], *args: Any, **kwargs: Any) -> None:
super().__init__(_DatasetSamplerWrapper(sampler), *args, **kwargs)
@override
def __iter__(self) -> Iterator:
self.dataset.reset()
return (self.dataset[index] for index in super().__iter__())
| UnrepeatedDistributedSamplerWrapper |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox23.py | {
"start": 315,
"end": 868
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox23.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"font": {"color": "red"}})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 43667,
"end": 43932
} | class ____(_PrintableStructure):
_fields_ = [
('pid', c_uint),
('usedGpuMemory', c_ulonglong),
('gpuInstanceId', c_uint),
('computeInstanceId', c_uint),
]
_fmt_ = {'usedGpuMemory': "%d B",
}
| c_nvmlProcessInfo_t |
python | django__django | tests/serializers/models/base.py | {
"start": 2351,
"end": 2663
} | class ____(models.Model):
actor = models.ForeignKey(Actor, models.CASCADE)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal("0.00"))
class Meta:
ordering = ("title",)
def __str__(self):
return self.title
| Movie |
python | vyperlang__vyper | tests/functional/builtins/codegen/test_convert.py | {
"start": 1366,
"end": 5828
} | class ____(Exception):
"""
A Python-level conversion is out of bounds
"""
pass
def can_convert(i_typ, o_typ):
"""
Checks whether conversion from one type to another is valid.
"""
if i_typ == o_typ:
return False
if isinstance(o_typ, BoolT):
return True
if isinstance(i_typ, BoolT):
return not isinstance(o_typ, AddressT)
if isinstance(i_typ, IntegerT):
if isinstance(o_typ, BytesM_T):
return bytes_of_type(i_typ) <= bytes_of_type(o_typ)
ret = isinstance(o_typ, (IntegerT, DecimalT, BytesM_T, BytesT))
if not i_typ.is_signed:
ret |= isinstance(o_typ, AddressT)
return ret
if isinstance(i_typ, BytesM_T):
if isinstance(o_typ, BytesT):
# bytesN must be of equal or smaller size to the input
return bytes_of_type(i_typ) <= bytes_of_type(o_typ)
return isinstance(o_typ, (DecimalT, BytesM_T, IntegerT, AddressT))
if isinstance(i_typ, BytesT):
return isinstance(o_typ, (IntegerT, DecimalT, AddressT))
if isinstance(i_typ, DecimalT):
if isinstance(o_typ, BytesM_T):
return bytes_of_type(i_typ) <= bytes_of_type(o_typ)
return isinstance(o_typ, (IntegerT, BoolT))
if isinstance(i_typ, AddressT):
if isinstance(o_typ, BytesM_T):
return bytes_of_type(i_typ) <= bytes_of_type(o_typ)
if isinstance(o_typ, IntegerT):
return not o_typ.is_signed
return False
raise AssertionError(f"unreachable {i_typ} {o_typ}")
def uniq(xs):
return list(set(xs))
def _cases_for_int(typ):
lo, hi = typ.ast_bounds
ret = [lo - 1, lo, lo + 1, -1, 0, 1, hi - 1, hi, hi + 1]
# random cases cause reproducibility issues. TODO fixme
# NUM_RANDOM_CASES = 6
# ret.extend(random.randrange(lo, hi) for _ in range(NUM_RANDOM_CASES))
return ret
def _cases_for_decimal(typ):
lo, hi = typ.ast_bounds
ret = [Decimal(i) for i in [-1, 0, 1]]
ret.extend([lo - 1, lo, lo + 1, hi - 1, hi, hi + 1])
ret.extend(
[lo - DECIMAL_EPSILON, lo + DECIMAL_EPSILON, hi - DECIMAL_EPSILON, hi + DECIMAL_EPSILON]
)
# random cases cause reproducibility issues. TODO fixme
# (use int values because randrange can't generate fractional decimals)
# int_lo, int_hi = info.bounds # e.g. -(2**167)
# NUM_RANDOM_CASES = 10 # more than int, just for paranoia's sake
# DIVISOR = info.divisor
# ret.extend(random.randrange(int_lo, int_hi) / DIVISOR for _ in range(NUM_RANDOM_CASES))
return ret
def _cases_for_address(_typ):
cases = _filter_cases(_cases_for_int(UINT160_T), UINT160_T)
return [_py_convert(c, UINT160_T, AddressT()) for c in cases]
def _cases_for_bool(_typ):
return [True, False]
def _cases_for_bytes(typ):
# reuse the cases for the equivalent int type
equiv_int_type = UINT(typ.m_bits)
cases = _filter_cases(_cases_for_int(equiv_int_type), equiv_int_type)
return [_py_convert(c, equiv_int_type, typ) for c in cases]
def _cases_for_Bytes(typ):
ret = []
# would not need this if we tested all Bytes[1]...Bytes[32] types.
for i in range(32):
ret.extend(_cases_for_bytes(BytesM_T(i + 1)))
ret.append(b"")
return uniq(ret)
def _cases_for_String(typ):
ret = []
# would not need this if we tested all Bytes[1]...Bytes[32] types.
for i in range(32):
ret.extend([str(c, "utf-8") for c in _cases_for_bytes(BytesM_T(i + 1))])
ret.append("")
return uniq(ret)
# generate all cases of interest for a type, potentially including invalid cases
def interesting_cases_for_type(typ):
if isinstance(typ, IntegerT):
return _cases_for_int(typ)
if isinstance(typ, DecimalT):
return _cases_for_decimal(typ)
if isinstance(typ, BytesM_T):
return _cases_for_bytes(typ)
if isinstance(typ, BytesT):
return _cases_for_Bytes(typ)
if isinstance(typ, StringT):
return _cases_for_String(typ)
if isinstance(typ, BoolT):
return _cases_for_bool(typ)
if isinstance(typ, AddressT):
return _cases_for_address(typ)
def _filter_cases(cases, i_typ):
cases = uniq(cases)
def _in_bounds(c):
try:
return _py_convert(c, i_typ, i_typ) is not None
except eth.codecs.abi.exceptions.EncodeError:
return False
return [c for c in cases if _in_bounds(c)]
| _OutOfBounds |
python | huggingface__transformers | src/transformers/models/florence2/modeling_florence2.py | {
"start": 6381,
"end": 8854
} | class ____(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, config: Florence2VisionConfig, stage_idx: int):
super().__init__()
self.config = config
self.stage_idx = stage_idx
self.patch_size = config.patch_size[stage_idx]
self.in_channels = config.in_channels if stage_idx == 0 else config.embed_dim[stage_idx - 1]
self.embed_dim = config.embed_dim[stage_idx]
self.stride = config.patch_stride[stage_idx]
self.padding = config.patch_padding[stage_idx]
self.pre_norm = config.patch_prenorm[stage_idx]
self.conv = nn.Conv2d(
self.in_channels,
self.embed_dim,
kernel_size=self.patch_size,
stride=self.stride,
padding=self.padding,
)
dim_norm = self.in_channels if self.pre_norm else self.embed_dim
self.norm = nn.LayerNorm(dim_norm)
def forward(self, hidden_states: torch.Tensor):
if self.norm and self.pre_norm:
hidden_states = hidden_states.permute(0, 2, 3, 1)
hidden_states = self.norm(hidden_states)
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = self.conv(hidden_states)
if self.norm and not self.pre_norm:
hidden_states = hidden_states.permute(0, 2, 3, 1)
hidden_states = self.norm(hidden_states)
hidden_states = hidden_states.permute(0, 3, 1, 2)
return hidden_states
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Florence2VisionConvEmbed |
python | google__pytype | pytype/tests/test_flow3.py | {
"start": 300,
"end": 3085
} | class ____(test_base.BaseTest):
"""Tests for python 3.11 support."""
def test_context_manager(self):
self.Check("""
class A:
def __enter__(self):
pass
def __exit__(self, a, b, c):
pass
lock = A()
def f() -> str:
path = ''
with lock:
try:
pass
except:
pass
return path
""")
def test_exception_type(self):
self.Check("""
class FooError(Exception):
pass
try:
raise FooError()
except FooError as e:
assert_type(e, FooError)
""")
def test_try_with(self):
self.Check("""
def f(obj, x):
try:
with __any_object__:
obj.get(x)
except:
pass
""")
def test_try_if_with(self):
self.Check("""
from typing import Any
import os
pytz: Any
def f():
tz_env = os.environ.get('TZ')
try:
if tz_env == 'localtime':
with open('localtime') as localtime:
return pytz.tzfile.build_tzinfo('', localtime)
except IOError:
return pytz.UTC
""")
def test_try_finally(self):
self.Check("""
import tempfile
dir_ = None
def f():
global dir_
try:
if dir_:
return dir_
dir_ = tempfile.mkdtemp()
finally:
print(dir_)
""")
def test_nested_try_in_for(self):
self.Check("""
def f(x):
for i in x:
fd = __any_object__
try:
try:
if __random__:
return True
except ValueError:
continue
finally:
fd.close()
""")
def test_while_and_nested_try(self):
self.Check("""
def f(p):
try:
while __random__:
try:
return p.communicate()
except KeyboardInterrupt:
pass
finally:
pass
""")
def test_while_and_nested_try_2(self):
self.Check("""
def f():
i = j = 0
while True:
try:
try:
i += 1
finally:
j += 1
except:
break
return
""")
def test_while_and_nested_try_3(self):
self.Check("""
import os
def RmDirs(dir_name):
try:
parent_directory = os.path.dirname(dir_name)
while parent_directory:
try:
os.rmdir(parent_directory)
except OSError as err:
pass
parent_directory = os.path.dirname(parent_directory)
except OSError as err:
pass
""")
if __name__ == "__main__":
test_base.main()
| TestPy311 |
python | ray-project__ray | doc/source/tune/doc_code/stopping.py | {
"start": 1529,
"end": 4461
} | class ____(Stopper):
def __init__(self):
self.should_stop = False
def __call__(self, trial_id: str, result: dict) -> bool:
if not self.should_stop and result["mean_accuracy"] >= 0.8:
self.should_stop = True
return self.should_stop
def stop_all(self) -> bool:
"""Returns whether to stop trials and prevent new ones from starting."""
return self.should_stop
stopper = CustomStopper()
tuner = tune.Tuner(
my_trainable,
run_config=tune.RunConfig(stop=stopper),
tune_config=tune.TuneConfig(num_samples=2),
)
result_grid = tuner.fit()
# __stopping_cls_end__
for result in result_grid:
final_iter = result.metrics.get("training_iteration", 0)
assert final_iter <= 8, final_iter
# __stopping_on_trial_error_start__
from ray import tune
import time
def my_failing_trainable(config):
if config["should_fail"]:
raise RuntimeError("Failing (on purpose)!")
# Do some training...
time.sleep(10)
tune.report({"mean_accuracy": 0.9})
tuner = tune.Tuner(
my_failing_trainable,
param_space={"should_fail": tune.grid_search([True, False])},
run_config=tune.RunConfig(failure_config=tune.FailureConfig(fail_fast=True)),
)
result_grid = tuner.fit()
# __stopping_on_trial_error_end__
for result in result_grid:
# Should never get to report
final_iter = result.metrics.get("training_iteration")
assert not final_iter, final_iter
# __early_stopping_start__
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
scheduler = AsyncHyperBandScheduler(time_attr="training_iteration")
tuner = tune.Tuner(
my_trainable,
run_config=tune.RunConfig(stop={"training_iteration": 10}),
tune_config=tune.TuneConfig(
scheduler=scheduler, num_samples=2, metric="mean_accuracy", mode="max"
),
)
result_grid = tuner.fit()
# __early_stopping_end__
def my_trainable(config):
# NOTE: Introduce the sleep again for the time-based unit-tests.
i = 1
while True:
time.sleep(1)
# Do some training, and report some metrics for demonstration...
tune.report({"mean_accuracy": min(i / 10, 1.0)})
i += 1
# __stopping_trials_by_time_start__
from ray import tune
tuner = tune.Tuner(
my_trainable,
# Stop a trial after it's run for more than 5 seconds.
run_config=tune.RunConfig(stop={"time_total_s": 5}),
)
result_grid = tuner.fit()
# __stopping_trials_by_time_end__
# Should only get ~5 reports
assert result_grid[0].metrics["training_iteration"] < 8
# __stopping_experiment_by_time_start__
from ray import tune
# Stop the entire experiment after ANY trial has run for more than 5 seconds.
tuner = tune.Tuner(my_trainable, tune_config=tune.TuneConfig(time_budget_s=5.0))
result_grid = tuner.fit()
# __stopping_experiment_by_time_end__
# Should only get ~5 reports
assert result_grid[0].metrics["training_iteration"] < 8
| CustomStopper |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 234444,
"end": 265817
} | class ____(ClassDefNode):
# An extension type definition.
#
# visibility 'private' or 'public' or 'extern'
# typedef_flag boolean
# api boolean
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
# bases TupleNode Base class(es)
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
# in_pxd boolean Is in a .pxd file
# decorators [DecoratorNode] list of decorators or None
# doc string or None
# body StatNode or None
# entry Symtab.Entry
# base_type PyExtensionType or None
# buffer_defaults_node DictNode or None Declares defaults for a buffer
# buffer_defaults_pos
child_attrs = ["body"]
buffer_defaults_node = None
buffer_defaults_pos = None
typedef_flag = False
api = False
objstruct_name = None
typeobj_name = None
check_size = None
decorators = None
shadow = False
@property
def punycode_class_name(self):
return punycodify_name(self.class_name)
def buffer_defaults(self, env):
if not hasattr(self, '_buffer_defaults'):
from . import Buffer
if self.buffer_defaults_node:
self._buffer_defaults = Buffer.analyse_buffer_options(
self.buffer_defaults_pos,
env, [], self.buffer_defaults_node,
need_complete=False)
else:
self._buffer_defaults = None
return self._buffer_defaults
def declare(self, env):
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return None
else:
home_scope = env
self.entry = home_scope.declare_c_class(
name=self.class_name,
pos=self.pos,
defining=0,
implementing=0,
module_name=self.module_name,
base_type=None,
objstruct_cname=self.objstruct_name,
typeobj_cname=self.typeobj_name,
visibility=self.visibility,
typedef_flag=self.typedef_flag,
check_size = self.check_size,
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
if self.bases and len(self.bases.args) > 1:
self.entry.type.multiple_bases = True
def _handle_cclass_decorators(self, env):
extra_directives = {}
if not self.decorators:
return extra_directives
from . import ExprNodes
remaining_decorators = []
for original_decorator in self.decorators:
decorator = original_decorator.decorator
# entries aren't set at this point, so unfortunately we can't just do
# decorator.get_known_standard_library_import().
# Instead we have to manually look it up
decorator_call = None
if isinstance(decorator, ExprNodes.CallNode):
decorator_call = decorator
decorator = decorator.function
known_name = Builtin.exprnode_to_known_standard_library_name(decorator, env)
if known_name == 'functools.total_ordering':
if decorator_call:
error(decorator_call.pos, "total_ordering cannot be called.")
extra_directives["total_ordering"] = True
continue
elif known_name == "dataclasses.dataclass":
args = None
kwds = {}
if decorator_call:
if isinstance(decorator_call, ExprNodes.SimpleCallNode):
args = decorator_call.args
else:
args = decorator_call.positional_args.args
kwds_ = decorator_call.keyword_args
if kwds_:
kwds = kwds_.as_python_dict()
extra_directives[known_name] = (args, kwds)
continue
remaining_decorators.append(original_decorator)
if remaining_decorators:
error(remaining_decorators[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
self.decorators = remaining_decorators
return extra_directives
def analyse_declarations(self, env):
#print "CClassDefNode.analyse_declarations:", self.class_name
#print "...visibility =", self.visibility
#print "...module_name =", self.module_name
if env.in_cinclude and not self.objstruct_name:
error(self.pos, "Object struct name specification required for C class defined in 'extern from' block")
extra_directives = self._handle_cclass_decorators(env)
self.base_type = None
# Now that module imports are cached, we need to
# import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
if module.name == self.module_name:
self.module = module
if self.module is None:
self.module = ModuleScope(self.module_name, None, env.context)
self.module.has_extern_class = 1
env.add_imported_module(self.module)
if self.bases.args:
base = self.bases.args[0]
base_type = base.analyse_as_type(env)
# If we accidentally picked the C type of the same name, use the Python rather than the C variant.
# We need to go through a local lookup since the builtin names might be redefined by user code.
if base_type is PyrexTypes.c_int_type:
base_type = env.lookup('int').type
elif base_type is PyrexTypes.c_float_type:
base_type = env.lookup('float').type
elif base_type is PyrexTypes.c_double_complex_type:
base_type = env.lookup('complex').type
if base_type is None:
error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
elif base_type is py_object_type:
base_class_scope = None
elif not base_type.is_extension_type and \
not (base_type.is_builtin_type and base_type.objstruct_cname):
error(base.pos, "'%s' is not an extension type" % base_type)
elif not base_type.is_complete():
error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
base_type.name, self.class_name))
elif base_type.scope and base_type.scope.directives and \
base_type.is_final_type:
error(base.pos, "Base class '%s' of type '%s' is final" % (
base_type, self.class_name))
elif base_type.is_builtin_type and \
base_type.name in ('tuple', 'bytes'):
error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
% base_type.name)
else:
self.base_type = base_type
if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
# To properly initialize inherited attributes, the base type must
# be analysed before this type.
self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env))
return
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return
else:
home_scope = env
if self.visibility == 'extern':
if (self.module_name == '__builtin__' and
self.class_name in Builtin.builtin_types and
env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
name=self.class_name,
pos=self.pos,
defining=has_body and self.in_pxd,
implementing=has_body and not self.in_pxd,
module_name=self.module_name,
base_type=self.base_type,
objstruct_cname=self.objstruct_name,
typeobj_cname=self.typeobj_name,
check_size=self.check_size,
visibility=self.visibility,
typedef_flag=self.typedef_flag,
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
if self.bases and len(self.bases.args) > 1:
self.entry.type.multiple_bases = True
if self.shadow:
home_scope.lookup(self.class_name).as_variable = self.entry
if home_scope is not env and self.visibility == 'extern':
env.add_imported_entry(self.class_name, self.entry, self.pos)
self.scope = scope = self.entry.type.scope
if scope is not None:
if extra_directives:
scope.directives = env.directives.copy()
scope.directives.update(extra_directives)
else:
scope.directives = env.directives
if "dataclasses.dataclass" in scope.directives:
is_frozen = False
# Retrieve the @dataclass config (args, kwargs), as passed into the decorator.
dataclass_config = scope.directives["dataclasses.dataclass"]
if dataclass_config:
decorator_kwargs = dataclass_config[1]
frozen_flag = decorator_kwargs.get('frozen')
is_frozen = frozen_flag and frozen_flag.is_literal and frozen_flag.value
scope.is_c_dataclass_scope = "frozen" if is_frozen else True
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
if has_body:
self.body.analyse_declarations(scope)
dict_entry = self.scope.lookup_here("__dict__")
if dict_entry and dict_entry.is_variable and (not scope.defined and not scope.implemented):
dict_entry.getter_cname = self.scope.mangle_internal("__dict__getter")
self.scope.declare_property("__dict__", dict_entry.doc, dict_entry.pos)
if self.in_pxd:
scope.defined = 1
else:
scope.implemented = 1
if len(self.bases.args) > 1:
if not has_body or self.in_pxd:
error(self.bases.args[1].pos, "Only declare first base in declaration.")
# At runtime, we check that the other bases are heap types
# and that a __dict__ is added if required.
for other_base in self.bases.args[1:]:
if other_base.analyse_as_type(env):
error(other_base.pos, "Only one extension type base class allowed.")
self.entry.type.early_init = 0
from . import ExprNodes
self.type_init_args = ExprNodes.TupleNode(
self.pos,
args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
self.bases,
ExprNodes.DictNode(self.pos, key_value_pairs=[])])
elif self.base_type:
self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
self.type_init_args = None
else:
self.entry.type.early_init = 1
self.type_init_args = None
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
thunk()
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
if self.type_init_args:
self.type_init_args.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
code.mark_pos(self.pos)
if not self.entry.type.early_init:
bases = None
if self.type_init_args:
# Extract bases tuple and validate 'best base' by actually calling 'type()'.
bases = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
self.type_init_args.generate_evaluation_code(code)
code.putln("%s = __Pyx_PyTuple_GET_ITEM(%s, 1);" % (bases, self.type_init_args.result()))
code.putln(code.error_goto_if(f"!CYTHON_ASSUME_SAFE_MACROS && !{bases}", self.pos))
code.put_incref(bases, PyrexTypes.py_object_type)
first_base = code.funcstate.allocate_temp(Builtin.type_type, manage_ref=False)
code.putln(f"{first_base} = ((PyTypeObject*)__Pyx_PyTuple_GET_ITEM({bases}, 0));")
code.putln(code.error_goto_if(f"!CYTHON_ASSUME_SAFE_MACROS && !{first_base}", self.pos))
# Let Python do the base types compatibility checking.
trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
# __Pyx_PyType_GetSlot doesn't work on non-heap types in Limited API < 3.10 so awful manual fallback:
code.putln("#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000")
code.putln("if (__Pyx_get_runtime_version() < 0x030A0000) {")
type_new = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln(f'{type_new} = PyObject_GetAttrString((PyObject*)&PyType_Type, "__new__");')
code.putln(code.error_goto_if_null(type_new, self.pos))
code.put_gotref(type_new, py_object_type)
type_tuple = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln(f"{type_tuple} = PyTuple_Pack(1, (PyObject*)&PyType_Type);")
code.putln(code.error_goto_if_null(type_tuple, self.pos))
code.put_gotref(type_tuple, py_object_type)
args_tuple = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln(f"{args_tuple} = PyNumber_Add({type_tuple}, {self.type_init_args.result()});")
code.putln(code.error_goto_if_null(args_tuple, self.pos))
code.put_gotref(args_tuple, py_object_type)
code.putln(f'{trial_type} = PyObject_Call({type_new}, {args_tuple}, NULL);')
for temp in [type_new, type_tuple, args_tuple]:
code.put_decref_clear(temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(temp)
code.putln("} else")
code.putln("#endif")
code.putln("%s = __Pyx_PyType_GetSlot(&PyType_Type, tp_new, newfunc)(&PyType_Type, %s, NULL);" % (
trial_type, self.type_init_args.result()))
code.putln(code.error_goto_if_null(trial_type, self.pos))
code.put_gotref(trial_type, py_object_type)
code.putln("if (__Pyx_PyType_GetSlot((PyTypeObject*) %s, tp_base, PyTypeObject*) != %s) {" % (
trial_type, first_base))
# trial_type is a heaptype so GetSlot works in all versions of the limited API
trial_type_base = "__Pyx_PyType_GetSlot((PyTypeObject*) %s, tp_base, PyTypeObject*)" % trial_type
code.putln("__Pyx_TypeName base_name = __Pyx_PyType_GetFullyQualifiedName(%s);" % trial_type_base)
code.putln("__Pyx_TypeName type_name = __Pyx_PyType_GetFullyQualifiedName(%s);" % first_base)
code.putln("PyErr_Format(PyExc_TypeError, "
"\"best base '\" __Pyx_FMT_TYPENAME \"' must be equal to first base '\" __Pyx_FMT_TYPENAME \"'\",")
code.putln(" base_name, type_name);")
code.putln("__Pyx_DECREF_TypeName(base_name);")
code.putln("__Pyx_DECREF_TypeName(type_name);")
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln(f"{first_base} = NULL;") # borrowed so no decref
code.funcstate.release_temp(first_base)
code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
code.funcstate.release_temp(trial_type)
self.type_init_args.generate_disposal_code(code)
self.type_init_args.free_temps(code)
self.generate_type_ready_code(self.entry, code, bases_tuple_cname=bases, check_heap_type_bases=True)
if bases is not None:
code.put_decref_clear(bases, PyrexTypes.py_object_type)
code.funcstate.release_temp(bases)
if self.body:
self.body.generate_execution_code(code)
# Also called from ModuleNode for early init types.
@staticmethod
def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_type_bases=False):
# Generate a call to PyType_Ready for an extension
# type defined in this module.
type = entry.type
typeptr_cname = f"{Naming.modulestatevalue_cname}->{type.typeptr_cname}"
scope = type.scope
if not scope: # could be None if there was an error
return
if entry.visibility == 'extern':
# Generate code to initialise the typeptr of an external extension
# type defined in this module to point to its type object.
if type.typeobj_cname:
# FIXME: this should not normally be set :-?
assert not type.typeobj_cname
code.putln("%s = &%s;" % (
type.typeptr_cname,
type.typeobj_cname,
))
return
# TODO: remove 'else:' and dedent
else:
assert typeptr_cname
assert type.typeobj_cname
typespec_cname = "%s_spec" % type.typeobj_cname
code.putln("#if CYTHON_USE_TYPE_SPECS")
tuple_temp = None
if not bases_tuple_cname and scope.parent_type.base_type:
tuple_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = PyTuple_Pack(1, (PyObject *)%s); %s" % (
tuple_temp,
code.typeptr_cname_in_module_state(scope.parent_type.base_type),
code.error_goto_if_null(tuple_temp, entry.pos),
))
code.put_gotref(tuple_temp, py_object_type)
if bases_tuple_cname or tuple_temp:
if check_heap_type_bases:
code.globalstate.use_utility_code(
UtilityCode.load_cached('ValidateBasesTuple', 'ExtensionTypes.c'))
code.put_error_if_neg(entry.pos, "__Pyx_validate_bases_tuple(%s.name, %d, %s)" % (
typespec_cname,
TypeSlots.get_slot_by_name("tp_dictoffset", scope.directives).slot_code(scope) != "0",
bases_tuple_cname or tuple_temp,
))
code.putln("%s = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(%s, &%s, %s);" % (
typeptr_cname,
Naming.module_cname,
typespec_cname,
bases_tuple_cname or tuple_temp,
))
if tuple_temp:
code.put_xdecref_clear(tuple_temp, type=py_object_type)
code.funcstate.release_temp(tuple_temp)
code.putln(code.error_goto_if_null(typeptr_cname, entry.pos))
else:
code.putln(
"%s = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(%s, &%s, NULL); %s" % (
typeptr_cname,
Naming.module_cname,
typespec_cname,
code.error_goto_if_null(typeptr_cname, entry.pos),
))
# The buffer interface is not currently supported by PyType_FromSpec().
buffer_slot = TypeSlots.get_slot_by_name("tp_as_buffer", code.globalstate.directives)
if not buffer_slot.is_empty(scope):
code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
code.putln("%s->%s = %s;" % (
typeptr_cname,
buffer_slot.slot_name,
buffer_slot.slot_code(scope),
))
# Still need to inherit buffer methods since PyType_Ready() didn't do it for us.
for buffer_method_name in ("__getbuffer__", "__releasebuffer__"):
buffer_slot = TypeSlots.get_slot_table(
code.globalstate.directives).get_slot_by_method_name(buffer_method_name)
if buffer_slot.slot_code(scope) == "0" and not TypeSlots.get_base_slot_function(scope, buffer_slot):
code.putln("if (!%s->tp_as_buffer->%s &&"
" %s->tp_base->tp_as_buffer &&"
" %s->tp_base->tp_as_buffer->%s) {" % (
typeptr_cname, buffer_slot.slot_name,
typeptr_cname,
typeptr_cname, buffer_slot.slot_name,
))
code.putln("%s->tp_as_buffer->%s = %s->tp_base->tp_as_buffer->%s;" % (
typeptr_cname, buffer_slot.slot_name,
typeptr_cname, buffer_slot.slot_name,
))
code.putln("}")
code.putln("#elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer)")
code.putln("/* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */")
code.putln("#elif defined(_MSC_VER)")
code.putln("#pragma message (\"The buffer protocol is not supported in the Limited C-API < 3.11.\")")
code.putln("#else")
code.putln("#warning \"The buffer protocol is not supported in the Limited C-API < 3.11.\"")
code.putln("#endif")
code.putln("#else")
if bases_tuple_cname:
code.put_incref(bases_tuple_cname, py_object_type)
code.put_giveref(bases_tuple_cname, py_object_type)
code.putln("%s.tp_bases = %s;" % (type.typeobj_cname, bases_tuple_cname))
code.putln("%s = &%s;" % (
typeptr_cname,
type.typeobj_cname,
))
code.putln("#endif") # if CYTHON_USE_TYPE_SPECS
base_type = type.base_type
while base_type:
if base_type.is_external and base_type.objstruct_cname != "PyTypeObject":
# 'type' is special-cased because it is actually based on PyHeapTypeObject
# Variable length bases are allowed if the current class doesn't grow
code.putln("if (sizeof(%s%s) != sizeof(%s%s)) {" % (
"" if type.typedef_flag else "struct ", type.objstruct_cname,
"" if base_type.typedef_flag else "struct ", base_type.objstruct_cname))
code.globalstate.use_utility_code(
UtilityCode.load_cached("ValidateExternBase", "ExtensionTypes.c"))
base_typeptr_cname = code.typeptr_cname_in_module_state(type.base_type)
code.put_error_if_neg(entry.pos, "__Pyx_validate_extern_base(%s)" % (
base_typeptr_cname))
code.putln("}")
break
base_type = base_type.base_type
code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
# FIXME: these still need to get initialised even with the limited-API
for slot in TypeSlots.get_slot_table(code.globalstate.directives):
slot.generate_dynamic_init_code(scope, code)
code.putln("#endif")
code.putln("#if !CYTHON_USE_TYPE_SPECS")
code.globalstate.use_utility_code(
UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
code.put_error_if_neg(entry.pos, "__Pyx_PyType_Ready(%s)" % typeptr_cname)
code.putln("#endif")
code.put_make_object_deferred(f"(PyObject*){typeptr_cname}")
# Use specialised attribute lookup for types with generic lookup but no instance dict.
getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
if getattr_slot_func == '0' and dictoffset_slot_func == '0':
code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME
code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
" likely(!%s->tp_dictoffset && %s->tp_getattro == PyObject_GenericGetAttr)) {" % (
typeptr_cname, typeptr_cname))
code.putln("%s->tp_getattro = PyObject_GenericGetAttr;" %
typeptr_cname)
code.putln("}")
code.putln("#endif") # if !CYTHON_COMPILING_IN_LIMITED_API
# Fix special method docstrings. This is a bit of a hack, but
# unless we let PyType_Ready create the slot wrappers we have
# a significant performance hit. (See trac #561.)
for func in entry.type.scope.pyfunc_entries:
is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
if (func.is_special and Options.docstrings and
func.wrapperbase_cname and not is_buffer):
slot = TypeSlots.get_slot_table(
entry.type.scope.directives).get_slot_by_method_name(func.name)
preprocessor_guard = slot.preprocessor_guard_code() if slot else None
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln('#if CYTHON_UPDATE_DESCRIPTOR_DOC')
code.putln("{")
code.putln(
'PyObject *wrapper = PyObject_GetAttrString((PyObject *)%s, "%s"); %s' % (
typeptr_cname,
func.name,
code.error_goto_if_null('wrapper', entry.pos)))
code.putln(
"if (Py_IS_TYPE(wrapper, &PyWrapperDescr_Type)) {")
code.putln(
"%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
func.wrapperbase_cname))
code.putln(
"%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
code.putln(
"((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
func.wrapperbase_cname))
code.putln("}")
code.putln("}")
code.putln('#endif')
if preprocessor_guard:
code.putln('#endif')
if type.vtable_cname:
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
code.put_error_if_neg(entry.pos, "__Pyx_SetVtable(%s, %s)" % (
typeptr_cname,
type.vtabptr_cname,
))
code.globalstate.use_utility_code(
UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
code.put_error_if_neg(entry.pos, "__Pyx_MergeVtables(%s)" % typeptr_cname)
if not type.scope.is_internal and not type.scope.directives.get('internal'):
# scope.is_internal is set for types defined by
# Cython (such as closures), the 'internal'
# directive is set by users
code.put_error_if_neg(entry.pos, "PyObject_SetAttr(%s, %s, (PyObject *) %s)" % (
Naming.module_cname,
code.intern_identifier(scope.class_name),
typeptr_cname,
))
weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
if weakref_entry:
if weakref_entry.type is py_object_type:
tp_weaklistoffset = "%s->tp_weaklistoffset" % typeptr_cname
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
code.putln("#if CYTHON_USE_TYPE_SLOTS")
code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
tp_weaklistoffset,
tp_weaklistoffset,
objstruct,
weakref_entry.cname))
code.putln("#endif")
else:
error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
# Unfortunately, we cannot reliably detect whether a
# superclass defined __reduce__ at compile time, so we must
# do so at runtime.
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
code.put_error_if_neg(entry.pos, "__Pyx_setup_reduce((PyObject *) %s)" % typeptr_cname)
def annotate(self, code):
if self.type_init_args:
self.type_init_args.annotate(code)
if self.body:
self.body.annotate(code)
| CClassDefNode |
python | ansible__ansible | test/units/module_utils/common/test_locale.py | {
"start": 287,
"end": 1652
} | class ____:
"""Tests for get_best_parsable_locale"""
mock_module = MagicMock()
mock_module.get_bin_path = MagicMock(return_value='/usr/bin/locale')
def test_finding_best(self):
self.mock_module.run_command = MagicMock(return_value=(0, "C.utf8\nen_US.utf8\nC\nPOSIX\n", ''))
locale = get_best_parsable_locale(self.mock_module)
assert locale == 'C.utf8'
def test_finding_last(self):
self.mock_module.run_command = MagicMock(return_value=(0, "fr_FR.utf8\nen_UK.utf8\nC\nPOSIX\n", ''))
locale = get_best_parsable_locale(self.mock_module)
assert locale == 'C'
def test_finding_middle(self):
self.mock_module.run_command = MagicMock(return_value=(0, "fr_FR.utf8\nen_US.utf8\nC\nPOSIX\n", ''))
locale = get_best_parsable_locale(self.mock_module)
assert locale == 'en_US.utf8'
def test_finding_prefered(self):
self.mock_module.run_command = MagicMock(return_value=(0, "es_ES.utf8\nMINE\nC\nPOSIX\n", ''))
locale = get_best_parsable_locale(self.mock_module, preferences=['MINE', 'C.utf8'])
assert locale == 'MINE'
def test_finding_C_on_no_match(self):
self.mock_module.run_command = MagicMock(return_value=(0, "fr_FR.UTF8\nMINE\n", ''))
locale = get_best_parsable_locale(self.mock_module)
assert locale == 'C'
| TestLocale |
python | getsentry__sentry-python | sentry_sdk/integrations/socket.py | {
"start": 295,
"end": 3169
} | class ____(Integration):
identifier = "socket"
origin = f"auto.socket.{identifier}"
@staticmethod
def setup_once():
# type: () -> None
"""
patches two of the most used functions of socket: create_connection and getaddrinfo(dns resolver)
"""
_patch_create_connection()
_patch_getaddrinfo()
def _get_span_description(host, port):
# type: (Union[bytes, str, None], Union[bytes, str, int, None]) -> str
try:
host = host.decode() # type: ignore
except (UnicodeDecodeError, AttributeError):
pass
try:
port = port.decode() # type: ignore
except (UnicodeDecodeError, AttributeError):
pass
description = "%s:%s" % (host, port) # type: ignore
return description
def _patch_create_connection():
# type: () -> None
real_create_connection = socket.create_connection
def create_connection(
address,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, # type: ignore
source_address=None,
):
# type: (Tuple[Optional[str], int], Optional[float], Optional[Tuple[Union[bytearray, bytes, str], int]])-> socket.socket
integration = sentry_sdk.get_client().get_integration(SocketIntegration)
if integration is None:
return real_create_connection(address, timeout, source_address)
with sentry_sdk.start_span(
op=OP.SOCKET_CONNECTION,
name=_get_span_description(address[0], address[1]),
origin=SocketIntegration.origin,
) as span:
span.set_data("address", address)
span.set_data("timeout", timeout)
span.set_data("source_address", source_address)
return real_create_connection(
address=address, timeout=timeout, source_address=source_address
)
socket.create_connection = create_connection # type: ignore
def _patch_getaddrinfo():
# type: () -> None
real_getaddrinfo = socket.getaddrinfo
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
# type: (Union[bytes, str, None], Union[bytes, str, int, None], int, int, int, int) -> List[Tuple[AddressFamily, SocketKind, int, str, Union[Tuple[str, int], Tuple[str, int, int, int], Tuple[int, bytes]]]]
integration = sentry_sdk.get_client().get_integration(SocketIntegration)
if integration is None:
return real_getaddrinfo(host, port, family, type, proto, flags)
with sentry_sdk.start_span(
op=OP.SOCKET_DNS,
name=_get_span_description(host, port),
origin=SocketIntegration.origin,
) as span:
span.set_data("host", host)
span.set_data("port", port)
return real_getaddrinfo(host, port, family, type, proto, flags)
socket.getaddrinfo = getaddrinfo
| SocketIntegration |
python | pandas-dev__pandas | pandas/_typing.py | {
"start": 15341,
"end": 16000
} | class ____(Protocol):
"""
An object with an ``__arrow_c_array__`` method.
This method indicates the object is an Arrow-compatible object implementing
the `Arrow PyCapsule Protocol`_ (exposing the `Arrow C Data Interface`_ in
Python), enabling zero-copy Arrow data interchange across libraries.
.. _Arrow PyCapsule Protocol: https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html
.. _Arrow C Data Interface: https://arrow.apache.org/docs/format/CDataInterface.html
"""
def __arrow_c_array__(
self, requested_schema: object | None = None
) -> tuple[object, object]: ...
| ArrowArrayExportable |
python | jazzband__django-waffle | waffle/tests/test_waffle.py | {
"start": 24031,
"end": 27719
} | class ____(TestCase):
databases = DATABASES
def assert_sample_dynamically_created_with_value(self, is_active, expected_value):
SAMPLE_NAME = 'my_dynamically_created_sample'
assert waffle.get_waffle_sample_model().objects.count() == 0
assert is_active == waffle.sample_is_active(SAMPLE_NAME)
assert waffle.get_waffle_sample_model().objects.count() == 1
sample = waffle.get_waffle_sample_model().objects.get(name=SAMPLE_NAME)
assert sample.name == SAMPLE_NAME
assert sample.percent == expected_value
# We assert no queries are made to ensure samples created when the
# `CREATE_MISSING_SAMPLES` setting is active are properly cached.
with self.assertNumQueries(0):
assert is_active == waffle.sample_is_active(SAMPLE_NAME)
def test_sample_100(self):
sample = waffle.get_waffle_sample_model().objects.create(
name="sample", percent="100.0"
)
assert waffle.sample_is_active(sample.name)
def test_sample_0(self):
sample = waffle.get_waffle_sample_model().objects.create(
name="sample", percent="0.0"
)
assert not waffle.sample_is_active(sample.name)
def test_undefined(self):
assert not waffle.sample_is_active('foo')
@override_settings(WAFFLE_SAMPLE_DEFAULT=True)
def test_undefined_default(self):
assert waffle.sample_is_active('foo')
@override_settings(DATABASE_ROUTERS=['waffle.tests.base.ReplicationRouter'])
def test_read_from_write_db(self):
sample = waffle.get_waffle_sample_model().objects.create(
name="sample", percent="100.0"
)
# By default, sample_is_active should hit whatever it configured as the
# read DB (so values will be stale if replication is lagged).
assert not waffle.sample_is_active(sample.name)
with override_settings(WAFFLE_READ_FROM_WRITE_DB=True):
# Save the sample again to flush the cache.
sample.save()
# The next read should now be directed to the write DB, ensuring
# the cache and DB are in sync.
assert waffle.sample_is_active(sample.name)
@override_settings(WAFFLE_CREATE_MISSING_SAMPLES=True)
@override_settings(WAFFLE_SAMPLE_DEFAULT=False)
def test_sample_created_dynamically_default_false(self):
self.assert_sample_dynamically_created_with_value(False, 0.0)
@override_settings(WAFFLE_CREATE_MISSING_SAMPLES=True)
@override_settings(WAFFLE_SAMPLE_DEFAULT=True)
def test_sample_created_dynamically_default_true(self):
self.assert_sample_dynamically_created_with_value(True, 100.0)
@mock.patch('waffle.models.logger')
def test_no_logging_missing_sample_by_default(self, mock_logger):
waffle.switch_is_active('foo')
mock_logger.log.call_count == 0
@override_settings(WAFFLE_LOG_MISSING_SAMPLES=logging.WARNING)
@mock.patch('waffle.models.logger')
def test_logging_missing_sample(self, mock_logger):
waffle.sample_is_active('foo')
mock_logger.log.assert_called_with(logging.WARNING, 'Sample %s not found', 'foo')
@override_settings(WAFFLE_SAMPLE_MODEL='test_app.CustomSample')
def test_pluggable_sample_model(self):
sample_model = waffle.get_waffle_model('SAMPLE_MODEL')
self.assertEqual(CustomSample, sample_model)
sample_model.objects.create(name='test_sample_off', percent=0)
sample_model.objects.create(name='test_sample_on', percent=100)
assert not waffle.sample_is_active('test_sample_off')
assert waffle.sample_is_active('test_sample_on')
| SampleTests |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_template.py | {
"start": 1483,
"end": 3928
} | class ____(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to `.backend_bases.RendererBase` for
documentation of the methods.
"""
def __init__(self, dpi):
super().__init__()
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
# draw_markers is optional, and we get more correct relative
# timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_markers(self, gc, marker_path, marker_trans, path, trans,
# rgbFace=None):
# pass
# draw_path_collection is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_path_collection(self, gc, master_transform, paths,
# all_transforms, offsets, offset_trans,
# facecolors, edgecolors, linewidths, linestyles,
# antialiaseds):
# pass
# draw_quad_mesh is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
# coordinates, offsets, offsetTrans, facecolors,
# antialiased, edgecolors):
# pass
def draw_image(self, gc, x, y, im):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
pass
def flipy(self):
# docstring inherited
return True
def get_canvas_width_height(self):
# docstring inherited
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
# docstring inherited
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, e.g., postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
# return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
# return points/72.0 * self.dpi.get()
| RendererTemplate |
python | nedbat__coveragepy | tests/test_arcs.py | {
"start": 54061,
"end": 56232
} | class ____(CoverageTest):
"""Tests of arcs with decorators."""
def test_function_decorator(self) -> None:
self.check_coverage(
"""\
def decorator(arg):
def _dec(f):
return f
return _dec
@decorator(6)
@decorator(
len([8]),
)
def my_function(
a=len([11]),
):
x = 13
a = 14
my_function()
""",
branchz="",
branchz_missing="",
)
def test_class_decorator(self) -> None:
self.check_coverage(
"""\
def decorator(arg):
def _dec(c):
return c
return _dec
@decorator(6)
@decorator(
len([8]),
)
class MyObject(
object
):
X = 13
a = 14
""",
branchz="",
branchz_missing="",
)
def test_bug_466a(self) -> None:
# A bad interaction between decorators and multi-line list assignments,
# believe it or not...!
# This example makes more sense when considered in tandem with 466b below.
self.check_coverage(
"""\
class Parser(object):
@classmethod
def parse(cls):
formats = [ 5 ]
return None
Parser.parse()
""",
branchz="",
branchz_missing="",
)
def test_bug_466b(self) -> None:
# A bad interaction between decorators and multi-line list assignments,
# believe it or not...!
self.check_coverage(
"""\
class Parser(object):
@classmethod
def parse(cls):
formats = [
6,
]
return None
Parser.parse()
""",
branchz="",
branchz_missing="",
)
| DecoratorArcTest |
python | pytest-dev__pytest | testing/_py/test_local.py | {
"start": 42696,
"end": 50628
} | class ____:
pytestmark = skiponwin32
def test_hardlink(self, tmpdir):
linkpath = tmpdir.join("test")
filepath = tmpdir.join("file")
filepath.write_text("Hello", encoding="utf-8")
nlink = filepath.stat().nlink
linkpath.mklinkto(filepath)
assert filepath.stat().nlink == nlink + 1
def test_symlink_are_identical(self, tmpdir):
filepath = tmpdir.join("file")
filepath.write_text("Hello", encoding="utf-8")
linkpath = tmpdir.join("test")
linkpath.mksymlinkto(filepath)
assert linkpath.readlink() == str(filepath)
def test_symlink_isfile(self, tmpdir):
linkpath = tmpdir.join("test")
filepath = tmpdir.join("file")
filepath.write_text("", encoding="utf-8")
linkpath.mksymlinkto(filepath)
assert linkpath.check(file=1)
assert not linkpath.check(link=0, file=1)
assert linkpath.islink()
def test_symlink_relative(self, tmpdir):
linkpath = tmpdir.join("test")
filepath = tmpdir.join("file")
filepath.write_text("Hello", encoding="utf-8")
linkpath.mksymlinkto(filepath, absolute=False)
assert linkpath.readlink() == "file"
assert filepath.read_text(encoding="utf-8") == linkpath.read_text(
encoding="utf-8"
)
def test_symlink_not_existing(self, tmpdir):
linkpath = tmpdir.join("testnotexisting")
assert not linkpath.check(link=1)
assert linkpath.check(link=0)
def test_relto_with_root(self, path1, tmpdir):
y = path1.join("x").relto(local("/"))
assert y[0] == str(path1)[1]
def test_visit_recursive_symlink(self, tmpdir):
linkpath = tmpdir.join("test")
linkpath.mksymlinkto(tmpdir)
visitor = tmpdir.visit(None, lambda x: x.check(link=0))
assert list(visitor) == [linkpath]
def test_symlink_isdir(self, tmpdir):
linkpath = tmpdir.join("test")
linkpath.mksymlinkto(tmpdir)
assert linkpath.check(dir=1)
assert not linkpath.check(link=0, dir=1)
def test_symlink_remove(self, tmpdir):
linkpath = tmpdir.join("test")
linkpath.mksymlinkto(linkpath) # point to itself
assert linkpath.check(link=1)
linkpath.remove()
assert not linkpath.check()
def test_realpath_file(self, tmpdir):
linkpath = tmpdir.join("test")
filepath = tmpdir.join("file")
filepath.write_text("", encoding="utf-8")
linkpath.mksymlinkto(filepath)
realpath = linkpath.realpath()
assert realpath.basename == "file"
def test_owner(self, path1, tmpdir):
from grp import getgrgid # type:ignore[attr-defined,unused-ignore]
from pwd import getpwuid # type:ignore[attr-defined,unused-ignore]
stat = path1.stat()
assert stat.path == path1
uid = stat.uid
gid = stat.gid
owner = getpwuid(uid)[0]
group = getgrgid(gid)[0]
assert uid == stat.uid
assert owner == stat.owner
assert gid == stat.gid
assert group == stat.group
def test_stat_helpers(self, tmpdir, monkeypatch):
path1 = tmpdir.ensure("file")
stat1 = path1.stat()
stat2 = tmpdir.stat()
assert stat1.isfile()
assert stat2.isdir()
assert not stat1.islink()
assert not stat2.islink()
def test_stat_non_raising(self, tmpdir):
path1 = tmpdir.join("file")
pytest.raises(error.ENOENT, lambda: path1.stat())
res = path1.stat(raising=False)
assert res is None
def test_atime(self, tmpdir):
import time
path = tmpdir.ensure("samplefile")
# Do not use _pytest.timing here, as we do not want time mocking to affect this test.
now = time.time()
atime1 = path.atime()
# we could wait here but timer resolution is very
# system dependent
path.read_binary()
time.sleep(ATIME_RESOLUTION)
atime2 = path.atime()
time.sleep(ATIME_RESOLUTION)
duration = time.time() - now
assert (atime2 - atime1) <= duration
def test_commondir(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join("something")
p2 = path1.join("otherthing")
assert p1.common(p2) == path1
assert p2.common(p1) == path1
def test_commondir_nocommon(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join("something")
p2 = local(path1.sep + "blabla")
assert p1.common(p2) == "/"
def test_join_to_root(self, path1):
root = path1.parts()[0]
assert len(str(root)) == 1
assert str(root.join("a")) == "/a"
def test_join_root_to_root_with_no_abs(self, path1):
nroot = path1.join("/")
assert str(path1) == str(nroot)
assert path1 == nroot
def test_chmod_simple_int(self, path1):
mode = path1.stat().mode
path1.chmod(int(mode / 2))
try:
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_chmod_rec_int(self, path1):
# XXX fragile test
def recfilter(x):
return x.check(dotfile=0, link=0)
oldmodes = {}
for x in path1.visit(rec=recfilter):
oldmodes[x] = x.stat().mode
path1.chmod(int("772", 8), rec=recfilter)
try:
for x in path1.visit(rec=recfilter):
assert x.stat().mode & int("777", 8) == int("772", 8)
finally:
for x, y in oldmodes.items():
x.chmod(y)
def test_copy_archiving(self, tmpdir):
unicode_fn = "something-\342\200\223.txt"
f = tmpdir.ensure("a", unicode_fn)
a = f.dirpath()
oldmode = f.stat().mode
newmode = oldmode ^ 1
f.chmod(newmode)
b = tmpdir.join("b")
a.copy(b, mode=True)
assert b.join(f.basename).stat().mode == newmode
def test_copy_stat_file(self, tmpdir):
src = tmpdir.ensure("src")
dst = tmpdir.join("dst")
# a small delay before the copy
time.sleep(ATIME_RESOLUTION)
src.copy(dst, stat=True)
oldstat = src.stat()
newstat = dst.stat()
assert oldstat.mode == newstat.mode
assert (dst.atime() - src.atime()) < ATIME_RESOLUTION
assert (dst.mtime() - src.mtime()) < ATIME_RESOLUTION
def test_copy_stat_dir(self, tmpdir):
test_files = ["a", "b", "c"]
src = tmpdir.join("src")
for f in test_files:
src.join(f).write_text(f, ensure=True, encoding="utf-8")
dst = tmpdir.join("dst")
# a small delay before the copy
time.sleep(ATIME_RESOLUTION)
src.copy(dst, stat=True)
for f in test_files:
oldstat = src.join(f).stat()
newstat = dst.join(f).stat()
assert (newstat.atime - oldstat.atime) < ATIME_RESOLUTION
assert (newstat.mtime - oldstat.mtime) < ATIME_RESOLUTION
assert oldstat.mode == newstat.mode
def test_chown_identity(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
def test_chown_dangling_link(self, path1):
owner = path1.stat().owner
group = path1.stat().group
x = path1.join("hello")
x.mksymlinkto("qlwkejqwlek")
try:
path1.chown(owner, group, rec=1)
finally:
x.remove(rec=0)
def test_chown_identity_rec_mayfail(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
| TestPOSIXLocalPath |
python | python-openxml__python-docx | tests/text/test_font.py | {
"start": 501,
"end": 15720
} | class ____:
"""Unit-test suite for `docx.text.font.Font`."""
def it_provides_access_to_its_color_object(self, ColorFormat_: Mock, color_: Mock):
r = cast(CT_R, element("w:r"))
font = Font(r)
color = font.color
ColorFormat_.assert_called_once_with(font.element)
assert color is color_
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:rFonts", None),
("w:r/w:rPr/w:rFonts{w:ascii=Arial}", "Arial"),
],
)
def it_knows_its_typeface_name(self, r_cxml: str, expected_value: str | None):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert font.name == expected_value
@pytest.mark.parametrize(
("r_cxml", "value", "expected_r_cxml"),
[
("w:r", "Foo", "w:r/w:rPr/w:rFonts{w:ascii=Foo,w:hAnsi=Foo}"),
("w:r/w:rPr", "Foo", "w:r/w:rPr/w:rFonts{w:ascii=Foo,w:hAnsi=Foo}"),
(
"w:r/w:rPr/w:rFonts{w:hAnsi=Foo}",
"Bar",
"w:r/w:rPr/w:rFonts{w:ascii=Bar,w:hAnsi=Bar}",
),
(
"w:r/w:rPr/w:rFonts{w:ascii=Foo,w:hAnsi=Foo}",
"Bar",
"w:r/w:rPr/w:rFonts{w:ascii=Bar,w:hAnsi=Bar}",
),
],
)
def it_can_change_its_typeface_name(self, r_cxml: str, value: str, expected_r_cxml: str):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_r_cxml)
font.name = value
assert font._element.xml == expected_xml
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:sz{w:val=28}", Pt(14)),
],
)
def it_knows_its_size(self, r_cxml: str, expected_value: Length | None):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert font.size == expected_value
@pytest.mark.parametrize(
("r_cxml", "value", "expected_r_cxml"),
[
("w:r", Pt(12), "w:r/w:rPr/w:sz{w:val=24}"),
("w:r/w:rPr", Pt(12), "w:r/w:rPr/w:sz{w:val=24}"),
("w:r/w:rPr/w:sz{w:val=24}", Pt(18), "w:r/w:rPr/w:sz{w:val=36}"),
("w:r/w:rPr/w:sz{w:val=36}", None, "w:r/w:rPr"),
],
)
def it_can_change_its_size(self, r_cxml: str, value: Length | None, expected_r_cxml: str):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_r_cxml)
font.size = value
assert font._element.xml == expected_xml
@pytest.mark.parametrize(
("r_cxml", "bool_prop_name", "expected_value"),
[
("w:r/w:rPr", "all_caps", None),
("w:r/w:rPr/w:caps", "all_caps", True),
("w:r/w:rPr/w:caps{w:val=on}", "all_caps", True),
("w:r/w:rPr/w:caps{w:val=off}", "all_caps", False),
("w:r/w:rPr/w:b{w:val=1}", "bold", True),
("w:r/w:rPr/w:i{w:val=0}", "italic", False),
("w:r/w:rPr/w:cs{w:val=true}", "complex_script", True),
("w:r/w:rPr/w:bCs{w:val=false}", "cs_bold", False),
("w:r/w:rPr/w:iCs{w:val=on}", "cs_italic", True),
("w:r/w:rPr/w:dstrike{w:val=off}", "double_strike", False),
("w:r/w:rPr/w:emboss{w:val=1}", "emboss", True),
("w:r/w:rPr/w:vanish{w:val=0}", "hidden", False),
("w:r/w:rPr/w:i{w:val=true}", "italic", True),
("w:r/w:rPr/w:imprint{w:val=false}", "imprint", False),
("w:r/w:rPr/w:oMath{w:val=on}", "math", True),
("w:r/w:rPr/w:noProof{w:val=off}", "no_proof", False),
("w:r/w:rPr/w:outline{w:val=1}", "outline", True),
("w:r/w:rPr/w:rtl{w:val=0}", "rtl", False),
("w:r/w:rPr/w:shadow{w:val=true}", "shadow", True),
("w:r/w:rPr/w:smallCaps{w:val=false}", "small_caps", False),
("w:r/w:rPr/w:snapToGrid{w:val=on}", "snap_to_grid", True),
("w:r/w:rPr/w:specVanish{w:val=off}", "spec_vanish", False),
("w:r/w:rPr/w:strike{w:val=1}", "strike", True),
("w:r/w:rPr/w:webHidden{w:val=0}", "web_hidden", False),
],
)
def it_knows_its_bool_prop_states(
self, r_cxml: str, bool_prop_name: str, expected_value: bool | None
):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert getattr(font, bool_prop_name) == expected_value
@pytest.mark.parametrize(
("r_cxml", "prop_name", "value", "expected_cxml"),
[
# nothing to True, False, and None ---------------------------
("w:r", "all_caps", True, "w:r/w:rPr/w:caps"),
("w:r", "bold", False, "w:r/w:rPr/w:b{w:val=0}"),
("w:r", "italic", None, "w:r/w:rPr"),
# default to True, False, and None ---------------------------
("w:r/w:rPr/w:cs", "complex_script", True, "w:r/w:rPr/w:cs"),
("w:r/w:rPr/w:bCs", "cs_bold", False, "w:r/w:rPr/w:bCs{w:val=0}"),
("w:r/w:rPr/w:iCs", "cs_italic", None, "w:r/w:rPr"),
# True to True, False, and None ------------------------------
(
"w:r/w:rPr/w:dstrike{w:val=1}",
"double_strike",
True,
"w:r/w:rPr/w:dstrike",
),
(
"w:r/w:rPr/w:emboss{w:val=on}",
"emboss",
False,
"w:r/w:rPr/w:emboss{w:val=0}",
),
("w:r/w:rPr/w:vanish{w:val=1}", "hidden", None, "w:r/w:rPr"),
# False to True, False, and None -----------------------------
("w:r/w:rPr/w:i{w:val=false}", "italic", True, "w:r/w:rPr/w:i"),
(
"w:r/w:rPr/w:imprint{w:val=0}",
"imprint",
False,
"w:r/w:rPr/w:imprint{w:val=0}",
),
("w:r/w:rPr/w:oMath{w:val=off}", "math", None, "w:r/w:rPr"),
# random mix -------------------------------------------------
(
"w:r/w:rPr/w:noProof{w:val=1}",
"no_proof",
False,
"w:r/w:rPr/w:noProof{w:val=0}",
),
("w:r/w:rPr", "outline", True, "w:r/w:rPr/w:outline"),
("w:r/w:rPr/w:rtl{w:val=true}", "rtl", False, "w:r/w:rPr/w:rtl{w:val=0}"),
("w:r/w:rPr/w:shadow{w:val=on}", "shadow", True, "w:r/w:rPr/w:shadow"),
(
"w:r/w:rPr/w:smallCaps",
"small_caps",
False,
"w:r/w:rPr/w:smallCaps{w:val=0}",
),
("w:r/w:rPr/w:snapToGrid", "snap_to_grid", True, "w:r/w:rPr/w:snapToGrid"),
("w:r/w:rPr/w:specVanish", "spec_vanish", None, "w:r/w:rPr"),
("w:r/w:rPr/w:strike{w:val=foo}", "strike", True, "w:r/w:rPr/w:strike"),
(
"w:r/w:rPr/w:webHidden",
"web_hidden",
False,
"w:r/w:rPr/w:webHidden{w:val=0}",
),
],
)
def it_can_change_its_bool_prop_settings(
self, r_cxml: str, prop_name: str, value: bool | None, expected_cxml: str
):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_cxml)
setattr(font, prop_name, value)
assert font._element.xml == expected_xml
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:vertAlign{w:val=baseline}", False),
("w:r/w:rPr/w:vertAlign{w:val=subscript}", True),
("w:r/w:rPr/w:vertAlign{w:val=superscript}", False),
],
)
def it_knows_whether_it_is_subscript(self, r_cxml: str, expected_value: bool | None):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert font.subscript == expected_value
@pytest.mark.parametrize(
("r_cxml", "value", "expected_r_cxml"),
[
("w:r", True, "w:r/w:rPr/w:vertAlign{w:val=subscript}"),
("w:r", False, "w:r/w:rPr"),
("w:r", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
True,
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
),
("w:r/w:rPr/w:vertAlign{w:val=subscript}", False, "w:r/w:rPr"),
("w:r/w:rPr/w:vertAlign{w:val=subscript}", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
True,
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
),
(
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
False,
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
),
("w:r/w:rPr/w:vertAlign{w:val=superscript}", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:vertAlign{w:val=baseline}",
True,
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
),
],
)
def it_can_change_whether_it_is_subscript(
self, r_cxml: str, value: bool | None, expected_r_cxml: str
):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_r_cxml)
font.subscript = value
assert font._element.xml == expected_xml
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:vertAlign{w:val=baseline}", False),
("w:r/w:rPr/w:vertAlign{w:val=subscript}", False),
("w:r/w:rPr/w:vertAlign{w:val=superscript}", True),
],
)
def it_knows_whether_it_is_superscript(self, r_cxml: str, expected_value: bool | None):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert font.superscript == expected_value
@pytest.mark.parametrize(
("r_cxml", "value", "expected_r_cxml"),
[
("w:r", True, "w:r/w:rPr/w:vertAlign{w:val=superscript}"),
("w:r", False, "w:r/w:rPr"),
("w:r", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
True,
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
),
("w:r/w:rPr/w:vertAlign{w:val=superscript}", False, "w:r/w:rPr"),
("w:r/w:rPr/w:vertAlign{w:val=superscript}", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
True,
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
),
(
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
False,
"w:r/w:rPr/w:vertAlign{w:val=subscript}",
),
("w:r/w:rPr/w:vertAlign{w:val=subscript}", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:vertAlign{w:val=baseline}",
True,
"w:r/w:rPr/w:vertAlign{w:val=superscript}",
),
],
)
def it_can_change_whether_it_is_superscript(
self, r_cxml: str, value: bool | None, expected_r_cxml: str
):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_r_cxml)
font.superscript = value
assert font._element.xml == expected_xml
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr/w:u", None),
("w:r/w:rPr/w:u{w:val=single}", True),
("w:r/w:rPr/w:u{w:val=none}", False),
("w:r/w:rPr/w:u{w:val=double}", WD_UNDERLINE.DOUBLE),
("w:r/w:rPr/w:u{w:val=wave}", WD_UNDERLINE.WAVY),
],
)
def it_knows_its_underline_type(self, r_cxml: str, expected_value: WD_UNDERLINE | bool | None):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert font.underline is expected_value
@pytest.mark.parametrize(
("r_cxml", "value", "expected_r_cxml"),
[
("w:r", True, "w:r/w:rPr/w:u{w:val=single}"),
("w:r", False, "w:r/w:rPr/w:u{w:val=none}"),
("w:r", None, "w:r/w:rPr"),
("w:r", WD_UNDERLINE.SINGLE, "w:r/w:rPr/w:u{w:val=single}"),
("w:r", WD_UNDERLINE.THICK, "w:r/w:rPr/w:u{w:val=thick}"),
("w:r/w:rPr/w:u{w:val=single}", True, "w:r/w:rPr/w:u{w:val=single}"),
("w:r/w:rPr/w:u{w:val=single}", False, "w:r/w:rPr/w:u{w:val=none}"),
("w:r/w:rPr/w:u{w:val=single}", None, "w:r/w:rPr"),
(
"w:r/w:rPr/w:u{w:val=single}",
WD_UNDERLINE.SINGLE,
"w:r/w:rPr/w:u{w:val=single}",
),
(
"w:r/w:rPr/w:u{w:val=single}",
WD_UNDERLINE.DOTTED,
"w:r/w:rPr/w:u{w:val=dotted}",
),
],
)
def it_can_change_its_underline_type(
self, r_cxml: str, value: bool | None, expected_r_cxml: str
):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_r_cxml)
font.underline = value
assert font._element.xml == expected_xml
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:highlight{w:val=default}", WD_COLOR.AUTO),
("w:r/w:rPr/w:highlight{w:val=blue}", WD_COLOR.BLUE),
],
)
def it_knows_its_highlight_color(self, r_cxml: str, expected_value: WD_COLOR | None):
r = cast(CT_R, element(r_cxml))
font = Font(r)
assert font.highlight_color is expected_value
@pytest.mark.parametrize(
("r_cxml", "value", "expected_r_cxml"),
[
("w:r", WD_COLOR.AUTO, "w:r/w:rPr/w:highlight{w:val=default}"),
("w:r/w:rPr", WD_COLOR.BRIGHT_GREEN, "w:r/w:rPr/w:highlight{w:val=green}"),
(
"w:r/w:rPr/w:highlight{w:val=green}",
WD_COLOR.YELLOW,
"w:r/w:rPr/w:highlight{w:val=yellow}",
),
("w:r/w:rPr/w:highlight{w:val=yellow}", None, "w:r/w:rPr"),
("w:r/w:rPr", None, "w:r/w:rPr"),
("w:r", None, "w:r/w:rPr"),
],
)
def it_can_change_its_highlight_color(
self, r_cxml: str, value: WD_COLOR | None, expected_r_cxml: str
):
r = cast(CT_R, element(r_cxml))
font = Font(r)
expected_xml = xml(expected_r_cxml)
font.highlight_color = value
assert font._element.xml == expected_xml
# -- fixtures ----------------------------------------------------
@pytest.fixture
def color_(self, request: FixtureRequest):
return instance_mock(request, ColorFormat)
@pytest.fixture
def ColorFormat_(self, request: FixtureRequest, color_: Mock):
return class_mock(request, "docx.text.font.ColorFormat", return_value=color_)
| DescribeFont |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 338323,
"end": 361357
} | class ____:
def test_levy_cdf_ppf(self):
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_levy_sf(self):
# Large values, far into the tail of the distribution.
x = np.array([1e15, 1e25, 1e35, 1e50])
# Expected values were calculated with mpmath.
expected = np.array([2.5231325220201597e-08,
2.52313252202016e-13,
2.52313252202016e-18,
7.978845608028653e-26])
y = stats.levy.sf(x)
assert_allclose(y, expected, rtol=1e-14)
# The expected values for levy.isf(p) were calculated with mpmath.
# For loc=0 and scale=1, the inverse SF can be computed with
#
# import mpmath
#
# def levy_invsf(p):
# return 1/(2*mpmath.erfinv(p)**2)
#
# For example, with mpmath.mp.dps set to 60, float(levy_invsf(1e-20))
# returns 6.366197723675814e+39.
#
@pytest.mark.parametrize('p, expected_isf',
[(1e-20, 6.366197723675814e+39),
(1e-8, 6366197723675813.0),
(0.375, 4.185810119346273),
(0.875, 0.42489442055310134),
(0.999, 0.09235685880262713),
(0.9999999962747097, 0.028766845244146945)])
def test_levy_isf(self, p, expected_isf):
x = stats.levy.isf(p)
assert_allclose(x, expected_isf, atol=5e-15)
def test_levy_logcdf(self):
x = 1e50
ref = -7.978845608028653e-26
logcdf = stats.levy.logcdf(x)
assert_allclose(logcdf, ref, rtol=5e-15)
def test_levy_logsf(self):
x = 5e-3
ref = -2.0884875837625492e-45
logsf = stats.levy.logsf(x)
assert_allclose(logsf, ref, rtol=5e-15)
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
rng = np.random.default_rng(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100, random_state=rng)
with np.errstate(divide='ignore'):
params = np.array(stats.lognorm.fit(x, floc=0.))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
rng = np.random.default_rng(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100, random_state=rng)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
rng = np.random.default_rng(654321)
rvs = stats.cauchy.rvs(size=100, random_state=rng)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
rng = np.random.RandomState(1234)
x = rng.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see https://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
@pytest.mark.parametrize("a, c, ref, tol",
[(1500000.0, 1, 8.529426144018633, 1e-15),
(1e+30, 1, 35.95771492811536, 1e-15),
(1e+100, 1, 116.54819318290696, 1e-15),
(3e3, 1, 5.422011196659015, 1e-13),
(3e6, -1e100, -236.29663213396054, 1e-15),
(3e60, 1e-100, 1.3925371786831085e+102, 1e-15)])
def test_gengamma_extreme_entropy(a, c, ref, tol):
# The reference values were calculated with mpmath:
# from mpmath import mp
# mp.dps = 500
#
# def gen_entropy(a, c):
# a, c = mp.mpf(a), mp.mpf(c)
# val = mp.digamma(a)
# h = (a * (mp.one - val) + val/c + mp.loggamma(a) - mp.log(abs(c)))
# return float(h)
assert_allclose(stats.gengamma.entropy(a, c), ref, rtol=tol)
def test_gengamma_endpoint_with_neg_c():
p = stats.gengamma.pdf(0, 1, -1)
assert p == 0.0
logp = stats.gengamma.logpdf(0, 1, -1)
assert logp == -np.inf
def test_gengamma_munp():
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
with np.errstate(invalid='ignore'):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"The maximum number of subdivisions .50. has been achieved.",
IntegrationWarning,
)
warnings.filterwarnings(
"ignore",
"floating point number truncated to an integer",
RuntimeWarning,
)
stats.ksone.fit(d)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_l_sf():
# Test levy_l.sf for small arguments.
x = np.array([-0.016, -0.01, -0.005, -0.0015])
# Expected values were calculated with mpmath.
expected = np.array([2.6644463892359302e-15,
1.523970604832107e-23,
2.0884875837625492e-45,
5.302850374626878e-147])
y = stats.levy_l.sf(x)
assert_allclose(y, expected, rtol=1e-13)
def test_levy_l_isf():
# Test roundtrip sf(isf(p)), including a small input value.
p = np.array([3.0e-15, 0.25, 0.99])
x = stats.levy_l.isf(p)
q = stats.levy_l.sf(x)
assert_allclose(q, p, rtol=5e-14)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
rng = np.random.default_rng(5976604568)
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5,
random_state=rng)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5, random_state=rng)
stats.gamma.fit(stats.gamma.rvs(2., size=7, random_state=rng), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
# ncx2.pdf does not return nans in extreme tails(example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
# Verify logpdf has extended precision when pdf underflows to 0
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)
assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)
@pytest.mark.parametrize('method, expected', [
('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
('logpdf', np.array([-15.90413011, -17.88416331])),
('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
# gh-5441
# ncx2 with nc=0 is identical to chi2
# Comparison to R (v3.5.1)
# > options(digits=10)
# > pchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
# > qchisq(0.1, df=10, ncp=c(0,4))
result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
# gh-5441
# ncx2 with nc=0 is identical to chi2
result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
expected = stats.chi2.rvs(df=10, random_state=1)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_gh12731():
# test that gh-12731 is resolved; previously these were all 0.5
nc = 10**np.arange(5, 10)
assert_equal(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
def test_ncx2_gh8665():
# test that gh-8665 is resolved; previously this tended to nonzero value
x = np.array([4.99515382e+00, 1.07617327e+01, 2.31854502e+01,
4.99515382e+01, 1.07617327e+02, 2.31854502e+02,
4.99515382e+02, 1.07617327e+03, 2.31854502e+03,
4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
4.99515382e+04])
nu, lam = 20, 499.51538166556196
sf = stats.ncx2.sf(x, df=nu, nc=lam)
# computed in R. Couldn't find a survival function implementation
# options(digits=16)
# x <- c(4.99515382e+00, 1.07617327e+01, 2.31854502e+01, 4.99515382e+01,
# 1.07617327e+02, 2.31854502e+02, 4.99515382e+02, 1.07617327e+03,
# 2.31854502e+03, 4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
# 4.99515382e+04)
# nu <- 20
# lam <- 499.51538166556196
# 1 - pchisq(x, df = nu, ncp = lam)
sf_expected = [1.0000000000000000, 1.0000000000000000, 1.0000000000000000,
1.0000000000000000, 1.0000000000000000, 0.9999999999999888,
0.6646525582135460, 0.0000000000000000, 0.0000000000000000,
0.0000000000000000, 0.0000000000000000, 0.0000000000000000,
0.0000000000000000]
assert_allclose(sf, sf_expected, atol=1e-12)
def test_ncx2_gh11777():
# regression test for gh-11777:
# At high values of degrees of freedom df, ensure the pdf of ncx2 does
# not get clipped to zero when the non-centrality parameter is
# sufficiently less than df
df = 6700
nc = 5300
x = np.linspace(stats.ncx2.ppf(0.001, df, nc),
stats.ncx2.ppf(0.999, df, nc), num=10000)
ncx2_pdf = stats.ncx2.pdf(x, df, nc)
gauss_approx = stats.norm.pdf(x, df + nc, np.sqrt(2 * df + 4 * nc))
# use huge tolerance as we're only looking for obvious discrepancy
assert_allclose(ncx2_pdf, gauss_approx, atol=1e-4)
# Expected values for foldnorm.sf were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 60
# def foldcauchy_sf(x, c):
# x = mp.mpf(x)
# c = mp.mpf(c)
# return mp.one - (mp.atan(x - c) + mp.atan(x + c))/mp.pi
#
# E.g.
#
# >>> float(foldcauchy_sf(2, 1))
# 0.35241638234956674
#
@pytest.mark.parametrize('x, c, expected',
[(2, 1, 0.35241638234956674),
(2, 2, 0.5779791303773694),
(1e13, 1, 6.366197723675813e-14),
(2e16, 1, 3.183098861837907e-17),
(1e13, 2e11, 6.368745221764519e-14),
(0.125, 200, 0.999998010612169)])
def test_foldcauchy_sf(x, c, expected):
sf = stats.foldcauchy.sf(x, c)
assert_allclose(sf, expected, 2e-15)
# The same mpmath code shown in the comments above test_foldcauchy_sf()
# is used to create these expected values.
@pytest.mark.parametrize('x, expected',
[(2, 0.2951672353008665),
(1e13, 6.366197723675813e-14),
(2e16, 3.183098861837907e-17),
(5e80, 1.2732395447351629e-81)])
def test_halfcauchy_sf(x, expected):
sf = stats.halfcauchy.sf(x)
assert_allclose(sf, expected, 2e-15)
# Expected value computed with mpmath:
# expected = mp.cot(mp.pi*p/2)
@pytest.mark.parametrize('p, expected',
[(0.9999995, 7.853981633329977e-07),
(0.975, 0.039290107007669675),
(0.5, 1.0),
(0.01, 63.65674116287158),
(1e-14, 63661977236758.13),
(5e-80, 1.2732395447351627e+79)])
def test_halfcauchy_isf(p, expected):
x = stats.halfcauchy.isf(p)
assert_allclose(x, expected)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
# Expected values for foldnorm.sf were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 60
# def foldnorm_sf(x, c):
# x = mp.mpf(x)
# c = mp.mpf(c)
# return mp.ncdf(-x+c) + mp.ncdf(-x-c)
#
# E.g.
#
# >>> float(foldnorm_sf(2, 1))
# 0.16000515196308715
#
@pytest.mark.parametrize('x, c, expected',
[(2, 1, 0.16000515196308715),
(20, 1, 8.527223952630977e-81),
(10, 15, 0.9999997133484281),
(25, 15, 7.619853024160525e-24)])
def test_foldnorm_sf(x, c, expected):
sf = stats.foldnorm.sf(x, c)
assert_allclose(sf, expected, 1e-14)
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
| TestLevy |
python | langchain-ai__langchain | libs/langchain/langchain_classic/smith/evaluation/string_run_evaluator.py | {
"start": 1749,
"end": 5801
} | class ____(StringRunMapper):
"""Extract items to evaluate from the run object."""
def serialize_chat_messages(self, messages: list[dict] | list[list[dict]]) -> str:
"""Extract the input messages from the run."""
if isinstance(messages, list) and messages:
if isinstance(messages[0], dict):
chat_messages = _get_messages_from_run_dict(
cast("list[dict]", messages)
)
elif isinstance(messages[0], list):
# Runs from Tracer have messages as a list of lists of dicts
chat_messages = _get_messages_from_run_dict(messages[0])
else:
msg = f"Could not extract messages to evaluate {messages}" # type: ignore[unreachable]
raise ValueError(msg)
return get_buffer_string(chat_messages)
msg = f"Could not extract messages to evaluate {messages}"
raise ValueError(msg)
def serialize_inputs(self, inputs: dict) -> str:
"""Serialize inputs.
Args:
inputs: The inputs from the run, expected to contain prompts or messages.
Returns:
The serialized input text from the prompts or messages.
Raises:
ValueError: If neither prompts nor messages are found in the inputs.
"""
if "prompts" in inputs: # Should we even accept this?
input_ = "\n\n".join(inputs["prompts"])
elif "prompt" in inputs:
input_ = inputs["prompt"]
elif "messages" in inputs:
input_ = self.serialize_chat_messages(inputs["messages"])
else:
msg = "LLM Run must have either messages or prompts as inputs."
raise ValueError(msg)
return input_
def serialize_outputs(self, outputs: dict) -> str:
"""Serialize outputs.
Args:
outputs: The outputs from the run, expected to contain generations.
Returns:
The serialized output text from the first generation.
Raises:
ValueError: If no generations are found in the outputs or if the generations
are empty.
"""
if not outputs.get("generations"):
msg = "Cannot evaluate LLM Run without generations."
raise ValueError(msg)
generations: list[dict] | list[list[dict]] = outputs["generations"]
if not generations:
msg = "Cannot evaluate LLM run with empty generations."
raise ValueError(msg)
first_generation: dict | list[dict] = generations[0]
if isinstance(first_generation, list):
# Runs from Tracer have generations as a list of lists of dicts
# Whereas Runs from the API have a list of dicts
first_generation = first_generation[0]
if "message" in first_generation:
output_ = self.serialize_chat_messages([first_generation["message"]])
else:
output_ = first_generation["text"]
return output_
def map(self, run: Run) -> dict[str, str]:
"""Maps the Run to a dictionary."""
if run.run_type != "llm":
msg = "LLM RunMapper only supports LLM runs."
raise ValueError(msg)
if not run.outputs:
if run.error:
msg = f"Cannot evaluate errored LLM run {run.id}: {run.error}"
raise ValueError(msg)
msg = f"Run {run.id} has no outputs. Cannot evaluate this run."
raise ValueError(msg)
try:
inputs = self.serialize_inputs(run.inputs)
except Exception as e:
msg = f"Could not parse LM input from run inputs {run.inputs}"
raise ValueError(msg) from e
try:
output_ = self.serialize_outputs(run.outputs)
except Exception as e:
msg = f"Could not parse LM prediction from run outputs {run.outputs}"
raise ValueError(msg) from e
return {"input": inputs, "prediction": output_}
| LLMStringRunMapper |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_symbol.py | {
"start": 671,
"end": 1018
} | class ____(Exception):
def __init__(self, symbol: Symbol, declaration: ASTDeclaration) -> None:
assert symbol
assert declaration
self.symbol = symbol
self.declaration = declaration
def __str__(self) -> str:
return 'Internal C++ duplicate symbol error:\n%s' % self.symbol.dump(0)
| _DuplicateSymbolError |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 6670,
"end": 6840
} | class ____(_SimpleAutomotiveTestMixin):
"""Test it_IT automotive provider methods"""
license_plate_pattern: Pattern = re.compile(r"[A-Z]{2}\d{3}[A-Z]{2}")
| TestItIt |
python | doocs__leetcode | solution/1500-1599/1506.Find Root of N-Ary Tree/Solution.py | {
"start": 184,
"end": 451
} | class ____:
def findRoot(self, tree: List['Node']) -> 'Node':
x = 0
for node in tree:
x ^= node.val
for child in node.children:
x ^= child.val
return next(node for node in tree if node.val == x)
| Solution |
python | getsentry__sentry | src/sentry/rules/actions/sentry_apps/base.py | {
"start": 206,
"end": 633
} | class ____(EventAction, abc.ABC):
"""Abstract class to ensure that actions in SENTRY_APP_ACTIONS have all required methods"""
@property
@abc.abstractmethod
def actionType(self) -> str:
pass
@abc.abstractmethod
def get_custom_actions(self, project: Project) -> Sequence[Mapping[str, Any]]:
pass
@abc.abstractmethod
def self_validate(self) -> None:
pass
| SentryAppEventAction |
python | mlflow__mlflow | mlflow/store/artifact/sftp_artifact_repo.py | {
"start": 744,
"end": 1132
} | class ____:
def __init__(self, connections):
self._idle_connection_queue = Queue()
for c in connections:
self._idle_connection_queue.put(c)
@contextmanager
def get_sftp_connection(self):
c = self._idle_connection_queue.get(block=True)
try:
yield c
finally:
self._idle_connection_queue.put(c)
| _SftpPool |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 4430,
"end": 11417
} | class ____(TestCase):
def test_lengths_must_equal_dataset_size(self):
with self.assertRaises(ValueError):
random_split([1, 2, 3, 4], [1, 2])
def test_splits_have_correct_size(self):
splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 2)
self.assertEqual(len(splits[1]), 4)
splits = random_split([1, 2, 3, 4, 5, 6], [0.5, 0.5])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 3)
self.assertEqual(len(splits[1]), 3)
# Odd size splits
self.assertEqual(
len(
random_split(
range(3), [0.5, 0.5], generator=torch.Generator().manual_seed(1)
)
),
2,
)
# Odd sized round-robin splits
splits = random_split(
range(106), [0.1, 0.2, 0.3, 0.4], generator=torch.Generator().manual_seed(1)
)
self.assertEqual(len(splits[0]), 11)
self.assertEqual(len(splits[1]), 22)
self.assertEqual(len(splits[2]), 31)
self.assertEqual(len(splits[3]), 42)
def test_splits_are_mutually_exclusive(self):
data = [5, 2, 3, 4, 1, 6]
splits = random_split(data, [2, 4])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
splits = random_split(data, [0.33, 0.67])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
data = [1, 2, 3, 4]
splits = random_split(data, [0.25, 0.75])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
def test_splits_indexing_type(self):
r"""Indices generated by random_split
should be of integer type
"""
class CustomDataset:
def __init__(self, test_object, custom_list):
self.data = custom_list
self.test_object = test_object
def __getitem__(self, key):
self.test_object.assertEqual(type(key), int)
return self.data[key]
def __len__(self):
return len(self.data)
x = [1, 2, 3, 4, 5]
dataset = CustomDataset(self, x)
dataset = random_split(dataset, [5])[0]
data_loader = DataLoader(dataset)
for _batch in data_loader:
pass
# fractional splitting
dataset = CustomDataset(self, x)
dataset = random_split(dataset, [1.0])[0]
data_loader = DataLoader(dataset)
for _batch in data_loader:
pass
def test_splits_reproducibility(self):
self.assertEqual(
[
list(x)
for x in random_split(
range(10), [3, 7], generator=torch.Generator().manual_seed(1)
)
],
[[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
)
self.assertEqual(
random_split(
range(100), [60, 40], generator=torch.Generator().manual_seed(42)
),
random_split(
range(100), [60, 40], generator=torch.Generator().manual_seed(42)
),
)
self.assertEqual(
random_split(
range(100), [0.5, 0.5], generator=torch.Generator().manual_seed(42)
),
random_split(
range(100), [0.5, 0.5], generator=torch.Generator().manual_seed(42)
),
)
self.assertEqual(
random_split(
range(100),
[0.33, 0.33, 0.34],
generator=torch.Generator().manual_seed(42),
),
random_split(
range(100),
[0.33, 0.33, 0.34],
generator=torch.Generator().manual_seed(42),
),
)
def test_incomplete_fractional_splits(self):
with self.assertRaises(ValueError):
# should raise since the sum of fractions is not 1
random_split([1, 2, 3, 4], [0.1])
with self.assertRaises(ValueError):
# should raise since fraction > 1
random_split([1, 2, 3, 4], [1.1])
def test_splits_generator(self):
# A random_split without a specific generator should affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5])
b = torch.rand(10)
self.assertNotEqual(a, b)
# A random_split with a specific generator should not affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
b = torch.rand(10)
self.assertEqual(a, b)
def test_slicing_of_subset_of_dataset(self):
# Testing slicing a subset initialized with a dataset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_dataset[:], dataset[:])
self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset from random split
subset1, subset2 = random_split(dataset, [3, 2])
self.assertEqual(subset1[:], dataset[subset1.indices[:]])
self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
def test_slicing_of_subset_of_subset(self):
# Testing slicing a subset initialized with a subset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_subset[:], dataset[:])
self.assertEqual(subset_of_subset[0:2], dataset[0:2])
self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset of subset from random split
subset1, subset2 = random_split(dataset, [4, 1])
subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
idx = [subset1.indices[i] for i in subset_of_subset1.indices]
self.assertEqual(subset_of_subset1[:], dataset[idx.copy()])
self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
| TestDatasetRandomSplit |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Filters.py | {
"start": 6263,
"end": 6457
} | class ____(CtrlNode):
"""Returns the pointwise derivative of the input"""
nodeName = 'DerivativeFilter'
def processData(self, data):
return data[1:] - data[:-1]
| Derivative |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/callables.py | {
"start": 335,
"end": 637
} | class ____(Generic[EPInputType, EPOutputType]):
def __init__(self, tainted: str, benign: str) -> None:
self.tainted = tainted
self.benign = benign
async def async_run(self) -> None:
pass
async def async_call_tainted(self) -> None:
pass
| AbstractEventProcessor |
python | kamyu104__LeetCode-Solutions | Python/minimum-increments-to-equalize-leaf-paths.py | {
"start": 45,
"end": 1250
} | class ____(object):
def minIncrease(self, n, edges, cost):
"""
:type n: int
:type edges: List[List[int]]
:type cost: List[int]
:rtype: int
"""
def iter_dfs():
result = n-1
mx = [0]*len(adj)
stk = [(1, (0, -1))]
while stk:
step, (u, p) = stk.pop()
if step == 1:
stk.append((2, (u, p)))
for v in reversed(adj[u]):
if v != p:
stk.append((1, (v, u)))
elif step == 2:
cnt = 0
for v in adj[u]:
if v == p or mx[v] < mx[u]:
continue
if mx[v] > mx[u]:
mx[u] = mx[v]
cnt = 0
cnt += 1
result -= cnt
mx[u] += cost[u]
return result
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return iter_dfs()
# Time: O(n)
# Space: O(n)
# dfs
| Solution |
python | pandas-dev__pandas | pandas/tests/plotting/frame/test_frame.py | {
"start": 1235,
"end": 101328
} | class ____:
@pytest.mark.slow
def test_plot(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
_check_plot_works(df.plot, grid=False)
@pytest.mark.slow
def test_plot_subplots(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
# _check_plot_works adds an ax so use default_axes=True to avoid warning
axes = _check_plot_works(df.plot, default_axes=True, subplots=True)
_check_axes_shape(axes, axes_num=4, layout=(4, 1))
@pytest.mark.slow
def test_plot_subplots_negative_layout(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
axes = _check_plot_works(
df.plot,
default_axes=True,
subplots=True,
layout=(-1, 2),
)
_check_axes_shape(axes, axes_num=4, layout=(2, 2))
@pytest.mark.slow
def test_plot_subplots_use_index(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
axes = _check_plot_works(
df.plot,
default_axes=True,
subplots=True,
use_index=False,
)
_check_ticks_props(axes, xrot=0)
_check_axes_shape(axes, axes_num=4, layout=(4, 1))
@pytest.mark.xfail(reason="Api changed in 3.6.0")
@pytest.mark.slow
def test_plot_invalid_arg(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
msg = "'Line2D' object has no property 'blarg'"
with pytest.raises(AttributeError, match=msg):
df.plot.line(blarg=True)
@pytest.mark.slow
def test_plot_tick_props(self):
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=list(string.ascii_letters[:10]),
)
ax = _check_plot_works(df.plot, use_index=True)
_check_ticks_props(ax, xrot=0)
@pytest.mark.slow
@pytest.mark.parametrize(
"kwargs",
[
{"yticks": [1, 5, 10]},
{"xticks": [1, 5, 10]},
{"ylim": (-100, 100), "xlim": (-100, 100)},
{"default_axes": True, "subplots": True, "title": "blah"},
],
)
def test_plot_other_args(self, kwargs):
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=list(string.ascii_letters[:10]),
)
_check_plot_works(df.plot, **kwargs)
@pytest.mark.slow
def test_plot_visible_ax(self):
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=list(string.ascii_letters[:10]),
)
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
# present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title="blah")
_check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:2]:
_check_visible(ax.xaxis) # xaxis must be visible for grid
_check_visible(ax.get_xticklabels(), visible=False)
_check_visible(ax.get_xticklabels(minor=True), visible=False)
_check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
_check_visible(ax.xaxis)
_check_visible(ax.get_xticklabels())
_check_visible([ax.xaxis.get_label()])
_check_ticks_props(ax, xrot=0)
@pytest.mark.slow
def test_plot_title(self):
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=list(string.ascii_letters[:10]),
)
_check_plot_works(df.plot, title="blah")
@pytest.mark.slow
def test_plot_multiindex(self):
tuples = zip(string.ascii_letters[:10], range(10), strict=True)
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=MultiIndex.from_tuples(tuples),
)
ax = _check_plot_works(df.plot, use_index=True)
_check_ticks_props(ax, xrot=0)
@pytest.mark.slow
def test_plot_multiindex_unicode(self):
# unicode
index = MultiIndex.from_tuples(
[
("\u03b1", 0),
("\u03b1", 1),
("\u03b2", 2),
("\u03b2", 3),
("\u03b3", 4),
("\u03b3", 5),
("\u03b4", 6),
("\u03b4", 7),
],
names=["i0", "i1"],
)
columns = MultiIndex.from_tuples(
[("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"]
)
df = DataFrame(
np.random.default_rng(2).integers(0, 10, (8, 2)),
columns=columns,
index=index,
)
_check_plot_works(df.plot, title="\u03a3")
@pytest.mark.slow
@pytest.mark.parametrize("layout", [None, (-1, 1)])
def test_plot_single_column_bar(self, layout):
# GH 6951
# Test with single column
df = DataFrame({"x": np.random.default_rng(2).random(10)})
axes = _check_plot_works(df.plot.bar, subplots=True, layout=layout)
_check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_plot_passed_ax(self):
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
df = DataFrame({"x": np.random.default_rng(2).random(10)})
_, ax = mpl.pyplot.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
result = ax.axes
assert result is axes[0]
@pytest.mark.parametrize(
"cols, x, y",
[
[list("ABCDE"), "A", "B"],
[["A", "B"], "A", "B"],
[["C", "A"], "C", "A"],
[["A", "C"], "A", "C"],
[["B", "C"], "B", "C"],
[["A", "D"], "A", "D"],
[["A", "E"], "A", "E"],
],
)
def test_nullable_int_plot(self, cols, x, y):
# GH 32073
dates = ["2008", "2009", None, "2011", "2012"]
df = DataFrame(
{
"A": [1, 2, 3, 4, 5],
"B": [1, 2, 3, 4, 5],
"C": np.array([7, 5, np.nan, 3, 2], dtype=object),
"D": pd.to_datetime(dates, format="%Y").view("i8"),
"E": pd.to_datetime(dates, format="%Y", utc=True).view("i8"),
}
)
_check_plot_works(df[cols].plot, x=x, y=y)
@pytest.mark.slow
@pytest.mark.parametrize("plot", ["line", "bar", "hist", "pie"])
def test_integer_array_plot_series(self, plot):
# GH 25587
arr = pd.array([1, 2, 3, 4], dtype="UInt32")
s = Series(arr)
_check_plot_works(getattr(s.plot, plot))
@pytest.mark.slow
@pytest.mark.parametrize(
"plot, kwargs",
[
["line", {}],
["bar", {}],
["hist", {}],
["pie", {"y": "y"}],
["scatter", {"x": "x", "y": "y"}],
["hexbin", {"x": "x", "y": "y"}],
],
)
def test_integer_array_plot_df(self, plot, kwargs):
# GH 25587
arr = pd.array([1, 2, 3, 4], dtype="UInt32")
df = DataFrame({"x": arr, "y": arr})
_check_plot_works(getattr(df.plot, plot), **kwargs)
def test_nonnumeric_exclude(self):
df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
def test_implicit_label(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"]
)
ax = df.plot(x="a", y="b")
_check_text_labels(ax.xaxis.get_label(), "a")
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(
np.random.default_rng(2).standard_normal((2, 2)), columns=["a", "b"]
)
df.index.name = "NAME"
df.plot(y="b", label="LABEL")
assert df.index.name == "NAME"
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=5, freq="B"),
)
_check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
_check_data(df.plot(x=0), df.set_index("A").plot())
_check_data(df.plot(y=0), df.B.plot())
_check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot())
_check_data(df.plot(x="A"), df.set_index("A").plot())
_check_data(df.plot(y="B"), df.B.plot())
def test_plot_xy_int_cols(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=5, freq="B"),
)
# columns.inferred_type == 'integer'
df.columns = np.arange(1, len(df.columns) + 1)
_check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
_check_data(df.plot(x=1), df.set_index(1).plot())
_check_data(df.plot(y=1), df[1].plot())
def test_plot_xy_figsize_and_title(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=5, freq="B"),
)
# figsize and title
ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
_check_text_labels(ax.title, "Test")
_check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@pytest.mark.parametrize(
"input_log, expected_log", [(True, "log"), ("sym", "symlog")]
)
def test_logscales(self, input_log, expected_log):
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=input_log)
_check_ax_scales(ax, yaxis=expected_log)
assert ax.get_yscale() == expected_log
ax = df.plot(logx=input_log)
_check_ax_scales(ax, xaxis=expected_log)
assert ax.get_xscale() == expected_log
ax = df.plot(loglog=input_log)
_check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)
assert ax.get_xscale() == expected_log
assert ax.get_yscale() == expected_log
@pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"])
def test_invalid_logscale(self, input_param):
# GH: 24867
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
msg = f"keyword '{input_param}' should be bool, None, or 'sym', not 'sm'"
with pytest.raises(ValueError, match=msg):
df.plot(**{input_param: "sm"})
msg = f"PiePlot ignores the '{input_param}' keyword"
with tm.assert_produces_warning(UserWarning, match=msg):
df.plot.pie(subplots=True, **{input_param: True})
def test_xcompat(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
_check_ticks_props(ax, xrot=30)
def test_xcompat_plot_params(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
plotting.plot_params["xaxis.compat"] = True
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
_check_ticks_props(ax, xrot=30)
def test_xcompat_plot_params_x_compat(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
plotting.plot_params["x_compat"] = False
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
msg = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_xcompat_plot_params_context_manager(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
# useful if you're plotting a bunch together
with plotting.plot_params.use("x_compat", True):
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
_check_ticks_props(ax, xrot=30)
def test_xcompat_plot_period(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
msg = r"PeriodDtype\[B\] is deprecated "
with tm.assert_produces_warning(FutureWarning, match=msg):
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
_check_ticks_props(ax, xrot=0)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.default_rng(2).random((21, 2)),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=["a", "b"],
)
df.plot()
mpl.pyplot.axhline(y=0)
@pytest.mark.parametrize("index_dtype", [np.int64, np.float64])
def test_unsorted_index(self, index_dtype):
df = DataFrame(
{"y": np.arange(100)},
index=Index(np.arange(99, -1, -1), dtype=index_dtype),
dtype=np.int64,
)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
tm.assert_series_equal(rs, df.y, check_index_type=False)
@pytest.mark.parametrize(
"df",
[
DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0]),
DataFrame(
{"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]},
index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
),
],
)
def test_unsorted_index_lims(self, df):
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
def test_unsorted_index_lims_x_y(self):
df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]})
ax = df.plot(x="z", y="y")
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
def test_negative_log(self):
df = -DataFrame(
np.random.default_rng(2).random((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
msg = "Log-y scales are not supported in area plot"
with pytest.raises(ValueError, match=msg):
df.plot.area(logy=True)
with pytest.raises(ValueError, match=msg):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines, strict=True):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
@pytest.mark.parametrize("kind", ["line", "area"])
@pytest.mark.parametrize("mult", [1, -1])
def test_line_area_stacked(self, kind, mult):
df = mult * DataFrame(
np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"]
)
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
@pytest.mark.parametrize("kind", ["line", "area"])
def test_line_area_stacked_sep_df(self, kind):
# each column has either positive or negative value
sep_df = DataFrame(
{
"w": np.random.default_rng(2).random(6),
"x": np.random.default_rng(2).random(6),
"y": -np.random.default_rng(2).random(6),
"z": -np.random.default_rng(2).random(6),
}
)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
def test_line_area_stacked_mixed(self):
mixed_df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["w", "x", "y", "z"],
)
_check_plot_works(mixed_df.plot, stacked=False)
msg = (
"When stacked is True, each column must be either all positive or "
"all negative. Column 'w' contains both positive and negative "
"values"
)
with pytest.raises(ValueError, match=msg):
mixed_df.plot(stacked=True)
@pytest.mark.parametrize("kind", ["line", "area"])
def test_line_area_stacked_positive_idx(self, kind):
df = DataFrame(
np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"]
)
# Use an index with strictly positive values, preventing
# matplotlib from warning about ignoring xlim
df2 = df.set_index(df.index + 1)
_check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
@pytest.mark.parametrize(
"idx", [range(4), date_range("2023-01-1", freq="D", periods=4)]
)
def test_line_area_nan_df(self, idx):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({"a": values1, "b": values2}, index=idx)
ax = _check_plot_works(df.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
exp = np.array([3, 2, 1], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
tm.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False]))
tm.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False]))
@pytest.mark.parametrize(
"idx", [range(4), date_range("2023-01-1", freq="D", periods=4)]
)
def test_line_area_nan_df_stacked(self, idx):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({"a": values1, "b": values2}, index=idx)
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(df.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
@pytest.mark.parametrize(
"idx", [range(4), date_range("2023-01-1", freq="D", periods=4)]
)
@pytest.mark.parametrize("kwargs", [{}, {"stacked": False}])
def test_line_area_nan_df_stacked_area(self, idx, kwargs):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({"a": values1, "b": values2}, index=idx)
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(df.plot.area, **kwargs)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
if kwargs:
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
else:
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(df.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
@pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}])
def test_line_lim(self, kwargs):
df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"])
ax = df.plot(**kwargs)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_line_lim_subplots(self):
df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"])
axes = df.plot(secondary_y=True, subplots=True)
_check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
@pytest.mark.xfail(
strict=False,
reason="2020-12-01 this has been failing periodically on the "
"ymin==0 assertion for a week or so.",
)
@pytest.mark.parametrize("stacked", [True, False])
def test_area_lim(self, stacked):
df = DataFrame(
np.random.default_rng(2).random((6, 4)), columns=["x", "y", "z", "four"]
)
neg_df = -df
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
def test_area_sharey_dont_overwrite(self):
# GH37942
df = DataFrame(np.random.default_rng(2).random((4, 2)), columns=["x", "y"])
fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True)
df.plot(ax=ax1, kind="area")
df.plot(ax=ax2, kind="area")
assert get_y_axis(ax1).joined(ax1, ax2)
assert get_y_axis(ax2).joined(ax1, ax2)
@pytest.mark.parametrize("stacked", [True, False])
def test_bar_linewidth(self, stacked):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
ax = df.plot.bar(stacked=stacked, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
def test_bar_linewidth_subplots(self):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
_check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.parametrize(
"meth, dim", [("bar", "get_width"), ("barh", "get_height")]
)
@pytest.mark.parametrize("stacked", [True, False])
def test_bar_barwidth(self, meth, dim, stacked):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
width = 0.9
ax = getattr(df.plot, meth)(stacked=stacked, width=width)
for r in ax.patches:
if not stacked:
assert getattr(r, dim)() == width / len(df.columns)
else:
assert getattr(r, dim)() == width
@pytest.mark.parametrize(
"meth, dim", [("bar", "get_width"), ("barh", "get_height")]
)
def test_barh_barwidth_subplots(self, meth, dim):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
width = 0.9
axes = getattr(df.plot, meth)(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert getattr(r, dim)() == width
def test_bar_bottom_left_bottom(self):
df = DataFrame(np.random.default_rng(2).random((5, 5)))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
assert result == [-1, -2, -3, -4, -5]
def test_bar_bottom_left_left(self):
df = DataFrame(np.random.default_rng(2).random((5, 5)))
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
assert result == [1, 2, 3, 4, 5]
def test_bar_bottom_left_subplots(self):
df = DataFrame(np.random.default_rng(2).random((5, 5)))
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
assert result == [-1] * 5
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
def test_bar_nan(self):
df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
def test_bar_nan_stacked(self):
df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]})
ax = df.plot.bar(stacked=True)
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
def test_bar_stacked_label_position_with_zero_height(self):
# GH 59429
df = DataFrame({"A": [3, 0, 1], "B": [0, 2, 4], "C": [5, 0, 2]})
ax = df.plot.bar(stacked=True)
ax.bar_label(ax.containers[-1])
expected = [8.0, 2.0, 7.0]
result = [text.xy[1] for text in ax.texts]
tm.assert_almost_equal(result, expected)
plt.close("all")
@pytest.mark.parametrize("idx", [Index, pd.CategoricalIndex])
def test_bar_categorical(self, idx):
# GH 13019
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 5)),
index=idx(list("ABCDEF")),
columns=idx(list("abcde")),
)
ax = df.plot.bar()
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 5.15
ax = df.plot.bar(stacked=True)
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
@pytest.mark.parametrize("x, y", [("x", "y"), (1, 2)])
def test_plot_scatter(self, x, y):
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
_check_plot_works(df.plot.scatter, x=x, y=y)
def test_plot_scatter_error(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
msg = re.escape("scatter() missing 1 required positional argument: 'y'")
with pytest.raises(TypeError, match=msg):
df.plot.scatter(x="x")
msg = re.escape("scatter() missing 1 required positional argument: 'x'")
with pytest.raises(TypeError, match=msg):
df.plot.scatter(y="y")
def test_plot_scatter_shape(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
# GH 6951
axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
_check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_scatter_on_datetime_time_data(self):
# datetime.time type is now supported in scatter, since a converter
# is implemented in ScatterPlot
df = DataFrame(np.random.default_rng(2).standard_normal(10), columns=["a"])
df["dtime"] = date_range(start="2014-01-01", freq="h", periods=10).time
df.plot(kind="scatter", x="dtime", y="a")
def test_scatter_line_xticks(self):
# GH#61005
df = DataFrame(
[(datetime(year=2025, month=1, day=1, hour=n), n) for n in range(3)],
columns=["datetime", "y"],
)
fig, ax = plt.subplots(2, sharex=True)
df.plot.scatter(x="datetime", y="y", ax=ax[0])
scatter_xticks = ax[0].get_xticks()
df.plot(x="datetime", y="y", ax=ax[1])
line_xticks = ax[1].get_xticks()
assert scatter_xticks[0] == line_xticks[0]
assert scatter_xticks[-1] == line_xticks[-1]
@pytest.mark.parametrize("x, y", [("dates", "vals"), (0, 1)])
def test_scatterplot_datetime_data(self, x, y):
# GH 30391
dates = date_range(start=date(2019, 1, 1), periods=12, freq="W")
vals = np.random.default_rng(2).normal(0, 1, len(dates))
df = DataFrame({"dates": dates, "vals": vals})
_check_plot_works(df.plot.scatter, x=x, y=y)
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
@pytest.mark.parametrize("x, y", [("a", "b"), (0, 1)])
@pytest.mark.parametrize("b_col", [[2, 3, 4], ["a", "b", "c"]])
def test_scatterplot_object_data(self, b_col, x, y, infer_string):
# GH 18755
with option_context("future.infer_string", infer_string):
df = DataFrame({"a": ["A", "B", "C"], "b": b_col})
_check_plot_works(df.plot.scatter, x=x, y=y)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize(
"categories",
(["setosa", "versicolor", "virginica"], ["versicolor", "virginica", "setosa"]),
)
def test_scatterplot_color_by_categorical(self, ordered, categories):
df = DataFrame(
[[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
columns=["length", "width"],
)
df["species"] = pd.Categorical(
["setosa", "setosa", "virginica", "virginica", "versicolor"],
ordered=ordered,
categories=categories,
)
ax = df.plot.scatter(x=0, y=1, c="species")
(colorbar_collection,) = ax.collections
colorbar = colorbar_collection.colorbar
expected_ticks = np.array([0.5, 1.5, 2.5])
result_ticks = colorbar.get_ticks()
tm.assert_numpy_array_equal(result_ticks, expected_ticks)
expected_boundaries = np.array([0.0, 1.0, 2.0, 3.0])
result_boundaries = colorbar._boundaries
tm.assert_numpy_array_equal(result_boundaries, expected_boundaries)
expected_yticklabels = categories
result_yticklabels = [i.get_text() for i in colorbar.ax.get_ymajorticklabels()]
assert all(
i == j
for i, j in zip(result_yticklabels, expected_yticklabels, strict=True)
)
    @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
    def test_plot_scatter_with_categorical_data(self, x, y):
        """Categorical columns are plottable on either scatter axis."""
        # after fixing GH 18755, should be able to plot categorical data
        df = DataFrame({"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])})
        _check_plot_works(df.plot.scatter, x=x, y=y)
    @pytest.mark.parametrize("x, y, c", [("x", "y", "z"), (0, 1, 2)])
    def test_plot_scatter_with_c(self, x, y, c):
        """`c=<column>` colors points by that column, defaulting to the Greys cmap."""
        df = DataFrame(
            np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)),
            index=list(string.ascii_letters[:6]),
            columns=["x", "y", "z", "four"],
        )
        ax = df.plot.scatter(x=x, y=y, c=c)
        # default to Greys
        assert ax.collections[0].cmap.name == "Greys"
        # colorbar is labeled with the color column's name
        assert ax.collections[0].colorbar.ax.get_ylabel() == "z"
    def test_plot_scatter_with_c_props(self):
        """Scatter `c=` interacts correctly with colormap/colorbar/solid-color kwargs."""
        df = DataFrame(
            np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)),
            index=list(string.ascii_letters[:6]),
            columns=["x", "y", "z", "four"],
        )
        cm = "cubehelix"
        ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm)
        assert ax.collections[0].cmap.name == cm
        # verify turning off colorbar works
        ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False)
        assert ax.collections[0].colorbar is None
        # verify that we can still plot a solid color
        ax = df.plot.scatter(x=0, y=1, c="red")
        assert ax.collections[0].colorbar is None
        _check_colors(ax.collections, facecolors=["r"])
    def test_plot_scatter_with_c_array(self):
        """An ndarray passed via `c=` is forwarded to matplotlib unchanged (GH 8852)."""
        # Ensure that we can pass an np.array straight through to matplotlib,
        # this functionality was accidentally removed previously.
        # See https://github.com/pandas-dev/pandas/issues/8852 for bug report
        #
        # Exercise colormap path and non-colormap path as they are independent
        #
        df = DataFrame({"A": [1, 2], "B": [3, 4]})
        red_rgba = [1.0, 0.0, 0.0, 1.0]
        green_rgba = [0.0, 1.0, 0.0, 1.0]
        rgba_array = np.array([red_rgba, green_rgba])
        ax = df.plot.scatter(x="A", y="B", c=rgba_array)
        # expect the face colors of the points in the non-colormap path to be
        # identical to the values we supplied, normally we'd be on shaky ground
        # comparing floats for equality but here we expect them to be
        # identical.
        tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array)
        # we don't test the colors of the faces in this next plot because they
        # are dependent on the spring colormap, which may change its colors
        # later.
        float_array = np.array([0.0, 1.0])
        df.plot.scatter(x="A", y="B", c=float_array, cmap="spring")
    def test_plot_scatter_with_s(self):
        """`s=<column>` sizes the scatter points from that column (GH 32904)."""
        # this refers to GH 32904
        df = DataFrame(
            np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"]
        )
        ax = df.plot.scatter(x="a", y="b", s="c")
        tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
    def test_plot_scatter_with_norm(self):
        """An explicit `norm=` is attached to the scatter collection (GH 45809)."""
        # added while fixing GH 45809
        df = DataFrame(
            np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"]
        )
        norm = mpl.colors.LogNorm()
        ax = df.plot.scatter(x="a", y="b", c="c", norm=norm)
        assert ax.collections[0].norm is norm
    def test_plot_scatter_without_norm(self):
        """Without `norm=`, normalization matches a default over c's min/max (GH 45809)."""
        # added while fixing GH 45809
        df = DataFrame(
            np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"]
        )
        ax = df.plot.scatter(x="a", y="b", c="c")
        plot_norm = ax.collections[0].norm
        color_min_max = (df.c.min(), df.c.max())
        default_norm = mpl.colors.Normalize(*color_min_max)
        for value in df.c:
            assert plot_norm(value) == default_norm(value)
    @pytest.mark.slow
    @pytest.mark.parametrize(
        "kwargs",
        [
            {},
            {"legend": False},
            {"default_axes": True, "subplots": True},
            {"stacked": True},
        ],
    )
    def test_plot_bar(self, kwargs):
        """Smoke test: bar plots work with common keyword combinations."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((6, 4)),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        _check_plot_works(df.plot.bar, **kwargs)
    @pytest.mark.slow
    def test_plot_bar_int_col(self):
        """Smoke test: bar plots work when column labels are integers."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 15)),
            index=list(string.ascii_letters[:10]),
            columns=range(15),
        )
        _check_plot_works(df.plot.bar)
    @pytest.mark.slow
    def test_plot_bar_ticks(self):
        """Bar plot x-tick labels default to 90° rotation; `rot`/`fontsize` override."""
        df = DataFrame({"a": [0, 1], "b": [1, 0]})
        ax = _check_plot_works(df.plot.bar)
        _check_ticks_props(ax, xrot=90)
        ax = df.plot.bar(rot=35, fontsize=10)
        _check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
    @pytest.mark.slow
    def test_plot_barh_ticks(self):
        """Horizontal bar plot y-tick labels default to 0° rotation; kwargs override."""
        df = DataFrame({"a": [0, 1], "b": [1, 0]})
        ax = _check_plot_works(df.plot.barh)
        _check_ticks_props(ax, yrot=0)
        ax = df.plot.barh(rot=55, fontsize=11)
        _check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
    def test_boxplot(self, hist_df):
        """Boxplot labels/ticks one box per numeric column; 7 artist lines per box."""
        df = hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        ax = _check_plot_works(df.plot.box)
        _check_text_labels(ax.get_xticklabels(), labels)
        tm.assert_numpy_array_equal(
            ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)
        )
        # each box contributes 7 Line2D artists (whiskers, caps, box, median, fliers)
        assert len(ax.lines) == 7 * len(numeric_cols)
    def test_boxplot_series(self, hist_df):
        """Series.plot.box works and honors tick rotation."""
        df = hist_df
        series = df["height"]
        axes = series.plot.box(rot=40)
        _check_ticks_props(axes, xrot=40, yrot=0)
        _check_plot_works(series.plot.box)
    def test_boxplot_series_positions(self, hist_df):
        """`positions=` places boxes at the given x-tick locations."""
        df = hist_df
        positions = np.array([1, 6, 7])
        ax = df.plot.box(positions=positions)
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        _check_text_labels(ax.get_xticklabels(), labels)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
        # 7 Line2D artists per box
        assert len(ax.lines) == 7 * len(numeric_cols)
    @pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
    @pytest.mark.xfail(
        Version(mpl.__version__) >= Version("3.10"),
        reason="Fails starting with matplotlib 3.10",
    )
    def test_boxplot_vertical(self, hist_df):
        """Horizontal boxplot: y-tick labels carry the column names and rotation."""
        df = hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        # if horizontal, yticklabels are rotated
        # mpl 3.10 renamed the `vert` kwarg to `orientation`
        kwargs = (
            {"vert": False}
            if Version(mpl.__version__) < Version("3.10")
            else {"orientation": "horizontal"}
        )
        ax = df.plot.box(rot=50, fontsize=8, **kwargs)
        _check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
        _check_text_labels(ax.get_yticklabels(), labels)
        assert len(ax.lines) == 7 * len(numeric_cols)
    @pytest.mark.filterwarnings("ignore::UserWarning")
    @pytest.mark.xfail(
        Version(mpl.__version__) >= Version("3.10"),
        reason="Fails starting with matplotlib version 3.10",
    )
    def test_boxplot_vertical_subplots(self, hist_df):
        """Horizontal boxplot with subplots: one log-x axes per numeric column."""
        df = hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        # mpl 3.10 renamed the `vert` kwarg to `orientation`
        kwargs = (
            {"vert": False}
            if Version(mpl.__version__) < Version("3.10")
            else {"orientation": "horizontal"}
        )
        axes = _check_plot_works(
            df.plot.box, default_axes=True, subplots=True, logx=True, **kwargs
        )
        _check_axes_shape(axes, axes_num=3, layout=(1, 3))
        _check_ax_scales(axes, xaxis="log")
        for ax, label in zip(axes, labels, strict=True):
            _check_text_labels(ax.get_yticklabels(), [label])
            # 7 Line2D artists for the single box on each subplot
            assert len(ax.lines) == 7
    @pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
    @pytest.mark.xfail(
        Version(mpl.__version__) >= Version("3.10"),
        reason="Fails starting with matplotlib 3.10",
    )
    def test_boxplot_vertical_positions(self, hist_df):
        """Horizontal boxplot honors `positions=` on the y axis."""
        df = hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        positions = np.array([3, 2, 8])
        # mpl 3.10 renamed the `vert` kwarg to `orientation`
        kwargs = (
            {"vert": False}
            if Version(mpl.__version__) < Version("3.10")
            else {"orientation": "horizontal"}
        )
        ax = df.plot.box(positions=positions, **kwargs)
        _check_text_labels(ax.get_yticklabels(), labels)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
        assert len(ax.lines) == 7 * len(numeric_cols)
    def test_boxplot_return_type_invalid(self):
        """An unrecognized `return_type` raises ValueError with a clear message."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((6, 4)),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        msg = "return_type must be {None, 'axes', 'dict', 'both'}"
        with pytest.raises(ValueError, match=msg):
            df.plot.box(return_type="not_a_type")
    @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
    def test_boxplot_return_type_invalid_type(self, return_type):
        """Each valid `return_type` yields the corresponding result shape."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((6, 4)),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        result = df.plot.box(return_type=return_type)
        _check_box_return_type(result, return_type)
    def test_kde_df(self):
        """KDE plot draws one legend entry per column with default tick rotation."""
        pytest.importorskip("scipy")
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        ax = _check_plot_works(df.plot, kind="kde")
        expected = [pprint_thing(c) for c in df.columns]
        _check_legend_labels(ax, labels=expected)
        _check_ticks_props(ax, xrot=0)
    def test_kde_df_rot(self):
        """KDE plot honors `rot` and `fontsize` for tick labels."""
        pytest.importorskip("scipy")
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        ax = df.plot(kind="kde", rot=20, fontsize=5)
        _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
    def test_kde_df_subplots(self):
        """KDE with subplots produces one axes per column, stacked vertically."""
        pytest.importorskip("scipy")
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        axes = _check_plot_works(
            df.plot,
            default_axes=True,
            kind="kde",
            subplots=True,
        )
        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
    def test_kde_df_logy(self):
        """KDE with `logy=True` gives every subplot a log-scaled y axis."""
        pytest.importorskip("scipy")
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        axes = df.plot(kind="kde", logy=True, subplots=True)
        _check_ax_scales(axes, yaxis="log")
    def test_kde_missing_vals(self):
        """KDE plot tolerates NaN values in the data."""
        pytest.importorskip("scipy")
        df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4)))
        df.loc[0, 0] = np.nan
        _check_plot_works(df.plot, kind="kde")
    def test_hist_df(self):
        """Histogram legend lists all columns; subplots + logy give log y axes."""
        df = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))
        ax = _check_plot_works(df.plot.hist)
        expected = [pprint_thing(c) for c in df.columns]
        _check_legend_labels(ax, labels=expected)
        axes = _check_plot_works(
            df.plot.hist,
            default_axes=True,
            subplots=True,
            logy=True,
        )
        _check_axes_shape(axes, axes_num=4, layout=(4, 1))
        _check_ax_scales(axes, yaxis="log")
    def test_hist_df_series(self):
        """Series histogram honors `rot` for x-tick labels."""
        series = Series(np.random.default_rng(2).random(10))
        axes = series.plot.hist(rot=40)
        _check_ticks_props(axes, xrot=40, yrot=0)
    def test_hist_df_series_cumulative_density(self):
        """Cumulative density histogram's last bin reaches exactly 1.0."""
        series = Series(np.random.default_rng(2).random(10))
        ax = series.plot.hist(cumulative=True, bins=4, density=True)
        # height of the last bin's rectangle must be 1.0
        rects = [x for x in ax.get_children() if isinstance(x, mpl.patches.Rectangle)]
        tm.assert_almost_equal(rects[-1].get_height(), 1.0)
    def test_hist_df_series_cumulative(self):
        """Cumulative count histogram's last data bin reaches the sample size (10)."""
        series = Series(np.random.default_rng(2).random(10))
        ax = series.plot.hist(cumulative=True, bins=4)
        # rects[-1] is the axes' background patch, so check the last bin at [-2]
        rects = [x for x in ax.get_children() if isinstance(x, mpl.patches.Rectangle)]
        tm.assert_almost_equal(rects[-2].get_height(), 10.0)
    def test_hist_df_orientation(self):
        """Horizontal histogram rotates the y-tick labels rather than the x ones."""
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        # if horizontal, yticklabels are rotated
        axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal")
        _check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
    @pytest.mark.parametrize("weight_shape", [(100,), (100, 2)])
    def test_hist_weights(self, weight_shape):
        """Uniform weights scale histogram bar heights proportionally (GH 33173)."""
        # GH 33173
        weights = 0.1 * np.ones(shape=weight_shape)
        df = DataFrame(
            dict(
                zip(
                    ["A", "B"],
                    np.random.default_rng(2).standard_normal((2, 100)),
                    strict=True,
                )
            )
        )
        ax1 = _check_plot_works(df.plot, kind="hist", weights=weights)
        ax2 = _check_plot_works(df.plot, kind="hist")
        patch_height_with_weights = [patch.get_height() for patch in ax1.patches]
        # original heights with no weights, and we manually multiply with example
        # weights, so after multiplication, they should be almost same
        expected_patch_height = [0.1 * patch.get_height() for patch in ax2.patches]
        tm.assert_almost_equal(patch_height_with_weights, expected_patch_height)
    def _check_box_coord(
        self,
        patches,
        expected_y=None,
        expected_h=None,
        expected_x=None,
        expected_w=None,
    ):
        """Assert positions/sizes of histogram bar patches.

        Parameters
        ----------
        patches : sequence of matplotlib Rectangle patches.
        expected_y, expected_h : array-like, optional
            Expected bottom edge and height of each patch (vertical bars).
        expected_x, expected_w : array-like, optional
            Expected left edge and width of each patch (horizontal bars).

        Only the expectations that are provided (not None) are checked.
        """
        result_y = np.array([p.get_y() for p in patches])
        result_height = np.array([p.get_height() for p in patches])
        result_x = np.array([p.get_x() for p in patches])
        result_width = np.array([p.get_width() for p in patches])
        # dtype is depending on above values, no need to check
        if expected_y is not None:
            tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False)
        if expected_h is not None:
            tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False)
        if expected_x is not None:
            tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False)
        if expected_w is not None:
            tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)
    @pytest.mark.parametrize(
        "data",
        [
            {
                "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])),
                "B": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])),
                "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])),
            },
            {
                "A": np.repeat(
                    np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])
                ),
                "B": np.repeat(
                    np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8])
                ),
                "C": np.repeat(
                    np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10])
                ),
            },
        ],
    )
    def test_hist_df_coord(self, data):
        """Verify bar geometry for plain/stacked/subplot histograms, both
        orientations; NaNs (second param set) must not shift bin positions.
        Patches are ordered column-by-column: A -> [:5], B -> [5:10], C -> [10:].
        """
        df = DataFrame(data)
        # unstacked vertical: all columns start at y=0
        ax = df.plot.hist(bins=5)
        self._check_box_coord(
            ax.patches[:5],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([6, 7, 8, 9, 10]),
        )
        # stacked vertical: each column's bars start where the previous ended
        ax = df.plot.hist(bins=5, stacked=True)
        self._check_box_coord(
            ax.patches[:5],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_y=np.array([10, 9, 8, 7, 6]),
            expected_h=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_y=np.array([18, 17, 16, 15, 14]),
            expected_h=np.array([6, 7, 8, 9, 10]),
        )
        # subplots: stacking has no effect since each column is on its own axes
        axes = df.plot.hist(bins=5, stacked=True, subplots=True)
        self._check_box_coord(
            axes[0].patches,
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            axes[1].patches,
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            axes[2].patches,
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([6, 7, 8, 9, 10]),
        )
        # horizontal
        ax = df.plot.hist(bins=5, orientation="horizontal")
        self._check_box_coord(
            ax.patches[:5],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([6, 7, 8, 9, 10]),
        )
        # stacked horizontal
        ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal")
        self._check_box_coord(
            ax.patches[:5],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_x=np.array([10, 9, 8, 7, 6]),
            expected_w=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_x=np.array([18, 17, 16, 15, 14]),
            expected_w=np.array([6, 7, 8, 9, 10]),
        )
        # horizontal subplots
        axes = df.plot.hist(
            bins=5, stacked=True, subplots=True, orientation="horizontal"
        )
        self._check_box_coord(
            axes[0].patches,
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            axes[1].patches,
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            axes[2].patches,
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([6, 7, 8, 9, 10]),
        )
    def test_plot_int_columns(self):
        """Smoke test: plotting with integer column labels and a legend works."""
        df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))).cumsum()
        _check_plot_works(df.plot, legend=True)
    @pytest.mark.parametrize(
        "markers",
        [
            {0: "^", 1: "+", 2: "o"},
            {0: "^", 1: "+"},
            ["^", "+", "o"],
            ["^", "+"],
        ],
    )
    def test_style_by_column(self, markers):
        """`style=` maps markers to columns; dict or list, possibly partial."""
        fig = plt.gcf()
        fig.clf()
        fig.add_subplot(111)
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 3)))
        ax = df.plot(style=markers)
        # only the first len(markers) lines get an explicit marker
        for idx, line in enumerate(ax.get_lines()[: len(markers)]):
            assert line.get_marker() == markers[idx]
    def test_line_label_none(self):
        """Unnamed Series: no legend by default; empty legend label when forced."""
        s = Series([1, 2])
        ax = s.plot()
        assert ax.get_legend() is None
        ax = s.plot(legend=True)
        assert ax.get_legend().get_texts()[0].get_text() == ""
    @pytest.mark.parametrize(
        "props, expected",
        [
            ("boxprops", "boxes"),
            ("whiskerprops", "whiskers"),
            ("capprops", "caps"),
            ("medianprops", "medians"),
        ],
    )
    def test_specified_props_kwd_plot_box(self, props, expected):
        """Per-element style kwargs reach the corresponding boxplot artists (GH 30346)."""
        # GH 30346
        df = DataFrame({k: np.random.default_rng(2).random(100) for k in "ABC"})
        kwd = {props: {"color": "C1"}}
        result = df.plot.box(return_type="dict", **kwd)
        assert result[expected][0].get_color() == "C1"
    def test_unordered_ts(self):
        """Unsorted date index: data is plotted as given, ticks come out sorted."""
        # GH#2609, GH#55906
        index = [date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)]
        values = [3.0, 2.0, 1.0]
        df = DataFrame(
            np.array(values),
            index=index,
            columns=["test"],
        )
        ax = df.plot()
        xticks = ax.lines[0].get_xdata()
        tm.assert_numpy_array_equal(xticks, np.array(index, dtype=object))
        ydata = ax.lines[0].get_ydata()
        tm.assert_numpy_array_equal(ydata, np.array(values))
        # even though we don't sort the data before passing it to matplotlib,
        # the ticks are sorted
        xticks = ax.xaxis.get_ticklabels()
        xlocs = [x.get_position()[0] for x in xticks]
        assert Index(xlocs).is_monotonic_increasing
        xlabels = [x.get_text() for x in xticks]
        assert pd.to_datetime(xlabels, format="%Y-%m-%d").is_monotonic_increasing
    @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
    def test_kind_both_ways(self, kind):
        """`df.plot(kind=...)` and `df.plot.<kind>()` are both accepted."""
        pytest.importorskip("scipy")
        df = DataFrame({"x": [1, 2, 3]})
        df.plot(kind=kind)
        getattr(df.plot, kind)()
    @pytest.mark.parametrize("kind", ["scatter", "hexbin"])
    def test_kind_both_ways_x_y(self, kind):
        """x/y-requiring kinds are callable via kwarg and accessor forms alike."""
        pytest.importorskip("scipy")
        df = DataFrame({"x": [1, 2, 3]})
        df.plot("x", "x", kind=kind)
        getattr(df.plot, kind)("x", "x")
    @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
    def test_all_invalid_plot_data(self, kind):
        """Plotting a frame with no numeric columns raises TypeError."""
        df = DataFrame(list("abcd"))
        msg = "no numeric data to plot"
        with pytest.raises(TypeError, match=msg):
            df.plot(kind=kind)
    @pytest.mark.parametrize(
        "kind", list(plotting.PlotAccessor._common_kinds) + ["area"]
    )
    def test_partially_invalid_plot_data_numeric(self, kind):
        """Object-dtype data mixing numbers and strings raises TypeError."""
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 2)),
            dtype=object,
        )
        # overwrite roughly half the rows with strings
        df[np.random.default_rng(2).random(df.shape[0]) > 0.5] = "a"
        msg = "no numeric data to plot"
        with pytest.raises(TypeError, match=msg):
            df.plot(kind=kind)
    def test_invalid_kind(self):
        """An unknown `kind` raises ValueError."""
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
        msg = "invalid_plot_kind is not a valid plot kind"
        with pytest.raises(ValueError, match=msg):
            df.plot(kind="invalid_plot_kind")
    @pytest.mark.parametrize(
        "x,y,lbl",
        [
            (["B", "C"], "A", "a"),
            (["A"], ["B", "C"], ["b", "c"]),
        ],
    )
    def test_invalid_xy_args(self, x, y, lbl):
        """List-like `x` is rejected even though list-like `y` is allowed."""
        # GH 18671, 19699 allows y to be list-like but not x
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        with pytest.raises(ValueError, match="x must be a label or position"):
            df.plot(x=x, y=y, label=lbl)
    def test_bad_label(self):
        """A scalar `label` with list-like `y` raises ValueError."""
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        msg = "label should be list-like and same length as y"
        with pytest.raises(ValueError, match=msg):
            df.plot(x="A", y=["B", "C"], label="bad_label")
    @pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")])
    def test_invalid_xy_args_dup_cols(self, x, y):
        """Duplicate column labels make `x` ambiguous and raise ValueError."""
        # GH 18671, 19699 allows y to be list-like but not x
        df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB"))
        with pytest.raises(ValueError, match="x must be a label or position"):
            df.plot(x=x, y=y)
    @pytest.mark.parametrize(
        "x,y,lbl,colors",
        [
            ("A", ["B"], ["b"], ["red"]),
            ("A", ["B", "C"], ["b", "c"], ["red", "blue"]),
            (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]),
        ],
    )
    def test_y_listlike(self, x, y, lbl, colors):
        """List-like `y` plots one line per entry with matching labels/colors."""
        # GH 19699: tests list-like y and verifies lbls & colors
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        _check_plot_works(df.plot, x="A", y=y, label=lbl)
        ax = df.plot(x=x, y=y, label=lbl, color=colors)
        assert len(ax.lines) == len(y)
        _check_colors(ax.get_lines(), linecolors=colors)
    @pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])])
    def test_xy_args_integer(self, x, y, colnames):
        """Integer x/y arguments resolve against both string and int column labels."""
        # GH 20056: tests integer args for xy and checks col names
        df = DataFrame({"A": [1, 2], "B": [3, 4]})
        df.columns = colnames
        _check_plot_works(df.plot, x=x, y=y)
    def test_hexbin_basic(self):
        """Hexbin plot produces a single hexagon collection."""
        df = DataFrame(
            {
                "A": np.random.default_rng(2).uniform(size=20),
                "B": np.random.default_rng(2).uniform(size=20),
                "C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
            }
        )
        ax = df.plot.hexbin(x="A", y="B", gridsize=10)
        # TODO: need better way to test. This just does existence.
        assert len(ax.collections) == 1
    def test_hexbin_basic_subplots(self):
        """Hexbin with subplots=True yields one plotting axes plus a colorbar (GH 6951)."""
        df = DataFrame(
            {
                "A": np.random.default_rng(2).uniform(size=20),
                "B": np.random.default_rng(2).uniform(size=20),
                "C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
            }
        )
        # GH 6951
        axes = df.plot.hexbin(x="A", y="B", subplots=True)
        # hexbin should have 2 axes in the figure, 1 for plotting and another
        # is colorbar
        assert len(axes[0].figure.axes) == 2
        # return value is single axes
        _check_axes_shape(axes, axes_num=1, layout=(1, 1))
    @pytest.mark.parametrize("reduce_C", [None, np.std])
    def test_hexbin_with_c(self, reduce_C):
        """Hexbin accepts a `C` column, with or without a reduce_C_function."""
        df = DataFrame(
            {
                "A": np.random.default_rng(2).uniform(size=20),
                "B": np.random.default_rng(2).uniform(size=20),
                "C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
            }
        )
        ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=reduce_C)
        assert len(ax.collections) == 1
    @pytest.mark.parametrize(
        "kwargs, expected",
        [
            ({}, "BuGn"),  # default cmap
            ({"colormap": "cubehelix"}, "cubehelix"),
            ({"cmap": "YlGn"}, "YlGn"),
        ],
    )
    def test_hexbin_cmap(self, kwargs, expected):
        """Hexbin colormap defaults to BuGn; `colormap`/`cmap` both override it."""
        df = DataFrame(
            {
                "A": np.random.default_rng(2).uniform(size=20),
                "B": np.random.default_rng(2).uniform(size=20),
                "C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
            }
        )
        ax = df.plot.hexbin(x="A", y="B", **kwargs)
        assert ax.collections[0].cmap.name == expected
    def test_pie_df_err(self):
        """DataFrame pie without `y` or `subplots=True` raises ValueError."""
        df = DataFrame(
            np.random.default_rng(2).random((5, 3)),
            columns=["X", "Y", "Z"],
            index=["a", "b", "c", "d", "e"],
        )
        msg = "pie requires either y column or 'subplots=True'"
        with pytest.raises(ValueError, match=msg):
            df.plot.pie()
    @pytest.mark.parametrize("y", ["Y", 2])
    def test_pie_df(self, y):
        """Pie plot of one column labels wedges with the index values."""
        df = DataFrame(
            np.random.default_rng(2).random((5, 3)),
            columns=["X", "Y", "Z"],
            index=["a", "b", "c", "d", "e"],
        )
        ax = _check_plot_works(df.plot.pie, y=y)
        _check_text_labels(ax.texts, df.index)
def test_pie_df_subplots(self):
df = DataFrame(
np.random.default_rng(2).random((5, 3)),
columns=["X", "Y", "Z"],
index=["a", "b", "c", "d", "e"],
)
axes = _check_plot_works(
df.plot.pie,
default_axes=True,
subplots=True,
)
assert len(axes) == len(df.columns)
for ax in axes:
_check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns, strict=True):
assert ax.get_ylabel() == ""
    def test_pie_df_labels_colors(self):
        """Pie subplots honor explicit `labels=` and `colors=` on every subplot."""
        df = DataFrame(
            np.random.default_rng(2).random((5, 3)),
            columns=["X", "Y", "Z"],
            index=["a", "b", "c", "d", "e"],
        )
        labels = ["A", "B", "C", "D", "E"]
        color_args = ["r", "g", "b", "c", "m"]
        axes = _check_plot_works(
            df.plot.pie,
            default_axes=True,
            subplots=True,
            labels=labels,
            colors=color_args,
        )
        assert len(axes) == len(df.columns)
        for ax in axes:
            _check_text_labels(ax.texts, labels)
            _check_colors(ax.patches, facecolors=color_args)
    def test_pie_df_nan(self):
        """NaN wedges get an empty label and are dropped from the legend (GH 8390)."""
        df = DataFrame(np.random.default_rng(2).random((4, 4)))
        # put one NaN on the diagonal so column i is missing entry i
        for i in range(4):
            df.iloc[i, i] = np.nan
        _, axes = mpl.pyplot.subplots(ncols=4)
        # GH 37668
        kwargs = {"normalize": True}
        with tm.assert_produces_warning(None):
            df.plot.pie(subplots=True, ax=axes, legend=True, **kwargs)
        base_expected = ["0", "1", "2", "3"]
        for i, ax in enumerate(axes):
            expected = list(base_expected)  # force copy
            expected[i] = ""
            result = [x.get_text() for x in ax.texts]
            assert result == expected
            # legend labels
            # NaN's not included in legend with subplots
            # see https://github.com/pandas-dev/pandas/issues/8390
            result_labels = [x.get_text() for x in ax.get_legend().get_texts()]
            expected_labels = base_expected[:i] + base_expected[i + 1 :]
            assert result_labels == expected_labels
    @pytest.mark.slow
    @pytest.mark.parametrize(
        "kwargs",
        [
            {"logy": True},
            {"logx": True, "logy": True},
            {"loglog": True},
        ],
    )
    def test_errorbar_plot(self, kwargs):
        """Line plots with a DataFrame `yerr` work under log-scale options."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
        df_err = DataFrame(d_err)
        # check line plots
        ax = _check_plot_works(df.plot, yerr=df_err, **kwargs)
        _check_has_errorbars(ax, xerr=0, yerr=2)
    @pytest.mark.slow
    def test_errorbar_plot_bar(self):
        """Log-scale bar plot carries both x and y error bars for each column."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
        df_err = DataFrame(d_err)
        # df + 1 keeps all values positive for the log scale
        ax = _check_plot_works(
            (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
        )
        _check_has_errorbars(ax, xerr=2, yerr=2)
    @pytest.mark.slow
    def test_errorbar_plot_yerr_array(self):
        """Raw ndarray `yerr` works for a Series (1d) and a DataFrame (2d)."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        # yerr is raw error values
        ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
        _check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
        _check_has_errorbars(ax, xerr=0, yerr=2)
    @pytest.mark.slow
    @pytest.mark.parametrize("yerr", ["yerr", "誤差"])
    def test_errorbar_plot_column_name(self, yerr):
        """`yerr` as a column label works, including non-ASCII names."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        df[yerr] = np.ones(12) * 0.2
        ax = _check_plot_works(df.plot, yerr=yerr)
        _check_has_errorbars(ax, xerr=0, yerr=2)
        ax = _check_plot_works(df.plot, y="y", x="x", yerr=yerr)
        _check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_plot_external_valueerror(self):
        """A `yerr` array of mismatched length propagates matplotlib's ValueError."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        with tm.external_error_raised(ValueError):
            df.plot(yerr=np.random.default_rng(2).standard_normal(11))
    @pytest.mark.slow
    def test_errorbar_plot_external_typeerror(self):
        """Non-numeric error values propagate matplotlib's TypeError."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12})
        with tm.external_error_raised(TypeError):
            df.plot(yerr=df_err)
    @pytest.mark.slow
    @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
    @pytest.mark.parametrize(
        "y_err",
        [
            Series(np.ones(12) * 0.2, name="x"),
            DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}),
        ],
    )
    def test_errorbar_plot_different_yerr(self, kind, y_err):
        """Series and DataFrame forms of `yerr` work for line/bar/barh kinds."""
        df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)})
        ax = _check_plot_works(df.plot, yerr=y_err, kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=2)
    @pytest.mark.slow
    @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
    @pytest.mark.parametrize(
        "y_err, x_err",
        [
            (
                DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}),
                DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}),
            ),
            (Series(np.ones(12) * 0.2, name="x"), Series(np.ones(12) * 0.2, name="x")),
            (0.2, 0.2),
        ],
    )
    def test_errorbar_plot_different_yerr_xerr(self, kind, y_err, x_err):
        """DataFrame/Series/scalar error specs work for both axes simultaneously."""
        df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)})
        ax = _check_plot_works(df.plot, yerr=y_err, xerr=x_err, kind=kind)
        _check_has_errorbars(ax, xerr=2, yerr=2)
    @pytest.mark.slow
    @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
    def test_errorbar_plot_different_yerr_xerr_subplots(self, kind):
        """With subplots, each axes gets one x and one y error bar set."""
        df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)})
        df_err = DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4})
        axes = _check_plot_works(
            df.plot,
            default_axes=True,
            yerr=df_err,
            xerr=df_err,
            subplots=True,
            kind=kind,
        )
        _check_has_errorbars(axes, xerr=1, yerr=1)
    @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
    def test_errorbar_plot_iterator(self):
        """Iterator `yerr` is currently unsupported (consumed once, then empty)."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        # yerr is iterator
        ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
        _check_has_errorbars(ax, xerr=0, yerr=2)
    def test_errorbar_with_integer_column_names(self):
        """Error bars resolve against integer column labels, incl. yerr-by-position."""
        # test with integer column names
        df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2))))
        df_err = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2))))
        ax = _check_plot_works(df.plot, yerr=df_err)
        _check_has_errorbars(ax, xerr=0, yerr=2)
        ax = _check_plot_works(df.plot, y=0, yerr=1)
        _check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    @pytest.mark.parametrize("kind", ["line", "bar"])
    def test_errorbar_with_partial_columns_kind(self, kind):
        """An error frame covering only some columns adds bars just for those."""
        df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3))))
        df_err = DataFrame(
            np.abs(np.random.default_rng(2).standard_normal((10, 2))), columns=[0, 2]
        )
        ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=2)
    @pytest.mark.slow
    def test_errorbar_with_partial_columns_dti(self):
        """Partial-column error bars also work on a DatetimeIndex."""
        df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3))))
        df_err = DataFrame(
            np.abs(np.random.default_rng(2).standard_normal((10, 2))), columns=[0, 2]
        )
        ix = date_range("1/1/2000", periods=10, freq="ME")
        df.set_index(ix, inplace=True)
        df_err.set_index(ix, inplace=True)
        ax = _check_plot_works(df.plot, yerr=df_err, kind="line")
        _check_has_errorbars(ax, xerr=0, yerr=2)
    @pytest.mark.slow
    @pytest.mark.parametrize("err_box", [lambda x: x, DataFrame])
    def test_errorbar_with_partial_columns_box(self, err_box):
        """Error specs (dict or DataFrame) with a non-matching key only bar the match."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        # 'z' has no matching column in df, so only 'x' gets error bars
        err = err_box({"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4})
        ax = _check_plot_works(df.plot, yerr=err)
        _check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
    def test_errorbar_timeseries(self, kind):
        """Error bars work on time-series data via frame, dict, Series, or label."""
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
        # check time-series plots
        ix = date_range("1/1/2000", "1/1/2001", freq="ME")
        tdf = DataFrame(d, index=ix)
        tdf_err = DataFrame(d_err, index=ix)
        ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=2)
        ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=2)
        ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
        _check_has_errorbars(ax, xerr=0, yerr=2)
        axes = _check_plot_works(
            tdf.plot,
            default_axes=True,
            kind=kind,
            yerr=tdf_err,
            subplots=True,
        )
        _check_has_errorbars(axes, xerr=0, yerr=1)
    def test_errorbar_asymmetrical(self):
        """3-d (col, 2, n) error arrays give asymmetric bars; wrong shape raises."""
        err = np.random.default_rng(2).random((3, 2, 5))
        # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...
        df = DataFrame(np.arange(15).reshape(3, 5)).T
        ax = df.plot(yerr=err, xerr=err / 2)
        # first point of first column: bar spans value -lower/+upper errors
        yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
        expected_0_0 = err[0, :, 0] * np.array([-1, 1])
        tm.assert_almost_equal(yerr_0_0, expected_0_0)
        msg = re.escape(
            "Asymmetrical error bars should be provided with the shape (3, 2, 5)"
        )
        with pytest.raises(ValueError, match=msg):
            df.plot(yerr=err.T)
    def test_table(self):
        """`table=` kwarg and `plotting.table()` both attach a table to the axes."""
        df = DataFrame(
            np.random.default_rng(2).random((10, 3)),
            index=list(string.ascii_letters[:10]),
        )
        _check_plot_works(df.plot, table=True)
        _check_plot_works(df.plot, table=df)
        # GH 35945 UserWarning
        with tm.assert_produces_warning(None):
            ax = df.plot()
            assert len(ax.tables) == 0
            plotting.table(ax, df.T)
            assert len(ax.tables) == 1
    def test_errorbar_scatter(self):
        """Scatter supports xerr and yerr independently and together."""
        df = DataFrame(
            np.abs(np.random.default_rng(2).standard_normal((5, 2))),
            index=range(5),
            columns=["x", "y"],
        )
        df_err = DataFrame(
            np.abs(np.random.default_rng(2).standard_normal((5, 2))) / 5,
            index=range(5),
            columns=["x", "y"],
        )
        ax = _check_plot_works(df.plot.scatter, x="x", y="y")
        _check_has_errorbars(ax, xerr=0, yerr=0)
        ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err)
        _check_has_errorbars(ax, xerr=1, yerr=0)
        ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err)
        _check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err)
        _check_has_errorbars(ax, xerr=1, yerr=1)
def test_errorbar_scatter_color(self):
def _check_errorbar_color(containers, expected, has_err="has_xerr"):
lines = []
errs = next(c.lines for c in ax.containers if getattr(c, has_err, False))
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
_check_colors(err_lines, linecolors=np.array([expected] * len(err_lines)))
# GH 8081
df = DataFrame(
np.abs(np.random.default_rng(2).standard_normal((10, 5))),
columns=["a", "b", "c", "d", "e"],
)
ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red")
_check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, "red", has_err="has_xerr")
_check_errorbar_color(ax.containers, "red", has_err="has_yerr")
ax = df.plot.scatter(x="a", y="b", yerr="e", color="green")
_check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, "green", has_err="has_yerr")
def test_scatter_unknown_colormap(self):
# GH#48726
df = DataFrame({"a": [1, 2, 3], "b": 4})
with pytest.raises((ValueError, KeyError), match="'unknown' is not a"):
df.plot(x="a", y="b", colormap="unknown", kind="scatter")
def test_sharex_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axis in fig.get_axis() are sorted differently than pandas
# expected them, so make sure that only the right ones are removed
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6],
}
)
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
_check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
_check_visible(ax.get_xticklabels(), visible=False)
_check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
plt.close("all")
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
def test_sharex_false_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axis in fig.get_axis() are sorted differently than pandas
# expected them, so make sure that only the right ones are removed
df = DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6],
}
)
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
_check_visible(ax.get_yticklabels(), visible=True)
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_sharey_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axis in fig.get_axis() are sorted differently than pandas
# expected them, so make sure that only the right ones are removed
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6],
}
)
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
_check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
_check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
plt.close("all")
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
def test_sharey_and_ax_tight(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
df = DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6],
}
)
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
_check_visible(ax.get_yticklabels(), visible=True)
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
@pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds)
def test_memory_leak(self, kind):
"""Check that every plot type gets properly collected."""
pytest.importorskip("scipy")
args = {}
if kind in ["hexbin", "scatter", "pie"]:
df = DataFrame(
{
"A": np.random.default_rng(2).uniform(size=20),
"B": np.random.default_rng(2).uniform(size=20),
"C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
}
)
args = {"x": "A", "y": "B"}
elif kind == "area":
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
).abs()
else:
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
ax = df.plot(kind=kind, **args)
# https://github.com/pandas-dev/pandas/issues/9003#issuecomment-70544889
if kind in ["line", "area"]:
for i, (cached_data, _, _) in enumerate(ax._plot_data):
ser = df.iloc[:, i]
assert not tm.shares_memory(ser, cached_data)
tm.assert_numpy_array_equal(ser._values, cached_data._values)
else:
assert not hasattr(ax, "_plot_data")
def test_df_gridspec_patterns_vert_horiz(self):
# GH 10819
ts = Series(
np.random.default_rng(2).standard_normal(10),
index=date_range("1/1/2000", periods=10),
)
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)),
index=ts.index,
columns=list("AB"),
)
def _get_vertical_grid():
gs = mpl.gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = mpl.gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
assert len(ax1.lines) == 1
ax2 = df.plot(ax=ax2)
assert len(ax2.lines) == 2
for ax in [ax1, ax2]:
_check_visible(ax.get_yticklabels(), visible=True)
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
plt.close("all")
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
assert len(ax1.lines) == 1
assert len(ax2.lines) == 1
for ax in axes:
_check_visible(ax.get_yticklabels(), visible=True)
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
plt.close("all")
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
for ax in [ax1, ax2]:
# yaxis are visible because there is only one column
_check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
_check_visible(axes[0].get_xticklabels(), visible=False)
_check_visible(axes[0].get_xticklabels(minor=True), visible=False)
_check_visible(axes[1].get_xticklabels(), visible=True)
_check_visible(axes[1].get_xticklabels(minor=True), visible=True)
plt.close("all")
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
_check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
_check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# xaxis are visible because there is only one column
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
plt.close("all")
def test_df_gridspec_patterns_boxed(self):
# GH 10819
ts = Series(
np.random.default_rng(2).standard_normal(10),
index=date_range("1/1/2000", periods=10),
)
# boxed
def _get_boxed_grid():
gs = mpl.gridspec.GridSpec(3, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
index=ts.index,
columns=list("ABCD"),
)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
# axis are visible because these are not shared
_check_visible(ax.get_yticklabels(), visible=True)
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
plt.close("all")
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
assert len(ax.lines) == 1
for ax in [axes[0], axes[2]]: # left column
_check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
_check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
_check_visible(ax.get_xticklabels(), visible=False)
_check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
_check_visible(ax.get_xticklabels(), visible=True)
_check_visible(ax.get_xticklabels(minor=True), visible=True)
plt.close("all")
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
_check_grid_settings(
DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}),
plotting.PlotAccessor._dataframe_kinds,
kws={"x": "a", "y": "b"},
)
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but figure contains also
# a plain Axes object (GH11556)
fig, ax = mpl.pyplot.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(np.random.default_rng(2).random(10)).plot(ax=ax)
def test_plain_axes_df(self):
# supplied ax itself is a plain Axes, but because the cmap keyword
# a new ax is created for the colorbar -> also multiples axes (GH11520)
df = DataFrame(
{
"a": np.random.default_rng(2).standard_normal(8),
"b": np.random.default_rng(2).standard_normal(8),
}
)
fig = mpl.pyplot.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv")
def test_plain_axes_make_axes_locatable(self):
# other examples
fig, ax = mpl.pyplot.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(np.random.default_rng(2).random(10)).plot(ax=ax)
Series(np.random.default_rng(2).random(10)).plot(ax=cax)
def test_plain_axes_make_inset_axes(self):
fig, ax = mpl.pyplot.subplots()
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1.0, loc=3)
Series(np.random.default_rng(2).random(10)).plot(ax=ax)
Series(np.random.default_rng(2).random(10)).plot(ax=iax)
@pytest.mark.parametrize("method", ["line", "barh", "bar"])
def test_secondary_axis_font_size(self, method):
# GH: 12565
df = (
DataFrame(
np.random.default_rng(2).standard_normal((15, 2)), columns=list("AB")
)
.assign(C=lambda df: df.B.cumsum())
.assign(D=lambda df: df.C * 1.1)
)
fontsize = 20
sy = ["C", "D"]
kwargs = {"secondary_y": sy, "fontsize": fontsize, "mark_right": True}
ax = getattr(df.plot, method)(**kwargs)
_check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
def test_x_string_values_ticks(self):
# Test if string plot index have a fixed xtick position
# GH: 7612, GH: 22334
df = DataFrame(
{
"sales": [3, 2, 3],
"visits": [20, 42, 28],
"day": ["Monday", "Tuesday", "Wednesday"],
}
)
ax = df.plot.area(x="day")
ax.set_xlim(-1, 3)
xticklabels = [t.get_text() for t in ax.get_xticklabels()]
labels_position = dict(zip(xticklabels, ax.get_xticks(), strict=False))
# Testing if the label stayed at the right position
assert labels_position["Monday"] == 0.0
assert labels_position["Tuesday"] == 1.0
assert labels_position["Wednesday"] == 2.0
def test_x_multiindex_values_ticks(self):
# Test if multiindex plot index have a fixed xtick position
# GH: 15912
index = MultiIndex.from_product([[2012, 2013], [1, 2]])
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 2)),
columns=["A", "B"],
index=index,
)
ax = df.plot()
ax.set_xlim(-1, 4)
xticklabels = [t.get_text() for t in ax.get_xticklabels()]
labels_position = dict(zip(xticklabels, ax.get_xticks(), strict=False))
# Testing if the label stayed at the right position
assert labels_position["(2012, 1)"] == 0.0
assert labels_position["(2012, 2)"] == 1.0
assert labels_position["(2013, 1)"] == 2.0
assert labels_position["(2013, 2)"] == 3.0
@pytest.mark.parametrize("kind", ["line", "area"])
def test_xlim_plot_line(self, kind):
# test if xlim is set correctly in plot.line and plot.area
# GH 27686
df = DataFrame([2, 4], index=[1, 2])
ax = df.plot(kind=kind)
xlims = ax.get_xlim()
assert xlims[0] < 1
assert xlims[1] > 2
def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
# test if xlim is set correctly when ax contains multiple different kinds
# of plots, GH 27686
fig, ax = mpl.pyplot.subplots()
indexes = ["k1", "k2", "k3", "k4"]
df = DataFrame(
{
"s1": [1000, 2000, 1500, 2000],
"s2": [900, 1400, 2000, 3000],
"s3": [1500, 1500, 1600, 1200],
"secondary_y": [1, 3, 4, 3],
},
index=indexes,
)
df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False)
df[["secondary_y"]].plot(ax=ax, secondary_y=True)
xlims = ax.get_xlim()
assert xlims[0] < 0
assert xlims[1] > 3
# make sure axis labels are plotted correctly as well
xticklabels = [t.get_text() for t in ax.get_xticklabels()]
assert xticklabels == indexes
def test_plot_no_rows(self):
# GH 27758
df = DataFrame(columns=["foo"], dtype=int)
assert df.empty
ax = df.plot()
assert len(ax.get_lines()) == 1
line = ax.get_lines()[0]
assert len(line.get_xdata()) == 0
assert len(line.get_ydata()) == 0
def test_plot_no_numeric_data(self):
df = DataFrame(["a", "b", "c"])
with pytest.raises(TypeError, match="no numeric data to plot"):
df.plot()
@pytest.mark.parametrize(
"kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie")
)
def test_group_subplot(self, kind):
pytest.importorskip("scipy")
d = {
"a": np.arange(10),
"b": np.arange(10) + 1,
"c": np.arange(10) + 1,
"d": np.arange(10),
"e": np.arange(10),
}
df = DataFrame(d)
axes = df.plot(subplots=[("b", "e"), ("c", "d")], kind=kind)
assert len(axes) == 3 # 2 groups + single column a
expected_labels = (["b", "e"], ["c", "d"], ["a"])
for ax, labels in zip(axes, expected_labels, strict=True):
if kind != "pie":
_check_legend_labels(ax, labels=labels)
if kind == "line":
assert len(ax.lines) == len(labels)
def test_group_subplot_series_notimplemented(self):
ser = Series(range(1))
msg = "An iterable subplots for a Series"
with pytest.raises(NotImplementedError, match=msg):
ser.plot(subplots=[("a",)])
def test_group_subplot_multiindex_notimplemented(self):
df = DataFrame(np.eye(2), columns=MultiIndex.from_tuples([(0, 1), (1, 2)]))
msg = "An iterable subplots for a DataFrame with a MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
df.plot(subplots=[(0, 1)])
def test_group_subplot_nonunique_cols_notimplemented(self):
df = DataFrame(np.eye(2), columns=["a", "a"])
msg = "An iterable subplots for a DataFrame with non-unique"
with pytest.raises(NotImplementedError, match=msg):
df.plot(subplots=[("a",)])
@pytest.mark.parametrize(
"subplots, expected_msg",
[
(123, "subplots should be a bool or an iterable"),
("a", "each entry should be a list/tuple"), # iterable of non-iterable
((1,), "each entry should be a list/tuple"), # iterable of non-iterable
(("a",), "each entry should be a list/tuple"), # iterable of strings
],
)
def test_group_subplot_bad_input(self, subplots, expected_msg):
# Make sure error is raised when subplots is not a properly
# formatted iterable. Only iterables of iterables are permitted, and
# entries should not be strings.
d = {"a": np.arange(10), "b": np.arange(10)}
df = DataFrame(d)
with pytest.raises(ValueError, match=expected_msg):
df.plot(subplots=subplots)
def test_group_subplot_invalid_column_name(self):
d = {"a": np.arange(10), "b": np.arange(10)}
df = DataFrame(d)
if Version(np.__version__) < Version("2.0.0"):
with pytest.raises(ValueError, match=r"Column label\(s\) \['bad_name'\]"):
df.plot(subplots=[("a", "bad_name")])
else:
with pytest.raises(
ValueError, match=r"Column label\(s\) \[np\.str\_\('bad_name'\)\]"
):
df.plot(subplots=[("a", "bad_name")])
def test_group_subplot_duplicated_column(self):
d = {"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
df = DataFrame(d)
with pytest.raises(ValueError, match="should be in only one subplot"):
df.plot(subplots=[("a", "b"), ("a", "c")])
@pytest.mark.parametrize("kind", ("box", "scatter", "hexbin"))
def test_group_subplot_invalid_kind(self, kind):
d = {"a": np.arange(10), "b": np.arange(10)}
df = DataFrame(d)
with pytest.raises(
ValueError, match="When subplots is an iterable, kind must be one of"
):
df.plot(subplots=[("a", "b")], kind=kind)
@pytest.mark.parametrize(
"index_name, old_label, new_label",
[
(None, "", "new"),
("old", "old", "new"),
(None, "", ""),
(None, "", 1),
(None, "", [1, 2]),
],
)
@pytest.mark.parametrize("kind", ["line", "area", "bar"])
def test_xlabel_ylabel_dataframe_single_plot(
self, kind, index_name, old_label, new_label
):
# GH 9093
df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"])
df.index.name = index_name
# default is the ylabel is not shown and xlabel is index name
ax = df.plot(kind=kind)
assert ax.get_xlabel() == old_label
assert ax.get_ylabel() == ""
# old xlabel will be overridden and assigned ylabel will be used as ylabel
ax = df.plot(kind=kind, ylabel=new_label, xlabel=new_label)
assert ax.get_ylabel() == str(new_label)
assert ax.get_xlabel() == str(new_label)
@pytest.mark.parametrize(
"xlabel, ylabel",
[
(None, None),
("X Label", None),
(None, "Y Label"),
("X Label", "Y Label"),
],
)
@pytest.mark.parametrize("kind", ["scatter", "hexbin"])
def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel):
# GH 37001
xcol = "Type A"
ycol = "Type B"
df = DataFrame([[1, 2], [2, 5]], columns=[xcol, ycol])
# default is the labels are column names
ax = df.plot(kind=kind, x=xcol, y=ycol, xlabel=xlabel, ylabel=ylabel)
assert ax.get_xlabel() == (xcol if xlabel is None else xlabel)
assert ax.get_ylabel() == (ycol if ylabel is None else ylabel)
@pytest.mark.parametrize("secondary_y", (False, True))
def test_secondary_y(self, secondary_y):
ax_df = DataFrame([0]).plot(
secondary_y=secondary_y, ylabel="Y", ylim=(0, 100), yticks=[99]
)
for ax in ax_df.figure.axes:
if ax.yaxis.get_visible():
assert ax.get_ylabel() == "Y"
assert ax.get_ylim() == (0, 100)
assert ax.get_yticks()[0] == 99
@pytest.mark.slow
def test_plot_no_warning(self):
# GH 55138
# TODO(3.0): this can be removed once Period[B] deprecation is enforced
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
with tm.assert_produces_warning(False):
_ = df.plot()
_ = df.T.plot()
@pytest.mark.parametrize("freq", ["h", "7h", "60min", "120min", "3M"])
def test_plot_period_index_makes_no_right_shift(self, freq):
# GH#57587
idx = pd.period_range("01/01/2000", freq=freq, periods=4)
df = DataFrame(
np.array([0, 1, 0, 1]),
index=idx,
columns=["A"],
)
expected = idx.values
ax = df.plot()
result = ax.get_lines()[0].get_xdata()
assert all(str(result[i]) == str(expected[i]) for i in range(4))
def test_plot_display_xlabel_and_xticks(self):
# GH#44050
df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=["a", "b"])
ax = df.plot.hexbin(x="a", y="b")
_check_visible([ax.xaxis.get_label()], visible=True)
_check_visible(ax.get_xticklabels(), visible=True)
def _generate_4_axes_via_gridspec():
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
ax_ll = plt.subplot(gs[1, 0])
ax_tr = plt.subplot(gs[0, 1])
ax_lr = plt.subplot(gs[1, 1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
| TestDataFramePlots |
python | getsentry__sentry | src/sentry/issues/endpoints/group_attachments.py | {
"start": 2643,
"end": 5186
} | class ____(GroupEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, group) -> Response:
"""
List Event Attachments
``````````````````````
Returns a list of event attachments for an issue.
:pparam string issue_id: the ID of the issue to retrieve.
:pparam list types: a list of attachment types to filter for.
:qparam string start: Beginning date. You must also provide ``end``.
:qparam string end: End date. You must also provide ``start``.
:qparam string statsPeriod: An optional stat period (defaults to ``"90d"``).
:qparam string query: If set, will filter to only attachments from events matching that query.
:qparam string environment: If set, will filter to only attachments from events within a specific environment.
:auth: required
"""
if not features.has(
"organizations:event-attachments", group.project.organization, actor=request.user
):
return self.respond(status=404)
attachments = EventAttachment.objects.filter(group_id=group.id)
types = request.GET.getlist("types") or ()
event_ids = request.GET.getlist("event_id") or None
screenshot = "screenshot" in request.GET
try:
start, end = get_date_range_from_params(request.GET, optional=True)
except InvalidParams as e:
raise ParseError(detail=str(e))
if start:
attachments = attachments.filter(date_added__gte=start)
if end:
attachments = attachments.filter(date_added__lte=end)
if not event_ids:
event_ids = get_event_ids_from_filters(
request=request,
group=group,
start=start,
end=end,
)
if screenshot:
attachments = event_attachment_screenshot_filter(attachments)
if types:
attachments = attachments.filter(type__in=types)
# If event_ids is [], we still want attachments to filter to an empty list.
if event_ids is not None:
attachments = attachments.filter(event_id__in=event_ids)
return self.paginate(
request=request,
queryset=attachments,
order_by="-date_added",
on_results=lambda x: serialize(x, request.user, EventAttachmentSerializer()),
paginator_cls=DateTimePaginator,
)
| GroupAttachmentsEndpoint |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 61111,
"end": 65411
} | class ____(
_LiteralRoundTripFixture, fixtures.TablesTest
):
"""test JSON index access with "cast to string", which we have documented
for a long time as how to compare JSON values, but is ultimately not
reliable in all cases. The "as_XYZ()" comparators should be used
instead.
"""
__requires__ = ("json_type", "legacy_unconditional_json_extract")
__backend__ = True
datatype = JSON
data1 = {"key1": "value1", "key2": "value2"}
data2 = {
"Key 'One'": "value1",
"key two": "value2",
"key three": "value ' three '",
}
data3 = {
"key1": [1, 2, 3],
"key2": ["one", "two", "three"],
"key3": [{"four": "five"}, {"six": "seven"}],
}
data4 = ["one", "two", "three"]
data5 = {
"nested": {
"elem1": [{"a": "b", "c": "d"}, {"e": "f", "g": "h"}],
"elem2": {"elem3": {"elem4": "elem5"}},
}
}
data6 = {"a": 5, "b": "some value", "c": {"foo": "bar"}}
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), nullable=False),
Column("data", cls.datatype),
Column("nulldata", cls.datatype(none_as_null=True)),
)
def _criteria_fixture(self):
with config.db.begin() as conn:
conn.execute(
self.tables.data_table.insert(),
[
{"name": "r1", "data": self.data1},
{"name": "r2", "data": self.data2},
{"name": "r3", "data": self.data3},
{"name": "r4", "data": self.data4},
{"name": "r5", "data": self.data5},
{"name": "r6", "data": self.data6},
],
)
def _test_index_criteria(self, crit, expected, test_literal=True):
self._criteria_fixture()
with config.db.connect() as conn:
stmt = select(self.tables.data_table.c.name).where(crit)
eq_(conn.scalar(stmt), expected)
if test_literal:
literal_sql = str(
stmt.compile(
config.db, compile_kwargs={"literal_binds": True}
)
)
eq_(conn.exec_driver_sql(literal_sql).scalar(), expected)
def test_string_cast_crit_spaces_in_key(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
# limit the rows here to avoid PG error
# "cannot extract field from a non-object", which is
# fixed in 9.4 but may exist in 9.3
self._test_index_criteria(
and_(
name.in_(["r1", "r2", "r3"]),
cast(col["key two"], String) == '"value2"',
),
"r2",
)
@config.requirements.json_array_indexes
def test_string_cast_crit_simple_int(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
# limit the rows here to avoid PG error
# "cannot extract array element from a non-array", which is
# fixed in 9.4 but may exist in 9.3
self._test_index_criteria(
and_(
name == "r4",
cast(col[1], String) == '"two"',
),
"r4",
)
def test_string_cast_crit_mixed_path(self):
col = self.tables.data_table.c["data"]
self._test_index_criteria(
cast(col[("key3", 1, "six")], String) == '"seven"',
"r3",
)
def test_string_cast_crit_string_path(self):
col = self.tables.data_table.c["data"]
self._test_index_criteria(
cast(col[("nested", "elem2", "elem3", "elem4")], String)
== '"elem5"',
"r5",
)
def test_string_cast_crit_against_string_basic(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
self._test_index_criteria(
and_(
name == "r6",
cast(col["b"], String) == '"some value"',
),
"r6",
)
| JSONLegacyStringCastIndexTest |
python | facebookresearch__faiss | tests/test_local_search_quantizer.py | {
"start": 2847,
"end": 9741
} | class ____(unittest.TestCase):
def test_decode(self):
"""Test LSQ decode"""
d = 16
n = 500
M = 4
nbits = 6
K = (1 << nbits)
rs = np.random.RandomState(123)
x = rs.rand(n, d).astype(np.float32)
codes = rs.randint(0, K, (n, M)).astype(np.int32)
lsq = faiss.LocalSearchQuantizer(d, M, nbits)
lsq.train(x)
# decode x
pack_codes = np.zeros((n, lsq.code_size)).astype(np.uint8)
decoded_x = np.zeros((n, d)).astype(np.float32)
lsq.pack_codes(n, sp(codes), sp(pack_codes))
lsq.decode_c(sp(pack_codes), sp(decoded_x), n)
# decode in Python
codebooks = faiss.vector_float_to_array(lsq.codebooks)
codebooks = codebooks.reshape(M, K, d).copy()
decoded_x_ref = decode_ref(x, codebooks, codes)
np.testing.assert_allclose(decoded_x, decoded_x_ref, rtol=1e-6)
@unittest.skipIf(platform.system() == 'Windows',
'Does not work on Windows after numpy 2 upgrade.')
def test_update_codebooks(self):
"""Test codebooks updatation."""
d = 16
n = 500
M = 4
nbits = 6
K = (1 << nbits)
# set a larger value to make the updating process more stable
lambd = 1e-2
rs = np.random.RandomState(123)
x = rs.rand(n, d).astype(np.float32)
codes = rs.randint(0, K, (n, M)).astype(np.int32)
lsq = faiss.LocalSearchQuantizer(d, M, nbits)
lsq.lambd = lambd
lsq.train(x) # just for allocating memory for codebooks
codebooks = faiss.vector_float_to_array(lsq.codebooks)
codebooks = codebooks.reshape(M, K, d).copy()
lsq.update_codebooks(sp(x), sp(codes), n)
new_codebooks = faiss.vector_float_to_array(lsq.codebooks)
new_codebooks = new_codebooks.reshape(M, K, d).copy()
ref_codebooks = update_codebooks_ref(x, codes, K, lambd)
np.testing.assert_allclose(new_codebooks, ref_codebooks, rtol=1e-3, atol=1e-3)
def test_update_codebooks_with_double(self):
"""If the data is not zero-centering, it would be more accurate to
use double-precision floating-point numbers."""
ds = datasets.SyntheticDataset(16, 1000, 1000, 0)
xt = ds.get_train() + 1000
xb = ds.get_database() + 1000
M = 4
nbits = 4
lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
lsq.train(xt)
err_double = eval_codec(lsq, xb)
lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
lsq.update_codebooks_with_double = False
lsq.train(xt)
err_float = eval_codec(lsq, xb)
# 6533.377 vs 25457.99
self.assertLess(err_double, err_float)
def test_compute_binary_terms(self):
d = 16
n = 500
M = 4
nbits = 6
K = (1 << nbits)
rs = np.random.RandomState(123)
x = rs.rand(n, d).astype(np.float32)
binaries = np.zeros((M, M, K, K)).astype(np.float32)
lsq = faiss.LocalSearchQuantizer(d, M, nbits)
lsq.train(x) # just for allocating memory for codebooks
lsq.compute_binary_terms(sp(binaries))
codebooks = faiss.vector_float_to_array(lsq.codebooks)
codebooks = codebooks.reshape(M, K, d).copy()
ref_binaries = compute_binary_terms_ref(codebooks)
np.testing.assert_allclose(
binaries, ref_binaries, rtol=1e-4, atol=1e-4
)
def test_compute_unary_terms(self):
d = 16
n = 500
M = 4
nbits = 6
K = (1 << nbits)
rs = np.random.RandomState(123)
x = rs.rand(n, d).astype(np.float32)
unaries = np.zeros((M, n, K)).astype(np.float32)
lsq = faiss.LocalSearchQuantizer(d, M, nbits)
lsq.train(x) # just for allocating memory for codebooks
lsq.compute_unary_terms(sp(x), sp(unaries), n)
codebooks = faiss.vector_float_to_array(lsq.codebooks)
codebooks = codebooks.reshape(M, K, d).copy()
ref_unaries = compute_unary_terms_ref(codebooks, x)
np.testing.assert_allclose(unaries, ref_unaries, rtol=1e-4, atol=1e-4)
def test_icm_encode_step(self):
d = 16
n = 500
M = 4
nbits = 6
K = (1 << nbits)
rs = np.random.RandomState(123)
# randomly generate codes and unary terms
codes = rs.randint(0, K, (n, M)).astype(np.int32)
new_codes = codes.copy()
unaries = rs.rand(M, n, K).astype(np.float32)
# binary terms should be symmetric, because binary terms
# represent cached dot products between the code C1 in codebook M1
# and the code C2 in codebook M2.
# so, binaries[M1, M2, C1, C2] == binaries[M2, M1, C2, C1]
#
# generate binary terms in a standard way that provides
# the needed symmetry
codebooks = rs.rand(M, K, d).astype(np.float32)
binaries = compute_binary_terms_ref(codebooks)
binaries = np.ascontiguousarray(binaries)
# do icm encoding given binary and unary terms
lsq = faiss.LocalSearchQuantizer(d, M, nbits)
lsq.icm_encode_step(
sp(new_codes),
sp(unaries),
sp(binaries),
n,
1)
# do icm encoding given binary and unary terms in Python
ref_codes = icm_encode_step_ref(unaries, binaries, codes)
np.testing.assert_array_equal(new_codes, ref_codes)
def test_icm_encode(self):
d = 16
n = 500
M = 4
nbits = 4
K = (1 << nbits)
rs = np.random.RandomState(123)
x = rs.rand(n, d).astype(np.float32)
lsq = faiss.LocalSearchQuantizer(d, M, nbits)
lsq.train(x) # just for allocating memory for codebooks
# compute binary terms
binaries = np.zeros((M, M, K, K)).astype(np.float32)
lsq.compute_binary_terms(sp(binaries))
# compute unary terms
unaries = np.zeros((M, n, K)).astype(np.float32)
lsq.compute_unary_terms(sp(x), sp(unaries), n)
# randomly generate codes
codes = rs.randint(0, K, (n, M)).astype(np.int32)
new_codes = codes.copy()
# do icm encoding given binary and unary terms
lsq.icm_encode_step(
sp(new_codes),
sp(unaries),
sp(binaries),
n,
1)
# do icm encoding without pre-computed unary and binary terms in Python
codebooks = faiss.vector_float_to_array(lsq.codebooks)
codebooks = codebooks.reshape(M, K, d).copy()
ref_codes = icm_encode_ref(x, codebooks, codes)
np.testing.assert_array_equal(new_codes, ref_codes)
def eval_codec(q, xb):
codes = q.compute_codes(xb)
decoded = q.decode(codes)
return ((xb - decoded) ** 2).sum()
| TestComponents |
python | pandas-dev__pandas | asv_bench/benchmarks/strftime.py | {
"start": 3116,
"end": 3535
} | class ____:
timeout = 1500
params = [1000, 10000]
param_names = ["nobs"]
def setup(self, nobs):
self.data = pd.DataFrame(
{
"off": [offsets.BusinessHour()] * nobs,
}
)
def time_frame_offset_str(self, nobs):
self.data["off"].apply(str)
def time_frame_offset_repr(self, nobs):
self.data["off"].apply(repr)
| BusinessHourStrftime |
python | google__pytype | pytype/tools/traces/traces_test.py | {
"start": 1044,
"end": 2760
} | class ____(unittest.TestCase):
"""Tests for traces.trace."""
def test_traces(self):
src = traces.trace("")
trace, = src.traces[0 if _PYVER >= (3, 11) else 1]
self.assertEqual(
trace.op, "RETURN_CONST" if _PYVER >= (3, 12) else "LOAD_CONST"
)
self.assertIsNone(trace.symbol)
pyval, = trace.types
self.assertEqual(pyval.name, "builtins.NoneType")
self.assertEqual(pyval.cls.name, "builtins.NoneType")
def test_options(self):
src = traces.trace("", config.Options.create("rumpelstiltskin"))
self.assertEqual(src.filename, "rumpelstiltskin")
def test_external_type(self):
with test_utils.Tempdir() as d:
pyi_path = d.create_file("foo.pyi", "class Foo: ...")
imports_info = d.create_file("imports_info", f"foo {pyi_path}")
src = traces.trace(
"import foo\nx = foo.Foo()",
config.Options.create(imports_map=imports_info))
trace, = (x for x in src.traces[2] if x.op == "STORE_NAME")
pyval, = trace.types
self.assertEqual(pyval.name, "foo.Foo")
self.assertEqual(pyval.cls.name, "foo.Foo")
def test_py3_class(self):
src = traces.trace(textwrap.dedent("""
class Foo:
pass
""").lstrip())
trace, = (x for x in src.traces[1] if x.op == "LOAD_BUILD_CLASS")
pyval, = trace.types
self.assertEqual(pyval.name, "typing.Callable")
def test_unknown(self):
# pytype represents unannotated function parameters as unknowns. Make sure
# unknowns don't appear in the traced types.
src = traces.trace("def f(x): return x")
trace = next(x for x in src.traces[1] if x.op == "LOAD_FAST")
pyval, = trace.types
self.assertIsInstance(pyval, pytd.AnythingType)
| TraceTest |
python | pypa__pip | tests/unit/test_utils_subprocess.py | {
"start": 2083,
"end": 2376
} | class ____(SpinnerInterface):
def __init__(self) -> None:
self.spin_count = 0
self.final_status: str | None = None
def spin(self) -> None:
self.spin_count += 1
def finish(self, final_status: str) -> None:
self.final_status = final_status
| FakeSpinner |
python | ray-project__ray | python/ray/tests/test_output.py | {
"start": 13552,
"end": 17586
} | class ____:
def __init__(self):
print("init stdout")
print("init stderr", file=sys.stderr)
self.name = "ActorX"
def f(self):
print("bye stdout")
print("bye stderr", file=sys.stderr)
def __repr__(self):
return self.name
ray.init(num_cpus=2)
ray.get([Actor1.remote().f.remote(), Actor2.remote().f.remote()])
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
assert "stderr" not in out_str
assert "stdout" not in err_str
assert re.search("(Actor1 pid=.*) hi stdout", out_str), out_str
assert re.search("(Actor1 pid=.*) hi stderr", err_str), err_str
assert re.search("(Actor2 pid=.*) init stdout", out_str), out_str
assert re.search("(Actor2 pid=.*) init stderr", err_str), err_str
assert not re.search("(ActorX pid=.*) init", out_str), out_str
assert not re.search("(ActorX pid=.*) init", err_str), err_str
assert re.search("(ActorX pid=.*) bye stdout", out_str), out_str
assert re.search("(ActorX pid=.*) bye stderr", err_str), err_str
def test_output_local_ray():
script = """
import ray
ray.init()
"""
output = run_string_as_driver(script)
lines = output.strip("\n").split("\n")
lines = [line for line in lines if "The object store is using /tmp" not in line]
assert len(lines) >= 1
assert "Started a local Ray instance." in output
if os.environ.get("RAY_MINIMAL") == "1":
assert "View the dashboard" not in output
else:
assert "View the dashboard" in output
def test_output_ray_cluster(call_ray_start):
script = """
import ray
ray.init()
"""
output = run_string_as_driver(script)
lines = output.strip("\n").split("\n")
assert len(lines) >= 1
assert "Connecting to existing Ray cluster at address:" in output
assert "Connected to Ray cluster." in output
if os.environ.get("RAY_MINIMAL") == "1":
assert "View the dashboard" not in output
else:
assert "View the dashboard" in output
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# TODO: fix this test to support minimal installation
@pytest.mark.skipif(
os.environ.get("RAY_MINIMAL") == "1",
reason="This test currently fails with minimal install.",
)
def test_output_on_driver_shutdown():
with tempfile.NamedTemporaryFile("w+", suffix=".py", prefix="_", delete=True) as f:
script = """
import ray
@ray.remote
def t(i: int):
return i
obj_refs = [t.remote(i) for i in range(10)]
with open("{ready_path}", "w") as f:
f.write("ready")
f.flush()
while True:
assert len(obj_refs) == 10
ready, pending = ray.wait(obj_refs, num_returns=2)
for i in ray.get(ready):
obj_refs[i] = t.remote(i)
""".format(
ready_path=f.name
)
# Start the driver and wait for it to start executing Ray code.
proc = run_string_as_driver_nonblocking(script)
wait_for_condition(lambda: len(f.read()) > 0)
print(f"Script is running... pid: {proc.pid}")
# Send multiple signals to terminate the driver like a real-world scenario.
for _ in range(3):
time.sleep(0.1)
os.kill(proc.pid, signal.SIGINT)
proc.wait(timeout=10)
err_str = proc.stderr.read().decode("ascii")
assert len(err_str) > 0
assert "KeyboardInterrupt" in err_str
assert "StackTrace Information" not in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.skipif(
os.environ.get("RAY_MINIMAL") == "1",
reason="This test currently fails with minimal install.",
)
def test_empty_line_thread_safety_bug():
"""Make sure when new threads are used within __init__,
the empty line is not printed.
Related: https://github.com/ray-project/ray/pull/20987
"""
actor_repr = "TESTER"
script = f"""
import threading
import ray
@ray.remote
| Actor2 |
python | dask__distributed | distributed/http/scheduler/info.py | {
"start": 7145,
"end": 8592
} | class ____(WebSocketHandler):
def initialize(self, dask_server=None, extra=None):
self.server = dask_server
self.extra = extra or {}
self.plugin = WebsocketPlugin(self, self.server)
self.server.add_plugin(self.plugin)
def send(self, name, data):
data["name"] = name
for k in list(data):
# Drop bytes objects for now
if isinstance(data[k], bytes):
del data[k]
self.write_message(data)
def open(self):
for worker in self.server.workers:
self.plugin.add_worker(self.server, worker)
def on_message(self, message):
message = json.loads(message)
if message["name"] == "ping":
self.send("pong", {"timestamp": str(datetime.now())})
def on_close(self):
self.server.remove_plugin(name=self.plugin.name)
routes: list[tuple] = [
(r"info", redirect("info/main/workers.html"), {}),
(r"info/main/workers.html", Workers, {}),
(r"info/main/exceptions.html", Exceptions, {}),
(r"info/worker/(.*).html", Worker, {}),
(r"info/task/(.*).html", Task, {}),
(r"info/main/logs.html", Logs, {}),
(r"info/call-stacks/(.*).html", WorkerCallStacks, {}),
(r"info/call-stack/(.*).html", TaskCallStack, {}),
(r"info/logs/(.*).html", WorkerLogs, {}),
(r"individual-plots.json", IndividualPlots, {}),
(r"eventstream", EventstreamHandler, {}),
]
| EventstreamHandler |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_xcom.py | {
"start": 8101,
"end": 18650
} | class ____(TestXComEndpoint):
@pytest.fixture(autouse=True)
def setup(self, dag_maker) -> None:
self.clear_db()
def test_should_respond_200(self, test_client):
self._create_xcom_entries(TEST_DAG_ID, run_id, logical_date_parsed, TEST_TASK_ID)
with assert_queries_count(4):
response = test_client.get(
f"/dags/{TEST_DAG_ID}/dagRuns/{run_id}/taskInstances/{TEST_TASK_ID}/xcomEntries"
)
assert response.status_code == 200
response_data = response.json()
for xcom_entry in response_data["xcom_entries"]:
xcom_entry["timestamp"] = "TIMESTAMP"
expected_response = {
"xcom_entries": [
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": f"{TEST_XCOM_KEY}-0",
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": -1,
},
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": f"{TEST_XCOM_KEY}-1",
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": -1,
},
],
"total_entries": 2,
}
assert response_data == expected_response
def test_should_respond_200_with_tilde(self, test_client):
self._create_xcom_entries(TEST_DAG_ID, run_id, logical_date_parsed, TEST_TASK_ID)
self._create_xcom_entries(TEST_DAG_ID_2, run_id, logical_date_parsed, TEST_TASK_ID_2)
with assert_queries_count(4):
response = test_client.get("/dags/~/dagRuns/~/taskInstances/~/xcomEntries")
assert response.status_code == 200
response_data = response.json()
for xcom_entry in response_data["xcom_entries"]:
xcom_entry["timestamp"] = "TIMESTAMP"
expected_response = {
"xcom_entries": [
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": f"{TEST_XCOM_KEY}-0",
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": -1,
},
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": f"{TEST_XCOM_KEY}-1",
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": -1,
},
{
"dag_id": TEST_DAG_ID_2,
"dag_display_name": TEST_DAG_DISPLAY_NAME_2,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": f"{TEST_XCOM_KEY}-0",
"task_id": TEST_TASK_ID_2,
"task_display_name": TEST_TASK_DISPLAY_NAME_2,
"timestamp": "TIMESTAMP",
"map_index": -1,
},
{
"dag_id": TEST_DAG_ID_2,
"dag_display_name": TEST_DAG_DISPLAY_NAME_2,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": f"{TEST_XCOM_KEY}-1",
"task_id": TEST_TASK_ID_2,
"task_display_name": TEST_TASK_DISPLAY_NAME_2,
"timestamp": "TIMESTAMP",
"map_index": -1,
},
],
"total_entries": 4,
}
assert response_data == expected_response
@pytest.mark.parametrize("map_index", (0, 1, None))
def test_should_respond_200_with_map_index(self, map_index, test_client):
self._create_xcom_entries(TEST_DAG_ID, run_id, logical_date_parsed, TEST_TASK_ID, mapped_ti=True)
with assert_queries_count(4):
response = test_client.get(
"/dags/~/dagRuns/~/taskInstances/~/xcomEntries",
params={"map_index": map_index} if map_index is not None else None,
)
assert response.status_code == 200
response_data = response.json()
if map_index is None:
expected_entries = [
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": TEST_XCOM_KEY,
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": idx,
}
for idx in range(2)
]
else:
expected_entries = [
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": TEST_XCOM_KEY,
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": map_index,
}
]
for xcom_entry in response_data["xcom_entries"]:
xcom_entry["timestamp"] = "TIMESTAMP"
assert response_data == {
"xcom_entries": expected_entries,
"total_entries": len(expected_entries),
}
@pytest.mark.parametrize(
("key", "expected_entries"),
[
(
TEST_XCOM_KEY,
[
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": TEST_XCOM_KEY,
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": 0,
},
{
"dag_id": TEST_DAG_ID,
"dag_display_name": TEST_DAG_DISPLAY_NAME,
"logical_date": logical_date_formatted,
"run_id": run_id,
"key": TEST_XCOM_KEY,
"task_id": TEST_TASK_ID,
"task_display_name": TEST_TASK_DISPLAY_NAME,
"timestamp": "TIMESTAMP",
"map_index": 1,
},
],
),
(f"{TEST_XCOM_KEY}-0", []),
],
)
def test_should_respond_200_with_xcom_key(self, key, expected_entries, test_client):
self._create_xcom_entries(TEST_DAG_ID, run_id, logical_date_parsed, TEST_TASK_ID, mapped_ti=True)
with assert_queries_count(4):
response = test_client.get(
"/dags/~/dagRuns/~/taskInstances/~/xcomEntries",
params={"xcom_key_pattern": key} if key is not None else None,
)
assert response.status_code == 200
response_data = response.json()
for xcom_entry in response_data["xcom_entries"]:
xcom_entry["timestamp"] = "TIMESTAMP"
assert response_data == {
"xcom_entries": expected_entries,
"total_entries": len(expected_entries),
}
@provide_session
def _create_xcom_entries(self, dag_id, run_id, logical_date, task_id, mapped_ti=False, session=None):
bundle_name = "testing"
orm_dag_bundle = DagBundleModel(name=bundle_name)
session.merge(orm_dag_bundle)
session.flush()
dag = DAG(dag_id=dag_id)
sync_dag_to_db(dag)
dagrun = DagRun(
dag_id=dag_id,
run_id=run_id,
logical_date=logical_date,
start_date=logical_date,
run_type=DagRunType.MANUAL,
)
session.add(dagrun)
dag_version = DagVersion.get_latest_version(dag.dag_id)
if mapped_ti:
for i in [0, 1]:
ti = TaskInstance(
EmptyOperator(task_id=task_id), run_id=run_id, map_index=i, dag_version_id=dag_version.id
)
ti.dag_id = dag_id
session.add(ti)
else:
ti = TaskInstance(EmptyOperator(task_id=task_id), run_id=run_id, dag_version_id=dag_version.id)
ti.dag_id = dag_id
session.add(ti)
session.commit()
for i in [0, 1]:
if mapped_ti:
key = TEST_XCOM_KEY
map_index = i
else:
key = f"{TEST_XCOM_KEY}-{i}"
map_index = -1
XComModel.set(
key=key,
value=TEST_XCOM_VALUE,
run_id=run_id,
task_id=task_id,
dag_id=dag_id,
map_index=map_index,
)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
"/dags/~/dagRuns/~/taskInstances/~/xcomEntries",
params={},
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
"/dags/~/dagRuns/~/taskInstances/~/xcomEntries",
params={},
)
assert response.status_code == 403
| TestGetXComEntries |
python | gevent__gevent | src/greentest/3.12/test_socket.py | {
"start": 20360,
"end": 20873
} | class ____(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def bindSock(self, sock):
path = socket_helper.create_unix_domain_name()
self.addCleanup(os_helper.unlink, path)
socket_helper.bind_unix_socket(sock, path)
| UnixSocketTestBase |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 172088,
"end": 172465
} | class ____(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
| RecvmsgIntoSCMRightsStreamTest |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 20590,
"end": 20943
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.scale = torch.nn.Parameter(torch.randn(1, 10))
def forward(self, x):
x = F.relu(x)
if hasattr(self, "scale"):
x *= self.scale
if hasattr(self, "scale2"):
x *= self.scale2
return x
| HasAttrModule |
python | kamyu104__LeetCode-Solutions | Python/find-champion-ii.py | {
"start": 49,
"end": 546
} | class ____(object):
def findChampion(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
lookup = [False]*n
for u, v in edges:
lookup[v] = True
result = -1
for u in xrange(n):
if lookup[u]:
continue
if result != -1:
return -1
result = u
return result
# Time: O(n)
# Space: O(n)
# graph, hash table
| Solution2 |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 1856,
"end": 2118
} | class ____(AccountsError):
"""
Raised when there are issues with signing.
"""
def __init__(self, message: str, transaction: Optional["TransactionAPI"] = None):
self.transaction = transaction
super().__init__(message)
| SignatureError |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 1318,
"end": 1571
} | class ____:
"""
Base Gemm configuration used for most backends (CPU, CUDA)
"""
block_m: int
block_n: int
block_k: int
num_stages: int
num_warps: int
hint_override: Optional[int] = None
@dataclasses.dataclass
| BaseConfig |
python | sympy__sympy | sympy/integrals/transforms.py | {
"start": 28815,
"end": 33615
} | class ____(IntegralTransform):
"""
Class representing unevaluated inverse Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Mellin transforms, see the
:func:`inverse_mellin_transform` docstring.
"""
_name = 'Inverse Mellin'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, a, b, **opts):
if a is None:
a = InverseMellinTransform._none_sentinel
if b is None:
b = InverseMellinTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, a, b, **opts)
@property
def fundamental_strip(self):
a, b = self.args[3], self.args[4]
if a is InverseMellinTransform._none_sentinel:
a = None
if b is InverseMellinTransform._none_sentinel:
b = None
return a, b
def _compute_transform(self, F, s, x, **hints):
# IntegralTransform's doit will cause this hint to exist, but
# InverseMellinTransform should ignore it
hints.pop('simplify', True)
global _allowed
if _allowed is None:
_allowed = {
exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth,
factorial, rf}
for f in postorder_traversal(F):
if f.is_Function and f.has(s) and f.func not in _allowed:
raise IntegralTransformError('Inverse Mellin', F,
'Component %s not recognised.' % f)
strip = self.fundamental_strip
return _inverse_mellin_transform(F, s, x, strip, **hints)
def _as_integral(self, F, s, x):
c = self.__class__._c
return Integral(F*x**(-s), (s, c - S.ImaginaryUnit*S.Infinity, c +
S.ImaginaryUnit*S.Infinity))/(2*S.Pi*S.ImaginaryUnit)
def inverse_mellin_transform(F, s, x, strip, **hints):
r"""
Compute the inverse Mellin transform of `F(s)` over the fundamental
strip given by ``strip=(a, b)``.
Explanation
===========
This can be defined as
.. math:: f(x) = \frac{1}{2\pi i} \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s,
for any `c` in the fundamental strip. Under certain regularity
conditions on `F` and/or `f`,
this recovers `f` from its Mellin transform `F`
(and vice versa), for positive real `x`.
One of `a` or `b` may be passed as ``None``; a suitable `c` will be
inferred.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseMellinTransform` object.
Note that this function will assume x to be positive and real, regardless
of the SymPy assumptions!
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Examples
========
>>> from sympy import inverse_mellin_transform, oo, gamma
>>> from sympy.abc import x, s
>>> inverse_mellin_transform(gamma(s), s, x, (0, oo))
exp(-x)
The fundamental strip matters:
>>> f = 1/(s**2 - 1)
>>> inverse_mellin_transform(f, s, x, (-oo, -1))
x*(1 - 1/x**2)*Heaviside(x - 1)/2
>>> inverse_mellin_transform(f, s, x, (-1, 1))
-x*Heaviside(1 - x)/2 - Heaviside(x - 1)/(2*x)
>>> inverse_mellin_transform(f, s, x, (1, oo))
(1/2 - x**2/2)*Heaviside(1 - x)/x
See Also
========
mellin_transform
hankel_transform, inverse_hankel_transform
"""
return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints)
##########################################################################
# Fourier Transform
##########################################################################
@_noconds_(True)
def _fourier_transform(f, x, k, a, b, name, simplify=True):
r"""
Compute a general Fourier-type transform
.. math::
F(k) = a \int_{-\infty}^{\infty} e^{bixk} f(x)\, dx.
For suitable choice of *a* and *b*, this reduces to the standard Fourier
and inverse Fourier transforms.
"""
F = integrate(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
integral_f = integrate(f, (x, S.NegativeInfinity, S.Infinity))
if integral_f in (S.NegativeInfinity, S.Infinity, S.NaN) or integral_f.has(Integral):
raise IntegralTransformError(name, f, 'function not integrable on real axis')
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
| InverseMellinTransform |
python | scrapy__scrapy | tests/test_dupefilters.py | {
"start": 8533,
"end": 9034
} | class ____:
def test_log_deprecation(self):
dupefilter = _get_dupefilter(
settings={"DUPEFILTER_CLASS": BaseDupeFilter},
)
with catch_warnings(record=True) as warning_list:
dupefilter.log(None, None)
assert len(warning_list) == 1
assert (
str(warning_list[0].message)
== "Calling BaseDupeFilter.log() is deprecated."
)
assert warning_list[0].category == ScrapyDeprecationWarning
| TestBaseDupeFilter |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | {
"start": 9401,
"end": 10628
} | class ____:
def __init__(self, a, b):
self.a = a
self.b = b
def set_a(self, a):
self.a = a
def set_join(self, x, y):
if 1 < 1:
self.a = x
else:
self.a = y
def set_subfield(self, x):
self.a.x = perfect_tito(x)
def self_tito(self):
self = perfect_tito(self)
def recursive_tito(self):
self.a = self
def test_tito_self():
o = TitoSelf(_test_source(), "")
_test_sink(o.a) # Issue.
_test_sink(o.b) # No issue.
o = TitoSelf("", "")
o.set_a(_test_source())
_test_sink(o.a) # Issue.
_test_sink(o.b) # No issue.
o = TitoSelf("", "")
o.set_a(["", _test_source()])
_test_sink(o.a[1]) # Issue.
_test_sink(o.a[0]) # No issue.
_test_sink(o.b) # No issue.
o = TitoSelf("", "")
o.set_join(_test_source(), "")
_test_sink(o.a) # Issue.
_test_sink(o.b) # No issue.
o.set_join("", _test_source())
_test_sink(o.a) # Issue.
_test_sink(o.b) # No issue.
o = TitoSelf("", "")
o.set_subfield(_test_source())
_test_sink(o.a.x) # Issue.
_test_sink(o.a.y) # No issue.
_test_sink(o.b) # No issue.
# Test tito to cls.
| TitoSelf |
python | ipython__ipython | tests/test_formatters.py | {
"start": 9268,
"end": 15546
} | class ____(object):
def _repr_pdf_(self):
return "PDF"
def test_pdf_formatter():
pdf = MakePDF()
f = PDFFormatter()
assert f(pdf) == "PDF"
def test_print_method_bound():
f = HTMLFormatter()
class MyHTML(object):
def _repr_html_(self):
return "hello"
with capture_output() as captured:
result = f(MyHTML)
assert result is None
assert "FormatterWarning" not in captured.stderr
with capture_output() as captured:
result = f(MyHTML())
assert result == "hello"
assert captured.stderr == ""
def test_print_method_weird():
class TextMagicHat(object):
def __getattr__(self, key):
return key
f = HTMLFormatter()
text_hat = TextMagicHat()
assert text_hat._repr_html_ == "_repr_html_"
with capture_output() as captured:
result = f(text_hat)
assert result is None
assert "FormatterWarning" not in captured.stderr
class CallableMagicHat(object):
def __getattr__(self, key):
return lambda: key
call_hat = CallableMagicHat()
with capture_output() as captured:
result = f(call_hat)
assert result is None
class BadReprArgs(object):
def _repr_html_(self, extra, args):
return "html"
bad = BadReprArgs()
with capture_output() as captured:
result = f(bad)
assert result is None
assert "FormatterWarning" not in captured.stderr
def test_format_config():
"""config objects don't pretend to support fancy reprs with lazy attrs"""
f = HTMLFormatter()
cfg = Config()
with capture_output() as captured:
result = f(cfg)
assert result is None
assert captured.stderr == ""
with capture_output() as captured:
result = f(Config)
assert result is None
assert captured.stderr == ""
def test_pretty_max_seq_length():
f = PlainTextFormatter(max_seq_length=1)
lis = list(range(3))
text = f(lis)
assert text == "[0, ...]"
f.max_seq_length = 0
text = f(lis)
assert text == "[0, 1, 2]"
text = f(list(range(1024)))
lines = text.splitlines()
assert len(lines) == 1024
def test_ipython_display_formatter():
"""Objects with _ipython_display_ defined bypass other formatters"""
f = get_ipython().display_formatter
catcher = []
class SelfDisplaying(object):
def _ipython_display_(self):
catcher.append(self)
class NotSelfDisplaying(object):
def __repr__(self):
return "NotSelfDisplaying"
def _ipython_display_(self):
raise NotImplementedError
save_enabled = f.ipython_display_formatter.enabled
f.ipython_display_formatter.enabled = True
yes = SelfDisplaying()
no = NotSelfDisplaying()
d, md = f.format(no)
assert d == {"text/plain": repr(no)}
assert md == {}
assert catcher == []
d, md = f.format(yes)
assert d == {}
assert md == {}
assert catcher == [yes]
f.ipython_display_formatter.enabled = save_enabled
def test_repr_mime():
class HasReprMime(object):
def _repr_mimebundle_(self, include=None, exclude=None):
return {
"application/json+test.v2": {"x": "y"},
"plain/text": "<HasReprMime>",
"image/png": "i-overwrite",
}
def _repr_png_(self):
return "should-be-overwritten"
def _repr_html_(self):
return "<b>hi!</b>"
f = get_ipython().display_formatter
html_f = f.formatters["text/html"]
save_enabled = html_f.enabled
html_f.enabled = True
obj = HasReprMime()
d, md = f.format(obj)
html_f.enabled = save_enabled
assert sorted(d) == [
"application/json+test.v2",
"image/png",
"plain/text",
"text/html",
"text/plain",
]
assert md == {}
d, md = f.format(obj, include={"image/png"})
assert list(d.keys()) == [
"image/png"
], "Include should filter out even things from repr_mimebundle"
assert d["image/png"] == "i-overwrite", "_repr_mimebundle_ take precedence"
def test_pass_correct_include_exclude():
class Tester(object):
def __init__(self, include=None, exclude=None):
self.include = include
self.exclude = exclude
def _repr_mimebundle_(self, include, exclude, **kwargs):
if include and (include != self.include):
raise ValueError("include got modified: display() may be broken.")
if exclude and (exclude != self.exclude):
raise ValueError("exclude got modified: display() may be broken.")
return None
include = {"a", "b", "c"}
exclude = {"c", "e", "f"}
f = get_ipython().display_formatter
f.format(Tester(include=include, exclude=exclude), include=include, exclude=exclude)
f.format(Tester(exclude=exclude), exclude=exclude)
f.format(Tester(include=include), include=include)
def test_repr_mime_meta():
class HasReprMimeMeta(object):
def _repr_mimebundle_(self, include=None, exclude=None):
data = {
"image/png": "base64-image-data",
}
metadata = {
"image/png": {
"width": 5,
"height": 10,
}
}
return (data, metadata)
f = get_ipython().display_formatter
obj = HasReprMimeMeta()
d, md = f.format(obj)
assert sorted(d) == ["image/png", "text/plain"]
assert md == {
"image/png": {
"width": 5,
"height": 10,
}
}
def test_repr_mime_failure():
class BadReprMime(object):
def _repr_mimebundle_(self, include=None, exclude=None):
raise RuntimeError
f = get_ipython().display_formatter
obj = BadReprMime()
d, md = f.format(obj)
assert "text/plain" in d
def test_custom_repr_namedtuple_partialmethod():
from functools import partialmethod
from typing import NamedTuple
class Foo(NamedTuple):
...
Foo.__repr__ = partialmethod(lambda obj: "Hello World")
foo = Foo()
f = PlainTextFormatter()
assert f.pprint
assert f(foo) == "Hello World"
| MakePDF |
python | pandas-dev__pandas | pandas/tests/series/methods/test_between.py | {
"start": 151,
"end": 2565
} | class ____:
def test_between(self):
series = Series(date_range("1/1/2000", periods=10))
left, right = series[[2, 7]]
result = series.between(left, right)
expected = (series >= left) & (series <= right)
tm.assert_series_equal(result, expected)
def test_between_datetime_object_dtype(self):
ser = Series(bdate_range("1/1/2000", periods=20), dtype=object)
ser[::2] = np.nan
result = ser[ser.between(ser[3], ser[17])]
expected = ser[3:18].dropna()
tm.assert_series_equal(result, expected)
result = ser[ser.between(ser[3], ser[17], inclusive="neither")]
expected = ser[5:16].dropna()
tm.assert_series_equal(result, expected)
def test_between_period_values(self):
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
left, right = ser[[2, 7]]
result = ser.between(left, right)
expected = (ser >= left) & (ser <= right)
tm.assert_series_equal(result, expected)
def test_between_inclusive_string(self):
# GH 40628
series = Series(date_range("1/1/2000", periods=10))
left, right = series[[2, 7]]
result = series.between(left, right, inclusive="both")
expected = (series >= left) & (series <= right)
tm.assert_series_equal(result, expected)
result = series.between(left, right, inclusive="left")
expected = (series >= left) & (series < right)
tm.assert_series_equal(result, expected)
result = series.between(left, right, inclusive="right")
expected = (series > left) & (series <= right)
tm.assert_series_equal(result, expected)
result = series.between(left, right, inclusive="neither")
expected = (series > left) & (series < right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inclusive", ["yes", True, False])
def test_between_error_args(self, inclusive):
# GH 40628
series = Series(date_range("1/1/2000", periods=10))
left, right = series[[2, 7]]
value_error_msg = (
"Inclusive has to be either string of 'both','left', 'right', or 'neither'."
)
series = Series(date_range("1/1/2000", periods=10))
with pytest.raises(ValueError, match=value_error_msg):
series.between(left, right, inclusive=inclusive)
| TestBetween |
python | automl__auto-sklearn | test/test_pipeline/components/data_preprocessing/test_data_preprocessing.py | {
"start": 163,
"end": 5634
} | class ____(unittest.TestCase):
def do_a_fit_transform(self, sparse_input):
# X will be the input and Y is what we expect after transform. categ_feat stores
# indicators of feature type (True if categorical, False if numerical)
X, Y, categ_feat = [], [], []
# Feature 1 (numerical):
# This feature should be dropped due to lack of variance.
categ_feat.append(False)
X.append(np.array([3.14, 3.14, 3.14]).reshape(3, 1))
Y.append(np.array([]).reshape(3, 0))
# Feature 2 (numerical):
# This feature should be normalized by having its mean subtracted from all
# elements and by having them divided by the standard deviation.
categ_feat.append(False)
nf = np.array([1.0, 2.0, 3.0]).reshape(3, 1) # mean = 2.
sdev = np.sqrt(2.0 / 3.0)
shift = (
0 if sparse_input else 2.0
) # if sparse_input, there is no mean subtraction
nft = (nf - shift) / sdev
X.append(nf)
Y.append(nft)
# Feature 3 (numerical):
# This feature has a missing value that should be imputed by the mean of the
# other values (2.).
# This feature should also be normalized as in the previous feature.
categ_feat.append(False)
X.append(np.array([1.0, np.nan, 3.0]).reshape(3, 1))
Y.append(nft.copy())
# Feature 4 (categorical)
# This feature should be one hot encoded.
categ_feat.append(True)
X.append(np.array([1, 3, 2]).reshape(3, 1))
Y.append(np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]]))
# Feature 5 (categorical)
# This feature should be one hot encoded. (A discontinuous category set or
# a category 0 shouldn't be problems.)
categ_feat.append(True)
X.append(np.array([2, 1, 9]).reshape(3, 1))
Y.append(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]))
# Feature 6 (categorical)
# This feature should be one hot encoded. The missing value gets imputed as
# a category on its own.
categ_feat.append(True)
X.append(np.array([1, 1, np.nan]).reshape(3, 1))
Y.append(np.array([[0, 1], [0, 1], [1, 0]]))
# Combine datasets and shuffle columns:
n_feats = len(categ_feat)
random_order = np.random.choice(np.arange(n_feats), size=n_feats, replace=False)
# Shuffle X according to random_order
X = np.array(X)[random_order]
X_comb = np.hstack(X)
# Shuffle Y according to random_order and reorder it as the
# PreprocessingPipeline does (i.e. categorical features come first in Y).
categ_feat = {
i: "categorical" if categ_feat[order] else "numerical"
for i, order in enumerate(random_order)
}
cat_to_left_order = [
index
for col, index in sorted(
[(col_type, i) for i, col_type in categ_feat.items()]
)
]
# Sort so that Y Matches the random ordering
Y = [Y[n] for n in random_order]
# Then move the categorical columns to the left
Y = [Y[n] for n in cat_to_left_order]
Y_comb = np.hstack(Y)
# Data preprocessing
DPP = FeatTypeSplit(feat_type=categ_feat)
X_comb = sparse.csc_matrix(X_comb) if sparse_input else X_comb
Y_comb_out_1 = DPP.fit_transform(X_comb)
# Check if Y_comb_out is what we expect it to be:
self.assertEqual(sparse_input, sparse.issparse(Y_comb_out_1))
Y_comb_out_1 = Y_comb_out_1.todense() if sparse_input else Y_comb_out_1
np.testing.assert_array_almost_equal(Y_comb_out_1, Y_comb)
# Do it again, but using the already fitted pipeline
Y_comb_out_2 = DPP.transform(X_comb)
# Consistency check
self.assertEqual(sparse_input, sparse.issparse(Y_comb_out_2))
Y_comb_out_2 = Y_comb_out_2.todense() if sparse_input else Y_comb_out_2
np.testing.assert_array_equal(Y_comb_out_1, Y_comb_out_2)
def test_fit_transform(self):
self.do_a_fit_transform(sparse_input=False)
def test_fit_transform_sparse(self):
self.do_a_fit_transform(sparse_input=True)
def test_string_categories(self):
# Numerical dataset (as used in NumericalPreprocessingPipelineTest)
X_num = np.array(
[
[3.14, 1.0, 1.0], # noqa : matrix legibility
[3.14, 2.0, np.nan], # noqa : matrix legibility
[3.14, 3.0, 3.0],
]
) # noqa : matrix legibility
# Categorical string dataset
X_cat = np.array(
[
["red", "medium", "small"],
["blue", "short", "big"],
["white", "tall", np.nan],
]
)
# Combined dataset with shuffled columns:
X_comb = np.hstack((X_num, X_cat))
categ_feat = [False] * 3 + [True] * 3
random_order = np.random.choice(np.arange(6), size=6, replace=False)
X_comb = X_comb[:, random_order]
categ_feat = [categ_feat[order] for order in random_order]
# Strings are not allowed, therefore:
with self.assertRaises(ValueError):
categ_feat = {
i: "categorical" if feat else "numerical"
for i, feat in enumerate(categ_feat)
}
FeatTypeSplit(feat_type=categ_feat).fit_transform(X_comb)
| PreprocessingPipelineTest |
python | numpy__numpy | numpy/f2py/tests/test_crackfortran.py | {
"start": 235,
"end": 771
} | class ____(util.F2PyTest):
# issue gh-15035: add handling for endsubroutine, endfunction with no space
# between "end" and the block name
sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")]
def test_module(self):
k = np.array([1, 2, 3], dtype=np.float64)
w = np.array([1, 2, 3], dtype=np.float64)
self.module.subb(k)
assert np.allclose(k, w + 1)
self.module.subc([w, k])
assert np.allclose(k, w + 1)
assert self.module.t0("23") == b"2"
| TestNoSpace |
python | django__django | tests/model_formsets/tests.py | {
"start": 5416,
"end": 86029
} | class ____(TestCase):
def test_modelformset_factory_without_fields(self):
"""Regression for #19733"""
message = (
"Calling modelformset_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelformset_factory(Author)
def test_simple_save(self):
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100">'
'<input type="hidden" name="form-0-id" id="id_form-0-id"></p>',
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100">'
'<input type="hidden" name="form-1-id" id="id_form-1-id"></p>',
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label>'
'<input id="id_form-2-name" type="text" name="form-2-name" maxlength="100">'
'<input type="hidden" name="form-2-id" id="id_form-2-id"></p>',
)
data = {
"form-TOTAL_FORMS": "3", # the number of forms rendered
"form-INITIAL_FORMS": "0", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-name": "Charles Baudelaire",
"form-1-name": "Arthur Rimbaud",
"form-2-name": "",
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
author1, author2 = saved
self.assertEqual(author1, Author.objects.get(name="Charles Baudelaire"))
self.assertEqual(author2, Author.objects.get(name="Arthur Rimbaud"))
authors = list(Author.objects.order_by("name"))
self.assertEqual(authors, [author2, author1])
# Gah! We forgot Paul Verlaine. Let's create a formset to edit the
# existing authors with an extra form to add him. We *could* pass in a
# queryset to restrict the Author objects we edit, but in this case
# we'll use it to display them in alphabetical order by name.
qs = Author.objects.order_by("name")
AuthorFormSet = modelformset_factory(
Author, fields="__all__", extra=1, can_delete=False
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" '
'value="Arthur Rimbaud" maxlength="100">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id"></p>'
% author2.id,
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" '
'value="Charles Baudelaire" maxlength="100">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id"></p>'
% author1.id,
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label>'
'<input id="id_form-2-name" type="text" name="form-2-name" maxlength="100">'
'<input type="hidden" name="form-2-id" id="id_form-2-id"></p>',
)
data = {
"form-TOTAL_FORMS": "3", # the number of forms rendered
"form-INITIAL_FORMS": "2", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-id": str(author2.id),
"form-0-name": "Arthur Rimbaud",
"form-1-id": str(author1.id),
"form-1-name": "Charles Baudelaire",
"form-2-name": "Paul Verlaine",
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# Only changed or new objects are returned from formset.save()
saved = formset.save()
self.assertEqual(len(saved), 1)
author3 = saved[0]
self.assertEqual(author3, Author.objects.get(name="Paul Verlaine"))
authors = list(Author.objects.order_by("name"))
self.assertEqual(authors, [author2, author1, author3])
# This probably shouldn't happen, but it will. If an add form was
# marked for deletion, make sure we don't save that form.
qs = Author.objects.order_by("name")
AuthorFormSet = modelformset_factory(
Author, fields="__all__", extra=1, can_delete=True
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" '
'value="Arthur Rimbaud" maxlength="100"></p>'
'<p><label for="id_form-0-DELETE">Delete:</label>'
'<input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id"></p>'
% author2.id,
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" '
'value="Charles Baudelaire" maxlength="100"></p>'
'<p><label for="id_form-1-DELETE">Delete:</label>'
'<input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE">'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id"></p>'
% author1.id,
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label>'
'<input id="id_form-2-name" type="text" name="form-2-name" '
'value="Paul Verlaine" maxlength="100"></p>'
'<p><label for="id_form-2-DELETE">Delete:</label>'
'<input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE">'
'<input type="hidden" name="form-2-id" value="%d" id="id_form-2-id"></p>'
% author3.id,
)
self.assertHTMLEqual(
formset.forms[3].as_p(),
'<p><label for="id_form-3-name">Name:</label>'
'<input id="id_form-3-name" type="text" name="form-3-name" maxlength="100">'
'</p><p><label for="id_form-3-DELETE">Delete:</label>'
'<input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE">'
'<input type="hidden" name="form-3-id" id="id_form-3-id"></p>',
)
data = {
"form-TOTAL_FORMS": "4", # the number of forms rendered
"form-INITIAL_FORMS": "3", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-id": str(author2.id),
"form-0-name": "Arthur Rimbaud",
"form-1-id": str(author1.id),
"form-1-name": "Charles Baudelaire",
"form-2-id": str(author3.id),
"form-2-name": "Paul Verlaine",
"form-3-name": "Walt Whitman",
"form-3-DELETE": "on",
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# No objects were changed or saved so nothing will come back.
self.assertEqual(formset.save(), [])
authors = list(Author.objects.order_by("name"))
self.assertEqual(authors, [author2, author1, author3])
# Let's edit a record to ensure save only returns that one record.
data = {
"form-TOTAL_FORMS": "4", # the number of forms rendered
"form-INITIAL_FORMS": "3", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-id": str(author2.id),
"form-0-name": "Walt Whitman",
"form-1-id": str(author1.id),
"form-1-name": "Charles Baudelaire",
"form-2-id": str(author3.id),
"form-2-name": "Paul Verlaine",
"form-3-name": "",
"form-3-DELETE": "",
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# One record has changed.
saved = formset.save()
self.assertEqual(len(saved), 1)
self.assertEqual(saved[0], Author.objects.get(name="Walt Whitman"))
def test_commit_false(self):
# Test the behavior of commit=False and save_m2m
author1 = Author.objects.create(name="Charles Baudelaire")
author2 = Author.objects.create(name="Paul Verlaine")
author3 = Author.objects.create(name="Walt Whitman")
meeting = AuthorMeeting.objects.create(created=date.today())
meeting.authors.set(Author.objects.all())
# create an Author instance to add to the meeting.
author4 = Author.objects.create(name="John Steinbeck")
AuthorMeetingFormSet = modelformset_factory(
AuthorMeeting, fields="__all__", extra=1, can_delete=True
)
data = {
"form-TOTAL_FORMS": "2", # the number of forms rendered
"form-INITIAL_FORMS": "1", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-id": str(meeting.id),
"form-0-name": "2nd Tuesday of the Week Meeting",
"form-0-authors": [author2.id, author1.id, author3.id, author4.id],
"form-1-name": "",
"form-1-authors": "",
"form-1-DELETE": "",
}
formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
self.assertTrue(formset.is_valid())
instances = formset.save(commit=False)
for instance in instances:
instance.created = date.today()
instance.save()
formset.save_m2m()
self.assertSequenceEqual(
instances[0].authors.all(),
[author1, author4, author2, author3],
)
def test_max_num(self):
# Test the behavior of max_num with model formsets. It should allow
# all existing related objects/inlines for a given object to be
# displayed, but not allow the creation of new inlines beyond max_num.
a1 = Author.objects.create(name="Charles Baudelaire")
a2 = Author.objects.create(name="Paul Verlaine")
a3 = Author.objects.create(name="Walt Whitman")
qs = Author.objects.order_by("name")
AuthorFormSet = modelformset_factory(
Author, fields="__all__", max_num=None, extra=3
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 6)
self.assertEqual(len(formset.extra_forms), 3)
AuthorFormSet = modelformset_factory(
Author, fields="__all__", max_num=4, extra=3
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(len(formset.extra_forms), 1)
AuthorFormSet = modelformset_factory(
Author, fields="__all__", max_num=0, extra=3
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(len(formset.extra_forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None)
formset = AuthorFormSet(queryset=qs)
self.assertSequenceEqual(formset.get_queryset(), [a1, a2, a3])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0)
formset = AuthorFormSet(queryset=qs)
self.assertSequenceEqual(formset.get_queryset(), [a1, a2, a3])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4)
formset = AuthorFormSet(queryset=qs)
self.assertSequenceEqual(formset.get_queryset(), [a1, a2, a3])
def test_min_num(self):
# Test the behavior of min_num with model formsets. It should be
# added to extra.
qs = Author.objects.none()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 0)
AuthorFormSet = modelformset_factory(
Author, fields="__all__", min_num=1, extra=0
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
AuthorFormSet = modelformset_factory(
Author, fields="__all__", min_num=1, extra=1
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 2)
def test_min_num_with_existing(self):
# Test the behavior of min_num with existing objects.
Author.objects.create(name="Charles Baudelaire")
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(
Author, fields="__all__", extra=0, min_num=1
)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
def test_custom_save_method(self):
class PoetForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Vladimir Mayakovsky" just to be a jerk.
author = super().save(commit=False)
author.name = "Vladimir Mayakovsky"
if commit:
author.save()
return author
PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm)
data = {
"form-TOTAL_FORMS": "3", # the number of forms rendered
"form-INITIAL_FORMS": "0", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-name": "Walt Whitman",
"form-1-name": "Charles Baudelaire",
"form-2-name": "",
}
qs = Poet.objects.all()
formset = PoetFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
poets = formset.save()
self.assertEqual(len(poets), 2)
poet1, poet2 = poets
self.assertEqual(poet1.name, "Vladimir Mayakovsky")
self.assertEqual(poet2.name, "Vladimir Mayakovsky")
def test_custom_form(self):
"""
model_formset_factory() respects fields and exclude parameters of a
custom form.
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ("title", "posted")
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ("subtitle",)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertNotIn("subtitle", formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertNotIn("subtitle", formset.forms[0].fields)
def test_custom_queryset_init(self):
"""
A queryset can be overridden in the formset's __init__() method.
"""
Author.objects.create(name="Charles Baudelaire")
Author.objects.create(name="Paul Verlaine")
class BaseAuthorFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queryset = Author.objects.filter(name__startswith="Charles")
AuthorFormSet = modelformset_factory(
Author, fields="__all__", formset=BaseAuthorFormSet
)
formset = AuthorFormSet()
self.assertEqual(len(formset.get_queryset()), 1)
def test_model_inheritance(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__")
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100">'
'</p><p><label for="id_form-0-write_speed">Write speed:</label>'
'<input type="number" name="form-0-write_speed" id="id_form-0-write_speed">'
'<input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr">'
"</p>",
)
data = {
"form-TOTAL_FORMS": "1", # the number of forms rendered
"form-INITIAL_FORMS": "0", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-author_ptr": "",
"form-0-name": "Ernest Hemingway",
"form-0-write_speed": "10",
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(author1,) = saved
self.assertEqual(author1, BetterAuthor.objects.get(name="Ernest Hemingway"))
hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" '
'value="Ernest Hemingway" maxlength="100"></p>'
'<p><label for="id_form-0-write_speed">Write speed:</label>'
'<input type="number" name="form-0-write_speed" value="10" '
'id="id_form-0-write_speed">'
'<input type="hidden" name="form-0-author_ptr" value="%d" '
'id="id_form-0-author_ptr"></p>' % hemingway_id,
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100">'
'</p><p><label for="id_form-1-write_speed">Write speed:</label>'
'<input type="number" name="form-1-write_speed" id="id_form-1-write_speed">'
'<input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr">'
"</p>",
)
data = {
"form-TOTAL_FORMS": "2", # the number of forms rendered
"form-INITIAL_FORMS": "1", # the number of forms with initial data
"form-MAX_NUM_FORMS": "", # the max number of forms
"form-0-author_ptr": hemingway_id,
"form-0-name": "Ernest Hemingway",
"form-0-write_speed": "10",
"form-1-author_ptr": "",
"form-1-name": "",
"form-1-write_speed": "",
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.save(), [])
def test_inline_formsets(self):
# We can also create a formset that is tied to a parent model. This is
# how the admin system's edit inline functionality works.
AuthorBooksFormSet = inlineformset_factory(
Author, Book, can_delete=False, extra=3, fields="__all__"
)
author = Author.objects.create(name="Charles Baudelaire")
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-0-author" value="%d" '
'id="id_book_set-0-author">'
'<input type="hidden" name="book_set-0-id" id="id_book_set-0-id">'
"</p>" % author.id,
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-1-author" value="%d" '
'id="id_book_set-1-author">'
'<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>'
% author.id,
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-2-author" value="%d" '
'id="id_book_set-2-author">'
'<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>'
% author.id,
)
data = {
"book_set-TOTAL_FORMS": "3", # the number of forms rendered
"book_set-INITIAL_FORMS": "0", # the number of forms with initial data
"book_set-MAX_NUM_FORMS": "", # the max number of forms
"book_set-0-title": "Les Fleurs du Mal",
"book_set-1-title": "",
"book_set-2-title": "",
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(book1,) = saved
self.assertEqual(book1, Book.objects.get(title="Les Fleurs du Mal"))
self.assertSequenceEqual(author.book_set.all(), [book1])
# Now that we've added a book to Charles Baudelaire, let's try adding
# another one. This time though, an edit form will be available for
# every existing book.
AuthorBooksFormSet = inlineformset_factory(
Author, Book, can_delete=False, extra=2, fields="__all__"
)
author = Author.objects.get(name="Charles Baudelaire")
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'value="Les Fleurs du Mal" maxlength="100">'
'<input type="hidden" name="book_set-0-author" value="%d" '
'id="id_book_set-0-author">'
'<input type="hidden" name="book_set-0-id" value="%d" '
'id="id_book_set-0-id"></p>'
% (
author.id,
book1.id,
),
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-1-author" value="%d" '
'id="id_book_set-1-author">'
'<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>'
% author.id,
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-2-author" value="%d" '
'id="id_book_set-2-author">'
'<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>'
% author.id,
)
data = {
"book_set-TOTAL_FORMS": "3", # the number of forms rendered
"book_set-INITIAL_FORMS": "1", # the number of forms with initial data
"book_set-MAX_NUM_FORMS": "", # the max number of forms
"book_set-0-id": str(book1.id),
"book_set-0-title": "Les Fleurs du Mal",
"book_set-1-title": "Les Paradis Artificiels",
"book_set-2-title": "",
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(book2,) = saved
self.assertEqual(book2, Book.objects.get(title="Les Paradis Artificiels"))
# As you can see, 'Les Paradis Artificiels' is now a book belonging to
# Charles Baudelaire.
self.assertSequenceEqual(author.book_set.order_by("title"), [book1, book2])
def test_inline_formsets_save_as_new(self):
# The save_as_new parameter lets you re-associate the data to a new
# instance. This is used in the admin for save_as functionality.
AuthorBooksFormSet = inlineformset_factory(
Author, Book, can_delete=False, extra=2, fields="__all__"
)
Author.objects.create(name="Charles Baudelaire")
# An immutable QueryDict simulates request.POST.
data = QueryDict(mutable=True)
data.update(
{
"book_set-TOTAL_FORMS": "3", # the number of forms rendered
"book_set-INITIAL_FORMS": "2", # the number of forms with initial data
"book_set-MAX_NUM_FORMS": "", # the max number of forms
"book_set-0-id": "1",
"book_set-0-title": "Les Fleurs du Mal",
"book_set-1-id": "2",
"book_set-1-title": "Les Paradis Artificiels",
"book_set-2-title": "",
}
)
data._mutable = False
formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
self.assertTrue(formset.is_valid())
self.assertIs(data._mutable, False)
new_author = Author.objects.create(name="Charles Baudelaire")
formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.title, "Les Fleurs du Mal")
self.assertEqual(book2.title, "Les Paradis Artificiels")
# Test using a custom prefix on an inline formset.
formset = AuthorBooksFormSet(prefix="test")
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_test-0-title">Title:</label>'
'<input id="id_test-0-title" type="text" name="test-0-title" '
'maxlength="100">'
'<input type="hidden" name="test-0-author" id="id_test-0-author">'
'<input type="hidden" name="test-0-id" id="id_test-0-id"></p>',
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_test-1-title">Title:</label>'
'<input id="id_test-1-title" type="text" name="test-1-title" '
'maxlength="100">'
'<input type="hidden" name="test-1-author" id="id_test-1-author">'
'<input type="hidden" name="test-1-id" id="id_test-1-id"></p>',
)
def test_inline_formsets_with_custom_pk(self):
# Test inline formsets where the inline-edited object has a custom
# primary key that is not the fk to the parent object.
self.maxDiff = 1024
AuthorBooksFormSet2 = inlineformset_factory(
Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__"
)
author = Author.objects.create(pk=1, name="Charles Baudelaire")
formset = AuthorBooksFormSet2(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label>'
'<input id="id_bookwithcustompk_set-0-my_pk" type="number" '
'name="bookwithcustompk_set-0-my_pk" step="1"></p>'
'<p><label for="id_bookwithcustompk_set-0-title">Title:</label>'
'<input id="id_bookwithcustompk_set-0-title" type="text" '
'name="bookwithcustompk_set-0-title" maxlength="100">'
'<input type="hidden" name="bookwithcustompk_set-0-author" '
'value="1" id="id_bookwithcustompk_set-0-author"></p>',
)
data = {
# The number of forms rendered.
"bookwithcustompk_set-TOTAL_FORMS": "1",
# The number of forms with initial data.
"bookwithcustompk_set-INITIAL_FORMS": "0",
# The max number of forms.
"bookwithcustompk_set-MAX_NUM_FORMS": "",
"bookwithcustompk_set-0-my_pk": "77777",
"bookwithcustompk_set-0-title": "Les Fleurs du Mal",
}
formset = AuthorBooksFormSet2(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(book1,) = saved
self.assertEqual(book1.pk, 77777)
book1 = author.bookwithcustompk_set.get()
self.assertEqual(book1.title, "Les Fleurs du Mal")
def test_inline_formsets_with_multi_table_inheritance(self):
# Test inline formsets where the inline-edited object uses multi-table
# inheritance, thus has a non AutoField yet auto-created primary key.
AuthorBooksFormSet3 = inlineformset_factory(
Author, AlternateBook, can_delete=False, extra=1, fields="__all__"
)
author = Author.objects.create(pk=1, name="Charles Baudelaire")
formset = AuthorBooksFormSet3(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_alternatebook_set-0-title">Title:</label>'
'<input id="id_alternatebook_set-0-title" type="text" '
'name="alternatebook_set-0-title" maxlength="100"></p>'
'<p><label for="id_alternatebook_set-0-notes">Notes:</label>'
'<input id="id_alternatebook_set-0-notes" type="text" '
'name="alternatebook_set-0-notes" maxlength="100">'
'<input type="hidden" name="alternatebook_set-0-author" value="1" '
'id="id_alternatebook_set-0-author">'
'<input type="hidden" name="alternatebook_set-0-book_ptr" '
'id="id_alternatebook_set-0-book_ptr"></p>',
)
data = {
# The number of forms rendered.
"alternatebook_set-TOTAL_FORMS": "1",
# The number of forms with initial data.
"alternatebook_set-INITIAL_FORMS": "0",
# The max number of forms.
"alternatebook_set-MAX_NUM_FORMS": "",
"alternatebook_set-0-title": "Flowers of Evil",
"alternatebook_set-0-notes": "English translation of Les Fleurs du Mal",
}
formset = AuthorBooksFormSet3(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(book1,) = saved
self.assertEqual(book1.title, "Flowers of Evil")
self.assertEqual(book1.notes, "English translation of Les Fleurs du Mal")
@skipUnlessDBFeature("supports_partially_nullable_unique_constraints")
def test_inline_formsets_with_nullable_unique_together(self):
# Test inline formsets where the inline-edited object has a
# unique_together constraint with a nullable member
AuthorBooksFormSet4 = inlineformset_factory(
Author,
BookWithOptionalAltEditor,
can_delete=False,
extra=2,
fields="__all__",
)
author = Author.objects.create(pk=1, name="Charles Baudelaire")
data = {
# The number of forms rendered.
"bookwithoptionalalteditor_set-TOTAL_FORMS": "2",
# The number of forms with initial data.
"bookwithoptionalalteditor_set-INITIAL_FORMS": "0",
# The max number of forms.
"bookwithoptionalalteditor_set-MAX_NUM_FORMS": "",
"bookwithoptionalalteditor_set-0-author": "1",
"bookwithoptionalalteditor_set-0-title": "Les Fleurs du Mal",
"bookwithoptionalalteditor_set-1-author": "1",
"bookwithoptionalalteditor_set-1-title": "Les Fleurs du Mal",
}
formset = AuthorBooksFormSet4(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.author_id, 1)
self.assertEqual(book1.title, "Les Fleurs du Mal")
self.assertEqual(book2.author_id, 1)
self.assertEqual(book2.title, "Les Fleurs du Mal")
def test_inline_formsets_with_custom_save_method(self):
AuthorBooksFormSet = inlineformset_factory(
Author, Book, can_delete=False, extra=2, fields="__all__"
)
author = Author.objects.create(pk=1, name="Charles Baudelaire")
book1 = Book.objects.create(
pk=1, author=author, title="Les Paradis Artificiels"
)
book2 = Book.objects.create(pk=2, author=author, title="Les Fleurs du Mal")
book3 = Book.objects.create(pk=3, author=author, title="Flowers of Evil")
class PoemForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Brooklyn Bridge" just to be a jerk.
poem = super().save(commit=False)
poem.name = "Brooklyn Bridge"
if commit:
poem.save()
return poem
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__")
data = {
"poem_set-TOTAL_FORMS": "3", # the number of forms rendered
"poem_set-INITIAL_FORMS": "0", # the number of forms with initial data
"poem_set-MAX_NUM_FORMS": "", # the max number of forms
"poem_set-0-name": "The Cloud in Trousers",
"poem_set-1-name": "I",
"poem_set-2-name": "",
}
poet = Poet.objects.create(name="Vladimir Mayakovsky")
formset = PoemFormSet(data=data, instance=poet)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
poem1, poem2 = saved
self.assertEqual(poem1.name, "Brooklyn Bridge")
self.assertEqual(poem2.name, "Brooklyn Bridge")
# We can provide a custom queryset to our InlineFormSet:
custom_qs = Book.objects.order_by("-title")
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'value="Les Paradis Artificiels" maxlength="100">'
'<input type="hidden" name="book_set-0-author" value="1" '
'id="id_book_set-0-author">'
'<input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id">'
"</p>",
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" '
'value="Les Fleurs du Mal" maxlength="100">'
'<input type="hidden" name="book_set-1-author" value="1" '
'id="id_book_set-1-author">'
'<input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id">'
"</p>",
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" '
'value="Flowers of Evil" maxlength="100">'
'<input type="hidden" name="book_set-2-author" value="1" '
'id="id_book_set-2-author">'
'<input type="hidden" name="book_set-2-id" value="3" '
'id="id_book_set-2-id"></p>',
)
self.assertHTMLEqual(
formset.forms[3].as_p(),
'<p><label for="id_book_set-3-title">Title:</label>'
'<input id="id_book_set-3-title" type="text" name="book_set-3-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-3-author" value="1" '
'id="id_book_set-3-author">'
'<input type="hidden" name="book_set-3-id" id="id_book_set-3-id"></p>',
)
self.assertHTMLEqual(
formset.forms[4].as_p(),
'<p><label for="id_book_set-4-title">Title:</label>'
'<input id="id_book_set-4-title" type="text" name="book_set-4-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-4-author" value="1" '
'id="id_book_set-4-author">'
'<input type="hidden" name="book_set-4-id" id="id_book_set-4-id"></p>',
)
data = {
"book_set-TOTAL_FORMS": "5", # the number of forms rendered
"book_set-INITIAL_FORMS": "3", # the number of forms with initial data
"book_set-MAX_NUM_FORMS": "", # the max number of forms
"book_set-0-id": str(book1.id),
"book_set-0-title": "Les Paradis Artificiels",
"book_set-1-id": str(book2.id),
"book_set-1-title": "Les Fleurs du Mal",
"book_set-2-id": str(book3.id),
"book_set-2-title": "Flowers of Evil",
"book_set-3-title": "Revue des deux mondes",
"book_set-4-title": "",
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
custom_qs = Book.objects.filter(title__startswith="F")
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'value="Flowers of Evil" maxlength="100">'
'<input type="hidden" name="book_set-0-author" value="1" '
'id="id_book_set-0-author">'
'<input type="hidden" name="book_set-0-id" value="3" '
'id="id_book_set-0-id"></p>',
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-1-author" value="1" '
'id="id_book_set-1-author">'
'<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>',
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" '
'maxlength="100">'
'<input type="hidden" name="book_set-2-author" value="1" '
'id="id_book_set-2-author">'
'<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>',
)
data = {
"book_set-TOTAL_FORMS": "3", # the number of forms rendered
"book_set-INITIAL_FORMS": "1", # the number of forms with initial data
"book_set-MAX_NUM_FORMS": "", # the max number of forms
"book_set-0-id": str(book3.id),
"book_set-0-title": "Flowers of Evil",
"book_set-1-title": "Revue des deux mondes",
"book_set-2-title": "",
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
    def test_inline_formsets_with_custom_save_method_related_instance(self):
        """
        The ModelForm.save() method should be able to access the related object
        if it exists in the database (#24395).
        """
        class PoemForm2(forms.ModelForm):
            # Custom save() that reads the related poet's name; the poet is
            # only saved *after* the formset is bound (see below).
            def save(self, commit=True):
                poem = super().save(commit=False)
                poem.name = "%s by %s" % (poem.name, poem.poet.name)
                if commit:
                    poem.save()
                return poem
        PoemFormSet = inlineformset_factory(
            Poet, Poem, form=PoemForm2, fields="__all__"
        )
        data = {
            "poem_set-TOTAL_FORMS": "1",
            "poem_set-INITIAL_FORMS": "0",
            "poem_set-MAX_NUM_FORMS": "",
            "poem_set-0-name": "Le Lac",
        }
        # Deliberately unsaved instance at formset-construction time.
        poet = Poet()
        formset = PoemFormSet(data=data, instance=poet)
        self.assertTrue(formset.is_valid())
        # The Poet instance is saved after the formset instantiation. This
        # happens in admin's changeform_view() when adding a new object and
        # some inlines in the same request.
        poet.name = "Lamartine"
        poet.save()
        poem = formset.save()[0]
        self.assertEqual(poem.name, "Le Lac by Lamartine")
def test_inline_formsets_with_wrong_fk_name(self):
"""Regression for #23451"""
message = "fk_name 'title' is not a ForeignKey to 'model_formsets.Author'."
with self.assertRaisesMessage(ValueError, message):
inlineformset_factory(Author, Book, fields="__all__", fk_name="title")
    def test_custom_pk(self):
        """
        Formsets handle models whose primary key is a custom editable field,
        an AutoField with a non-default name, or a ForeignKey/OneToOneField:
        editable PKs are rendered as inputs, automatic ones stay hidden, and
        FK/O2O PKs are rendered as a select for the user to choose.
        """
        # We need to ensure that it is displayed
        CustomPrimaryKeyFormSet = modelformset_factory(
            CustomPrimaryKey, fields="__all__"
        )
        formset = CustomPrimaryKeyFormSet()
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_form-0-my_pk">My pk:</label>'
            '<input id="id_form-0-my_pk" type="text" name="form-0-my_pk" '
            'maxlength="10"></p>'
            '<p><label for="id_form-0-some_field">Some field:</label>'
            '<input id="id_form-0-some_field" type="text" name="form-0-some_field" '
            'maxlength="100"></p>',
        )
        # Custom primary keys with ForeignKey, OneToOneField and AutoField.
        place = Place.objects.create(pk=1, name="Giordanos", city="Chicago")
        FormSet = inlineformset_factory(
            Place, Owner, extra=2, can_delete=False, fields="__all__"
        )
        formset = FormSet(instance=place)
        self.assertEqual(len(formset.forms), 2)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_owner_set-0-name">Name:</label>'
            '<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" '
            'maxlength="100">'
            '<input type="hidden" name="owner_set-0-place" value="1" '
            'id="id_owner_set-0-place">'
            '<input type="hidden" name="owner_set-0-auto_id" '
            'id="id_owner_set-0-auto_id"></p>',
        )
        self.assertHTMLEqual(
            formset.forms[1].as_p(),
            '<p><label for="id_owner_set-1-name">Name:</label>'
            '<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" '
            'maxlength="100">'
            '<input type="hidden" name="owner_set-1-place" value="1" '
            'id="id_owner_set-1-place">'
            '<input type="hidden" name="owner_set-1-auto_id" '
            'id="id_owner_set-1-auto_id"></p>',
        )
        data = {
            "owner_set-TOTAL_FORMS": "2",
            "owner_set-INITIAL_FORMS": "0",
            "owner_set-MAX_NUM_FORMS": "",
            "owner_set-0-auto_id": "",
            "owner_set-0-name": "Joe Perry",
            "owner_set-1-auto_id": "",
            "owner_set-1-name": "",
        }
        formset = FormSet(data, instance=place)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        # Only the filled-in form produces an object; the blank one is skipped.
        self.assertEqual(len(saved), 1)
        (owner1,) = saved
        self.assertEqual(owner1.name, "Joe Perry")
        self.assertEqual(owner1.place.name, "Giordanos")
        formset = FormSet(instance=place)
        self.assertEqual(len(formset.forms), 3)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_owner_set-0-name">Name:</label>'
            '<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" '
            'value="Joe Perry" maxlength="100">'
            '<input type="hidden" name="owner_set-0-place" value="1" '
            'id="id_owner_set-0-place">'
            '<input type="hidden" name="owner_set-0-auto_id" value="%d" '
            'id="id_owner_set-0-auto_id"></p>' % owner1.auto_id,
        )
        self.assertHTMLEqual(
            formset.forms[1].as_p(),
            '<p><label for="id_owner_set-1-name">Name:</label>'
            '<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" '
            'maxlength="100">'
            '<input type="hidden" name="owner_set-1-place" value="1" '
            'id="id_owner_set-1-place">'
            '<input type="hidden" name="owner_set-1-auto_id" '
            'id="id_owner_set-1-auto_id"></p>',
        )
        self.assertHTMLEqual(
            formset.forms[2].as_p(),
            '<p><label for="id_owner_set-2-name">Name:</label>'
            '<input id="id_owner_set-2-name" type="text" name="owner_set-2-name" '
            'maxlength="100">'
            '<input type="hidden" name="owner_set-2-place" value="1" '
            'id="id_owner_set-2-place">'
            '<input type="hidden" name="owner_set-2-auto_id" '
            'id="id_owner_set-2-auto_id"></p>',
        )
        data = {
            "owner_set-TOTAL_FORMS": "3",
            "owner_set-INITIAL_FORMS": "1",
            "owner_set-MAX_NUM_FORMS": "",
            "owner_set-0-auto_id": str(owner1.auto_id),
            "owner_set-0-name": "Joe Perry",
            "owner_set-1-auto_id": "",
            "owner_set-1-name": "Jack Berry",
            "owner_set-2-auto_id": "",
            "owner_set-2-name": "",
        }
        formset = FormSet(data, instance=place)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        (owner2,) = saved
        self.assertEqual(owner2.name, "Jack Berry")
        self.assertEqual(owner2.place.name, "Giordanos")
        # A custom primary key that is a ForeignKey or OneToOneField get
        # rendered for the user to choose.
        FormSet = modelformset_factory(OwnerProfile, fields="__all__")
        formset = FormSet()
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_form-0-owner">Owner:</label>'
            '<select name="form-0-owner" id="id_form-0-owner">'
            '<option value="" selected>---------</option>'
            '<option value="%d">Joe Perry at Giordanos</option>'
            '<option value="%d">Jack Berry at Giordanos</option>'
            "</select></p>"
            '<p><label for="id_form-0-age">Age:</label>'
            '<input type="number" name="form-0-age" id="id_form-0-age" min="0"></p>'
            % (owner1.auto_id, owner2.auto_id),
        )
        owner1 = Owner.objects.get(name="Joe Perry")
        FormSet = inlineformset_factory(
            Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__"
        )
        self.assertEqual(FormSet.max_num, 1)
        formset = FormSet(instance=owner1)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_ownerprofile-0-age">Age:</label>'
            '<input type="number" name="ownerprofile-0-age" '
            'id="id_ownerprofile-0-age" min="0">'
            '<input type="hidden" name="ownerprofile-0-owner" value="%d" '
            'id="id_ownerprofile-0-owner"></p>' % owner1.auto_id,
        )
        data = {
            "ownerprofile-TOTAL_FORMS": "1",
            "ownerprofile-INITIAL_FORMS": "0",
            "ownerprofile-MAX_NUM_FORMS": "1",
            "ownerprofile-0-owner": "",
            "ownerprofile-0-age": "54",
        }
        formset = FormSet(data, instance=owner1)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        (profile1,) = saved
        self.assertEqual(profile1.owner, owner1)
        self.assertEqual(profile1.age, 54)
        formset = FormSet(instance=owner1)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_ownerprofile-0-age">Age:</label>'
            '<input type="number" name="ownerprofile-0-age" value="54" '
            'id="id_ownerprofile-0-age" min="0">'
            '<input type="hidden" name="ownerprofile-0-owner" value="%d" '
            'id="id_ownerprofile-0-owner"></p>' % owner1.auto_id,
        )
        data = {
            "ownerprofile-TOTAL_FORMS": "1",
            "ownerprofile-INITIAL_FORMS": "1",
            "ownerprofile-MAX_NUM_FORMS": "1",
            "ownerprofile-0-owner": str(owner1.auto_id),
            "ownerprofile-0-age": "55",
        }
        formset = FormSet(data, instance=owner1)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        (profile1,) = saved
        self.assertEqual(profile1.owner, owner1)
        self.assertEqual(profile1.age, 55)
    def test_unique_true_enforces_max_num_one(self):
        """
        A unique=True ForeignKey on the inline model forces max_num=1, so
        exactly one form is rendered for the related object.
        """
        # ForeignKey with unique=True should enforce max_num=1
        place = Place.objects.create(pk=1, name="Giordanos", city="Chicago")
        FormSet = inlineformset_factory(
            Place, Location, can_delete=False, fields="__all__"
        )
        self.assertEqual(FormSet.max_num, 1)
        formset = FormSet(instance=place)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_location_set-0-lat">Lat:</label>'
            '<input id="id_location_set-0-lat" type="text" name="location_set-0-lat" '
            'maxlength="100"></p>'
            '<p><label for="id_location_set-0-lon">Lon:</label>'
            '<input id="id_location_set-0-lon" type="text" name="location_set-0-lon" '
            'maxlength="100">'
            '<input type="hidden" name="location_set-0-place" value="1" '
            'id="id_location_set-0-place">'
            '<input type="hidden" name="location_set-0-id" '
            'id="id_location_set-0-id"></p>',
        )
def test_foreign_keys_in_parents(self):
self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey)
self.assertEqual(
type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey
)
def test_unique_validation(self):
FormSet = modelformset_factory(Product, fields="__all__", extra=1)
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-slug": "car-red",
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(product1,) = saved
self.assertEqual(product1.slug, "car-red")
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-slug": "car-red",
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors, [{"slug": ["Product with this Slug already exists."]}]
)
def test_modelformset_validate_max_flag(self):
# If validate_max is set and max_num is less than TOTAL_FORMS in the
# data, then throw an exception. MAX_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "2", # should be ignored
"form-0-price": "12.00",
"form-0-quantity": "1",
"form-1-price": "24.00",
"form-1-quantity": "2",
}
FormSet = modelformset_factory(
Price, fields="__all__", extra=1, max_num=1, validate_max=True
)
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ["Please submit at most 1 form."])
# Now test the same thing without the validate_max flag to ensure
# default behavior is unchanged
FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1)
formset = FormSet(data)
self.assertTrue(formset.is_valid())
def test_modelformset_min_num_equals_max_num_less_than(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "2",
"form-0-slug": "car-red",
"form-1-slug": "car-blue",
"form-2-slug": "car-black",
}
FormSet = modelformset_factory(
Product,
fields="__all__",
extra=1,
max_num=2,
validate_max=True,
min_num=2,
validate_min=True,
)
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ["Please submit at most 2 forms."])
def test_modelformset_min_num_equals_max_num_more_than(self):
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "2",
"form-0-slug": "car-red",
}
FormSet = modelformset_factory(
Product,
fields="__all__",
extra=1,
max_num=2,
validate_max=True,
min_num=2,
validate_min=True,
)
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ["Please submit at least 2 forms."])
def test_unique_together_validation(self):
FormSet = modelformset_factory(Price, fields="__all__", extra=1)
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-price": "12.00",
"form-0-quantity": "1",
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(price1,) = saved
self.assertEqual(price1.price, Decimal("12.00"))
self.assertEqual(price1.quantity, 1)
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-price": "12.00",
"form-0-quantity": "1",
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{"__all__": ["Price with this Price and Quantity already exists."]}],
)
def test_unique_together_with_inlineformset_factory(self):
# Also see bug #8882.
repository = Repository.objects.create(name="Test Repo")
FormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__")
data = {
"revision_set-TOTAL_FORMS": "1",
"revision_set-INITIAL_FORMS": "0",
"revision_set-MAX_NUM_FORMS": "",
"revision_set-0-repository": repository.pk,
"revision_set-0-revision": "146239817507f148d448db38840db7c3cbf47c76",
"revision_set-0-DELETE": "",
}
formset = FormSet(data, instance=repository)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
(revision1,) = saved
self.assertEqual(revision1.repository, repository)
self.assertEqual(revision1.revision, "146239817507f148d448db38840db7c3cbf47c76")
# attempt to save the same revision against the same repo.
data = {
"revision_set-TOTAL_FORMS": "1",
"revision_set-INITIAL_FORMS": "0",
"revision_set-MAX_NUM_FORMS": "",
"revision_set-0-repository": repository.pk,
"revision_set-0-revision": "146239817507f148d448db38840db7c3cbf47c76",
"revision_set-0-DELETE": "",
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[
{
"__all__": [
"Revision with this Repository and Revision already exists."
]
}
],
)
# unique_together with inlineformset_factory with overridden form
# fields Also see #9494
FormSet = inlineformset_factory(
Repository, Revision, fields=("revision",), extra=1
)
data = {
"revision_set-TOTAL_FORMS": "1",
"revision_set-INITIAL_FORMS": "0",
"revision_set-MAX_NUM_FORMS": "",
"revision_set-0-repository": repository.pk,
"revision_set-0-revision": "146239817507f148d448db38840db7c3cbf47c76",
"revision_set-0-DELETE": "",
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
    def test_callable_defaults(self):
        """
        Model fields with callable defaults get an extra hidden "initial-*"
        input so changed-data detection can compare the submitted value
        against the rendered default (#7975); also verified with a split
        datetime widget.
        """
        # Use of callable defaults (see bug #7975).
        person = Person.objects.create(name="Ringo")
        FormSet = inlineformset_factory(
            Person, Membership, can_delete=False, extra=1, fields="__all__"
        )
        formset = FormSet(instance=person)
        # Django will render a hidden field for model fields that have a
        # callable default. This is required to ensure the value is tested for
        # change correctly when determine what extra forms have changed to
        # save.
        self.assertEqual(len(formset.forms), 1)  # this formset only has one form
        form = formset.forms[0]
        now = form.fields["date_joined"].initial()
        result = form.as_p()
        # Normalize the rendered timestamps so the HTML comparison is stable.
        result = re.sub(
            r"[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(?:\.[0-9]+)?",
            "__DATETIME__",
            result,
        )
        self.assertHTMLEqual(
            result,
            '<p><label for="id_membership_set-0-date_joined">Date joined:</label>'
            '<input type="text" name="membership_set-0-date_joined" '
            'value="__DATETIME__" id="id_membership_set-0-date_joined">'
            '<input type="hidden" name="initial-membership_set-0-date_joined" '
            'value="__DATETIME__" '
            'id="initial-membership_set-0-id_membership_set-0-date_joined"></p>'
            '<p><label for="id_membership_set-0-karma">Karma:</label>'
            '<input type="number" name="membership_set-0-karma" '
            'id="id_membership_set-0-karma">'
            '<input type="hidden" name="membership_set-0-person" value="%d" '
            'id="id_membership_set-0-person">'
            '<input type="hidden" name="membership_set-0-id" '
            'id="id_membership_set-0-id"></p>' % person.id,
        )
        # test for validation with callable defaults. Validations rely on
        # hidden fields
        data = {
            "membership_set-TOTAL_FORMS": "1",
            "membership_set-INITIAL_FORMS": "0",
            "membership_set-MAX_NUM_FORMS": "",
            "membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"),
            "initial-membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"),
            "membership_set-0-karma": "",
        }
        formset = FormSet(data, instance=person)
        self.assertTrue(formset.is_valid())
        # now test for when the data changes
        one_day_later = now + datetime.timedelta(days=1)
        filled_data = {
            "membership_set-TOTAL_FORMS": "1",
            "membership_set-INITIAL_FORMS": "0",
            "membership_set-MAX_NUM_FORMS": "",
            "membership_set-0-date_joined": one_day_later.strftime("%Y-%m-%d %H:%M:%S"),
            "initial-membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"),
            "membership_set-0-karma": "",
        }
        formset = FormSet(filled_data, instance=person)
        self.assertFalse(formset.is_valid())
        # now test with split datetime fields
        class MembershipForm(forms.ModelForm):
            date_joined = forms.SplitDateTimeField(initial=now)
            class Meta:
                model = Membership
                fields = "__all__"
            def __init__(self, **kwargs):
                super().__init__(**kwargs)
                self.fields["date_joined"].widget = forms.SplitDateTimeWidget()
        FormSet = inlineformset_factory(
            Person,
            Membership,
            form=MembershipForm,
            can_delete=False,
            extra=1,
            fields="__all__",
        )
        data = {
            "membership_set-TOTAL_FORMS": "1",
            "membership_set-INITIAL_FORMS": "0",
            "membership_set-MAX_NUM_FORMS": "",
            "membership_set-0-date_joined_0": now.strftime("%Y-%m-%d"),
            "membership_set-0-date_joined_1": now.strftime("%H:%M:%S"),
            "initial-membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"),
            "membership_set-0-karma": "",
        }
        formset = FormSet(data, instance=person)
        self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
# inlineformset_factory tests with fk having null=True. see #9462.
# create some data that will exhibit the issue
team = Team.objects.create(name="Red Vipers")
Player(name="Timmy").save()
Player(name="Bobby", team=team).save()
PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__")
formset = PlayerInlineFormSet()
self.assertQuerySetEqual(formset.get_queryset(), [])
formset = PlayerInlineFormSet(instance=team)
players = formset.get_queryset()
self.assertEqual(len(players), 1)
(player1,) = players
self.assertEqual(player1.team, team)
self.assertEqual(player1.name, "Bobby")
    def test_inlineformset_with_arrayfield(self):
        class SimpleArrayField(forms.CharField):
            """A proxy for django.contrib.postgres.forms.SimpleArrayField."""
            def to_python(self, value):
                # Comma-separated text cleans to a list; empty input to [].
                value = super().to_python(value)
                return value.split(",") if value else []
        class BookForm(forms.ModelForm):
            title = SimpleArrayField()
            class Meta:
                model = Book
                fields = ["title"]
        BookFormSet = inlineformset_factory(Author, Book, form=BookForm)
        self.assertEqual(BookForm.Meta.fields, ["title"])
        data = {
            "book_set-TOTAL_FORMS": "3",
            "book_set-INITIAL_FORMS": "0",
            "book_set-MAX_NUM_FORMS": "",
            "book_set-0-title": "test1,test2",
            "book_set-1-title": "test1,test2",
            "book_set-2-title": "test3,test4",
        }
        author = Author.objects.create(name="test")
        formset = BookFormSet(data, instance=author)
        self.assertEqual(BookForm.Meta.fields, ["title"])
        # Forms 0 and 1 clean to equal (unhashable) list values, so only
        # form 1 is flagged as the duplicate.
        self.assertEqual(
            formset.errors,
            [{}, {"__all__": ["Please correct the duplicate values below."]}, {}],
        )
    def test_inlineformset_with_jsonfield(self):
        class BookForm(forms.ModelForm):
            # Treat the title as JSON so cleaned values are (unhashable) dicts.
            title = forms.JSONField()
            class Meta:
                model = Book
                fields = ("title",)
        BookFormSet = inlineformset_factory(Author, Book, form=BookForm)
        data = {
            "book_set-TOTAL_FORMS": "3",
            "book_set-INITIAL_FORMS": "0",
            "book_set-MAX_NUM_FORMS": "",
            "book_set-0-title": {"test1": "test2"},
            "book_set-1-title": {"test1": "test2"},
            "book_set-2-title": {"test3": "test4"},
        }
        author = Author.objects.create(name="test")
        formset = BookFormSet(data, instance=author)
        # Forms 0 and 1 carry equal dict values, so only form 1 is flagged.
        self.assertEqual(
            formset.errors,
            [{}, {"__all__": ["Please correct the duplicate values below."]}, {}],
        )
def test_model_formset_with_custom_pk(self):
# a formset for a Model that has a custom primary key that still needs
# to be added to the formset automatically
FormSet = modelformset_factory(
ClassyMexicanRestaurant, fields=["tacos_are_yummy"]
)
self.assertEqual(
sorted(FormSet().forms[0].fields), ["tacos_are_yummy", "the_restaurant"]
)
def test_model_formset_with_initial_model_instance(self):
# has_changed should compare model instance and primary key
# see #18898
FormSet = modelformset_factory(Poem, fields="__all__")
john_milton = Poet(name="John Milton")
john_milton.save()
data = {
"form-TOTAL_FORMS": 1,
"form-INITIAL_FORMS": 0,
"form-MAX_NUM_FORMS": "",
"form-0-name": "",
"form-0-poet": str(john_milton.id),
}
formset = FormSet(initial=[{"poet": john_milton}], data=data)
self.assertFalse(formset.extra_forms[0].has_changed())
def test_model_formset_with_initial_queryset(self):
# has_changed should work with queryset and list of pk's
# see #18898
FormSet = modelformset_factory(AuthorMeeting, fields="__all__")
Author.objects.create(pk=1, name="Charles Baudelaire")
data = {
"form-TOTAL_FORMS": 1,
"form-INITIAL_FORMS": 0,
"form-MAX_NUM_FORMS": "",
"form-0-name": "",
"form-0-created": "",
"form-0-authors": list(Author.objects.values_list("id", flat=True)),
}
formset = FormSet(initial=[{"authors": Author.objects.all()}], data=data)
self.assertFalse(formset.extra_forms[0].has_changed())
    def test_prevent_duplicates_from_with_the_same_formset(self):
        """
        Duplicate values posted within a single formset are caught for
        single unique fields, unique_together pairs, and the
        unique-for-date/year/month constraints, reported as non-form errors
        with the duplicate form also marked.
        """
        FormSet = modelformset_factory(Product, fields="__all__", extra=2)
        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 0,
            "form-MAX_NUM_FORMS": "",
            "form-0-slug": "red_car",
            "form-1-slug": "red_car",
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(
            formset._non_form_errors, ["Please correct the duplicate data for slug."]
        )
        FormSet = modelformset_factory(Price, fields="__all__", extra=2)
        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 0,
            "form-MAX_NUM_FORMS": "",
            "form-0-price": "25",
            "form-0-quantity": "7",
            "form-1-price": "25",
            "form-1-quantity": "7",
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(
            formset._non_form_errors,
            [
                "Please correct the duplicate data for price and quantity, which must "
                "be unique."
            ],
        )
        # Only the price field is specified, this should skip any unique
        # checks since the unique_together is not fulfilled. This will fail
        # with a KeyError if broken.
        FormSet = modelformset_factory(Price, fields=("price",), extra=2)
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "0",
            "form-MAX_NUM_FORMS": "",
            "form-0-price": "24",
            "form-1-price": "24",
        }
        formset = FormSet(data)
        self.assertTrue(formset.is_valid())
        FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__")
        author = Author.objects.create(pk=1, name="Charles Baudelaire")
        Book.objects.create(pk=1, author=author, title="Les Paradis Artificiels")
        Book.objects.create(pk=2, author=author, title="Les Fleurs du Mal")
        Book.objects.create(pk=3, author=author, title="Flowers of Evil")
        book_ids = author.book_set.order_by("id").values_list("id", flat=True)
        data = {
            "book_set-TOTAL_FORMS": "2",
            "book_set-INITIAL_FORMS": "2",
            "book_set-MAX_NUM_FORMS": "",
            "book_set-0-title": "The 2008 Election",
            "book_set-0-author": str(author.id),
            "book_set-0-id": str(book_ids[0]),
            "book_set-1-title": "The 2008 Election",
            "book_set-1-author": str(author.id),
            "book_set-1-id": str(book_ids[1]),
        }
        formset = FormSet(data=data, instance=author)
        self.assertFalse(formset.is_valid())
        self.assertEqual(
            formset._non_form_errors, ["Please correct the duplicate data for title."]
        )
        self.assertEqual(
            formset.errors,
            [{}, {"__all__": ["Please correct the duplicate values below."]}],
        )
        FormSet = modelformset_factory(Post, fields="__all__", extra=2)
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "0",
            "form-MAX_NUM_FORMS": "",
            "form-0-title": "blah",
            "form-0-slug": "Morning",
            "form-0-subtitle": "foo",
            "form-0-posted": "2009-01-01",
            "form-1-title": "blah",
            "form-1-slug": "Morning in Prague",
            "form-1-subtitle": "rawr",
            "form-1-posted": "2009-01-01",
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(
            formset._non_form_errors,
            [
                "Please correct the duplicate data for title which must be unique for "
                "the date in posted."
            ],
        )
        self.assertEqual(
            formset.errors,
            [{}, {"__all__": ["Please correct the duplicate values below."]}],
        )
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "0",
            "form-MAX_NUM_FORMS": "",
            "form-0-title": "foo",
            "form-0-slug": "Morning in Prague",
            "form-0-subtitle": "foo",
            "form-0-posted": "2009-01-01",
            "form-1-title": "blah",
            "form-1-slug": "Morning in Prague",
            "form-1-subtitle": "rawr",
            "form-1-posted": "2009-08-02",
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(
            formset._non_form_errors,
            [
                "Please correct the duplicate data for slug which must be unique for "
                "the year in posted."
            ],
        )
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "0",
            "form-MAX_NUM_FORMS": "",
            "form-0-title": "foo",
            "form-0-slug": "Morning in Prague",
            "form-0-subtitle": "rawr",
            "form-0-posted": "2008-08-01",
            "form-1-title": "blah",
            "form-1-slug": "Prague",
            "form-1-subtitle": "rawr",
            "form-1-posted": "2009-08-02",
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(
            formset._non_form_errors,
            [
                "Please correct the duplicate data for subtitle which must be unique "
                "for the month in posted."
            ],
        )
def test_prevent_change_outer_model_and_create_invalid_data(self):
author = Author.objects.create(name="Charles")
other_author = Author.objects.create(name="Walt")
AuthorFormSet = modelformset_factory(Author, fields="__all__")
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "",
"form-0-id": str(author.id),
"form-0-name": "Charles",
"form-1-id": str(other_author.id), # A model not in the formset's queryset.
"form-1-name": "Changed name",
}
# This formset is only for Walt Whitman and shouldn't accept data for
# other_author.
formset = AuthorFormSet(
data=data, queryset=Author.objects.filter(id__in=(author.id,))
)
self.assertTrue(formset.is_valid())
formset.save()
# The name of other_author shouldn't be changed and new models aren't
# created.
self.assertSequenceEqual(Author.objects.all(), [author, other_author])
def test_validation_without_id(self):
AuthorFormSet = modelformset_factory(Author, fields="__all__")
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "",
"form-0-name": "Charles",
}
formset = AuthorFormSet(data)
self.assertEqual(
formset.errors,
[{"id": ["This field is required."]}],
)
def test_validation_with_child_model_without_id(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__")
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "",
"form-0-name": "Charles",
"form-0-write_speed": "10",
}
formset = BetterAuthorFormSet(data)
self.assertEqual(
formset.errors,
[{"author_ptr": ["This field is required."]}],
)
def test_validation_with_invalid_id(self):
AuthorFormSet = modelformset_factory(Author, fields="__all__")
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "",
"form-0-id": "abc",
"form-0-name": "Charles",
}
formset = AuthorFormSet(data)
self.assertEqual(
formset.errors,
[
{
"id": [
"Select a valid choice. That choice is not one of the "
"available choices."
]
}
],
)
def test_validation_with_nonexistent_id(self):
AuthorFormSet = modelformset_factory(Author, fields="__all__")
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "",
"form-0-id": "12345",
"form-0-name": "Charles",
}
formset = AuthorFormSet(data)
self.assertEqual(
formset.errors,
[
{
"id": [
"Select a valid choice. That choice is not one of the "
"available choices."
]
}
],
)
def test_initial_form_count_empty_data(self):
AuthorFormSet = modelformset_factory(Author, fields="__all__")
formset = AuthorFormSet({})
self.assertEqual(formset.initial_form_count(), 0)
def test_edit_only(self):
charles = Author.objects.create(name="Charles Baudelaire")
AuthorFormSet = modelformset_factory(Author, fields="__all__", edit_only=True)
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
"form-0-name": "Arthur Rimbaud",
"form-1-name": "Walt Whitman",
}
formset = AuthorFormSet(data)
self.assertIs(formset.is_valid(), True)
formset.save()
self.assertSequenceEqual(Author.objects.all(), [charles])
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": charles.pk,
"form-0-name": "Arthur Rimbaud",
"form-1-name": "Walt Whitman",
}
formset = AuthorFormSet(data)
self.assertIs(formset.is_valid(), True)
formset.save()
charles.refresh_from_db()
self.assertEqual(charles.name, "Arthur Rimbaud")
self.assertSequenceEqual(Author.objects.all(), [charles])
def test_edit_only_inlineformset_factory(self):
charles = Author.objects.create(name="Charles Baudelaire")
book = Book.objects.create(author=charles, title="Les Paradis Artificiels")
AuthorFormSet = inlineformset_factory(
Author,
Book,
can_delete=False,
fields="__all__",
edit_only=True,
)
data = {
"book_set-TOTAL_FORMS": "4",
"book_set-INITIAL_FORMS": "1",
"book_set-MAX_NUM_FORMS": "0",
"book_set-0-id": book.pk,
"book_set-0-title": "Les Fleurs du Mal",
"book_set-0-author": charles.pk,
"book_set-1-title": "Flowers of Evil",
"book_set-1-author": charles.pk,
}
formset = AuthorFormSet(data, instance=charles)
self.assertIs(formset.is_valid(), True)
formset.save()
book.refresh_from_db()
self.assertEqual(book.title, "Les Fleurs du Mal")
self.assertSequenceEqual(Book.objects.all(), [book])
def test_edit_only_object_outside_of_queryset(self):
charles = Author.objects.create(name="Charles Baudelaire")
walt = Author.objects.create(name="Walt Whitman")
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-0-id": walt.pk,
"form-0-name": "Parth Patil",
}
AuthorFormSet = modelformset_factory(Author, fields="__all__", edit_only=True)
formset = AuthorFormSet(data, queryset=Author.objects.filter(pk=charles.pk))
self.assertIs(formset.is_valid(), True)
formset.save()
self.assertCountEqual(Author.objects.all(), [charles, walt])
    def test_edit_only_formset_factory_with_basemodelformset(self):
        charles = Author.objects.create(name="Charles Baudelaire")
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = "__all__"
        class BaseAuthorFormSet(BaseModelFormSet):
            # BaseModelFormSet needs self.model; set it here because plain
            # formset_factory() (unlike modelformset_factory()) doesn't.
            def __init__(self, *args, **kwargs):
                self.model = Author
                super().__init__(*args, **kwargs)
        AuthorFormSet = formset_factory(AuthorForm, formset=BaseAuthorFormSet)
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "1",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": charles.pk,
            "form-0-name": "Shawn Dong",
            "form-1-name": "Walt Whitman",
        }
        formset = AuthorFormSet(data)
        self.assertIs(formset.is_valid(), True)
        formset.save()
        # The initial form updated Charles; the extra form created one author.
        self.assertEqual(Author.objects.count(), 2)
        charles.refresh_from_db()
        self.assertEqual(charles.name, "Shawn Dong")
        self.assertEqual(Author.objects.count(), 2)
| ModelFormsetTest |
python | django__django | tests/staticfiles_tests/test_finders.py | {
"start": 1438,
"end": 1926
} | class ____(TestFinders, StaticFilesTestCase):
"""
Test AppDirectoriesFinder.
"""
def setUp(self):
super().setUp()
self.finder = finders.AppDirectoriesFinder()
test_file_path = os.path.join(
TEST_ROOT, "apps", "test", "static", "test", "file1.txt"
)
self.find_first = (os.path.join("test", "file1.txt"), test_file_path)
self.find_all = (os.path.join("test", "file1.txt"), [test_file_path])
| TestAppDirectoriesFinder |
python | Textualize__rich | rich/markdown.py | {
"start": 7798,
"end": 8187
} | class ____(MarkdownElement):
"""MarkdownElement corresponding to `tbody_open` and `tbody_close`."""
def __init__(self) -> None:
self.rows: list[TableRowElement] = []
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
assert isinstance(child, TableRowElement)
self.rows.append(child)
return False
| TableBodyElement |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 182472,
"end": 182836
} | class ____(
util.memoized_property["TypeEngine[_T]"]
):
"""memoized property, but dont memoize NullType"""
def __get__(self, obj, cls):
if obj is None:
return self
result = self.fget(obj)
if not result._isnull:
obj.__dict__[self.__name__] = result
return result
| _memoized_property_but_not_nulltype |
python | huggingface__transformers | src/transformers/models/distilbert/modeling_distilbert.py | {
"start": 21693,
"end": 26202
} | class ____(DistilBertPreTrainedModel):
def __init__(self, config: PreTrainedConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, config.num_labels)
self.dropout = nn.Dropout(config.seq_classif_dropout)
# Initialize weights and apply final processing
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embedding matrix. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[SequenceClassifierOutput, tuple[torch.Tensor, ...]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
distilbert_output = self.distilbert(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
position_ids=position_ids,
return_dict=True,
**kwargs,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output) # (bs, dim)
logits = self.classifier(pooled_output) # (bs, num_labels)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
@auto_docstring
| DistilBertForSequenceClassification |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 7852,
"end": 8794
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("de_DE")
self.rvnr_pattern: Pattern = re.compile(r"\d{8}[A-Z]\d{3}")
self.kvnr_pattern: Pattern = re.compile(r"[A-Z]\d{19}")
Faker.seed(0)
def test_vat_id(self):
for _ in range(100):
assert re.search(r"^DE\d{9}$", self.fake.vat_id())
def test_rvnr(self):
for _ in range(100):
rvnr = self.fake.rvnr()
assert self.rvnr_pattern.fullmatch(rvnr)
def test_rvnr_birthdate(self):
for _ in range(100):
birthdate: datetime.date = self.fake.date_object()
rvnr = self.fake.rvnr(birthdate)
assert self.rvnr_pattern.fullmatch(rvnr)
assert rvnr[2:8] == birthdate.strftime("%d%m%y")
def test_kvnr(self):
for _ in range(100):
kvnr = self.fake.kvnr()
assert self.kvnr_pattern.fullmatch(kvnr)
| TestDeDe |
python | sqlalchemy__sqlalchemy | test/orm/test_dynamic.py | {
"start": 31497,
"end": 41744
} | class ____:
    """Unit-of-work tests shared by dynamic / write_only collection suites.

    ``self.lazy`` is provided by the concrete subclass (values seen here:
    "dynamic" and "write_only"); ``user_address_fixture`` and
    ``self._expect_no_iteration`` are likewise supplied elsewhere.
    """
    run_inserts = None
    def _list_collection(self, collection):
        # "dynamic" collections are directly iterable; "write_only" ones must
        # be read back through an explicit SELECT on the owning session.
        if self.lazy == "dynamic":
            return list(collection)
        sess = inspect(collection.instance).session
        return sess.scalars(collection.select()).all()
    def test_persistence(self, user_address_fixture):
        # Add/remove members through the collection API and verify the
        # addresses table FK column after each flush.
        addresses = self.tables.addresses
        User, Address = user_address_fixture()
        sess = fixture_session()
        u1 = User(name="jack")
        a1 = Address(email_address="foo")
        sess.add_all([u1, a1])
        sess.flush()
        eq_(
            sess.connection().scalar(
                select(func.count(cast(1, Integer))).where(
                    addresses.c.user_id != None
                )
            ),  # noqa
            0,
        )
        u1 = sess.get(User, u1.id)
        u1.addresses.add(a1)
        sess.flush()
        eq_(
            sess.connection()
            .execute(
                select(addresses).where(addresses.c.user_id != None)  # noqa
            )
            .fetchall(),
            [(a1.id, u1.id, "foo")],
        )
        u1.addresses.remove(a1)
        sess.flush()
        eq_(
            sess.connection().scalar(
                select(func.count(cast(1, Integer))).where(
                    addresses.c.user_id != None
                )
            ),  # noqa
            0,
        )
        u1.addresses.add(a1)
        sess.flush()
        eq_(
            sess.connection()
            .execute(
                select(addresses).where(addresses.c.user_id != None)  # noqa
            )
            .fetchall(),
            [(a1.id, u1.id, "foo")],
        )
        a2 = Address(email_address="bar")
        u1.addresses.remove(a1)
        u1.addresses.add(a2)
        sess.flush()
        eq_(
            sess.connection()
            .execute(
                select(addresses).where(addresses.c.user_id != None)  # noqa
            )
            .fetchall(),
            [(a2.id, u1.id, "bar")],
        )
    def test_hasattr(self, user_address_fixture):
        # Assigning to the collection attribute populates instance __dict__.
        User, Address = user_address_fixture()
        u1 = User(name="jack")
        assert "addresses" not in u1.__dict__
        u1.addresses = [Address(email_address="test")]
        assert "addresses" in u1.__dict__
    def test_collection_set(self, user_address_fixture):
        addresses = self.tables.addresses
        User, Address = user_address_fixture(
            addresses_args={"order_by": addresses.c.email_address}
        )
        sess = fixture_session(
            autoflush=True,
        )
        u1 = User(name="jack")
        a1 = Address(email_address="a1")
        a2 = Address(email_address="a2")
        a3 = Address(email_address="a3")
        a4 = Address(email_address="a4")
        sess.add(u1)
        u1.addresses = [a1, a3]
        eq_(self._list_collection(u1.addresses), [a1, a3])
        # write_only collections refuse implicit iteration, so bulk-set
        # raises; remaining assertions only apply to "dynamic".
        if User.addresses.property.lazy == "write_only":
            with self._expect_no_iteration():
                u1.addresses = [a1, a2, a4]
            return
        u1.addresses = [a1, a2, a4]
        eq_(list(u1.addresses), [a1, a2, a4])
        u1.addresses = [a2, a3]
        eq_(list(u1.addresses), [a2, a3])
        u1.addresses = []
        eq_(list(u1.addresses), [])
    def test_noload_add(self, user_address_fixture):
        # test that a load of User.addresses is not emitted
        # when flushing an add
        User, Address = user_address_fixture()
        sess = fixture_session()
        u1 = User(name="jack", addresses=[Address(email_address="a1")])
        sess.add(u1)
        sess.commit()
        u1_id = u1.id
        sess.expire_all()
        u1.addresses.add(Address(email_address="a2"))
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "SELECT users.id, users.name "
                "FROM users WHERE users.id = :pk_1",
                lambda ctx: [{"pk_1": u1_id}],
            ),
            CompiledSQL(
                "INSERT INTO addresses (user_id, email_address) "
                "VALUES (:user_id, :email_address)",
                lambda ctx: [{"email_address": "a2", "user_id": u1_id}],
            ),
        )
    def test_noload_remove(self, user_address_fixture):
        # test that a load of User.addresses is not emitted
        # when flushing a remove
        User, Address = user_address_fixture()
        sess = fixture_session()
        u1 = User(name="jack", addresses=[Address(email_address="a1")])
        a2 = Address(email_address="a2")
        u1.addresses.add(a2)
        sess.add(u1)
        sess.commit()
        u1_id = u1.id
        a2_id = a2.id
        sess.expire_all()
        u1.addresses.remove(a2)
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "SELECT addresses.id, addresses.email_address "
                "FROM addresses "
                "WHERE addresses.id = :pk_1",
                lambda ctx: [{"pk_1": a2_id}],
            ),
            CompiledSQL(
                "UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
                ":addresses_id",
                lambda ctx: [{"addresses_id": a2_id, "user_id": None}],
            ),
            CompiledSQL(
                "SELECT users.id, users.name "
                "FROM users WHERE users.id = :pk_1",
                lambda ctx: [{"pk_1": u1_id}],
            ),
        )
    def test_rollback(self, user_address_fixture):
        # A pending collection addition must disappear on rollback.
        User, Address = user_address_fixture()
        sess = fixture_session(expire_on_commit=False, autoflush=True)
        u1 = User(name="jack")
        u1.addresses.add(Address(email_address="lala@hoho.com"))
        sess.add(u1)
        sess.flush()
        sess.commit()
        u1.addresses.add(Address(email_address="foo@bar.com"))
        if self.lazy == "dynamic":
            stmt = u1.addresses.statement
        else:
            stmt = u1.addresses.select()
        eq_(
            sess.scalars(stmt.order_by(Address.id)).all(),
            [
                Address(email_address="lala@hoho.com"),
                Address(email_address="foo@bar.com"),
            ],
        )
        sess.rollback()
        eq_(
            sess.scalars(stmt).all(),
            [Address(email_address="lala@hoho.com")],
        )
    def test_self_referential(self):
        Node, nodes = self.classes.Node, self.tables.nodes
        self.mapper_registry.map_imperatively(
            Node,
            nodes,
            properties={
                "children": relationship(
                    Node, lazy="dynamic", order_by=nodes.c.id
                )
            },
        )
        sess = fixture_session()
        n2, n3 = Node(), Node()
        n1 = Node(children=[n2, n3])
        sess.add(n1)
        sess.commit()
        eq_(n1.children.all(), [n2, n3])
    def test_remove_orphans(self, user_address_fixture):
        # With delete-orphan cascade, removing from the collection deletes
        # the Address rows rather than just nulling the FK.
        addresses = self.tables.addresses
        User, Address = user_address_fixture(
            addresses_args={
                "order_by": addresses.c.id,
                "backref": "user",
                "cascade": "all, delete-orphan",
            }
        )
        sess = fixture_session(
            autoflush=True,
        )
        u = User(name="ed")
        u.addresses.add_all(
            [Address(email_address=letter) for letter in "abcdef"]
        )
        sess.add(u)
        if self.lazy == "dynamic":
            stmt = u.addresses.statement
        else:
            stmt = u.addresses.select()
        for a in sess.scalars(
            stmt.filter(Address.email_address.in_(["c", "e", "f"]))
        ):
            u.addresses.remove(a)
        eq_(
            {ad for ad, in sess.query(Address.email_address)},
            {"a", "b", "d"},
        )
    @testing.combinations(True, False, argnames="autoflush")
    @testing.combinations(True, False, argnames="saveuser")
    def test_backref(self, autoflush, saveuser, user_address_fixture):
        User, Address = user_address_fixture(
            addresses_args={"backref": "user"}
        )
        sess = fixture_session(
            autoflush=autoflush,
        )
        u = User(name="buffy")
        a = Address(email_address="foo@bar.com")
        a.user = u
        if saveuser:
            sess.add(u)
        else:
            sess.add(a)
        if not autoflush:
            sess.flush()
        assert u in sess
        assert a in sess
        eq_(self._list_collection(u.addresses), [a])
        a.user = None
        # Without autoflush the backref change is not yet visible in SQL.
        if not autoflush:
            eq_(self._list_collection(u.addresses), [a])
        if not autoflush:
            sess.flush()
        eq_(self._list_collection(u.addresses), [])
    def test_backref_events(self, user_address_fixture):
        # In-Python backref: adding to the collection sets the many-to-one.
        User, Address = user_address_fixture(
            addresses_args={"backref": "user"}
        )
        u1 = User()
        a1 = Address()
        u1.addresses.add(a1)
        is_(a1.user, u1)
    def test_no_deref(self, user_address_fixture):
        User, Address = user_address_fixture(
            addresses_args={"backref": "user"}
        )
        with fixture_session() as session:
            user = User()
            user.name = "joe"
            user.fullname = "Joe User"
            user.password = "Joe's secret"
            address = Address()
            address.email_address = "joe@joesdomain.example"
            address.user = user
            session.add(user)
            session.commit()
        # Three access styles: via a named variable, and twice fully chained
        # without holding an intermediate reference to the User.
        def query1():
            session = fixture_session()
            user = session.query(User).first()
            return self._list_collection(user.addresses)
        def query2():
            session = fixture_session()
            return self._list_collection(session.query(User).first().addresses)
        def query3():
            session = fixture_session()
            return self._list_collection(session.query(User).first().addresses)
        eq_(query1(), [Address(email_address="joe@joesdomain.example")])
        eq_(query2(), [Address(email_address="joe@joesdomain.example")])
        eq_(query3(), [Address(email_address="joe@joesdomain.example")])
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 35340,
"end": 38065
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.OrderedSet = OrderedSet((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.OrderedSet, OrderedSet({2: 1, 4: 3, 6: 5}))
def test_union_subset(self):
result = self.OrderedSet | OrderedSet([2])
self.assertEqual(result, OrderedSet((2, 4, 6)))
def test_union_superset(self):
result = self.OrderedSet | OrderedSet([2, 4, 6, 8])
self.assertEqual(result, OrderedSet([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.OrderedSet | OrderedSet([3, 4, 5])
self.assertEqual(result, OrderedSet([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.OrderedSet | OrderedSet([8])
self.assertEqual(result, OrderedSet([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.OrderedSet & OrderedSet((2, 4))
self.assertEqual(result, OrderedSet((2, 4)))
def test_intersection_superset(self):
result = self.OrderedSet & OrderedSet([2, 4, 6, 8])
self.assertEqual(result, OrderedSet([2, 4, 6]))
def test_intersection_overlap(self):
result = self.OrderedSet & OrderedSet([3, 4, 5])
self.assertEqual(result, OrderedSet([4]))
def test_intersection_non_overlap(self):
result = self.OrderedSet & OrderedSet([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.OrderedSet.isdisjoint(OrderedSet((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.OrderedSet.isdisjoint(OrderedSet([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.OrderedSet.isdisjoint(OrderedSet([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.OrderedSet.isdisjoint(OrderedSet([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.OrderedSet ^ OrderedSet((2, 4))
self.assertEqual(result, OrderedSet([6]))
def test_sym_difference_superset(self):
result = self.OrderedSet ^ OrderedSet((2, 4, 6, 8))
self.assertEqual(result, OrderedSet([8]))
def test_sym_difference_overlap(self):
result = self.OrderedSet ^ OrderedSet((3, 4, 5))
self.assertEqual(result, OrderedSet([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.OrderedSet ^ OrderedSet([8])
self.assertEqual(result, OrderedSet([2, 4, 6, 8]))
# ==============================================================================
| TestBinaryOps |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/stats.py | {
"start": 3921,
"end": 4727
} | class ____(StatsWriter):
"""
Write all stats that we receive to the timer gauges, so we can track them offline easily
"""
@staticmethod
def sanitize_string(s: str) -> str:
"""
Clean up special characters in the category and value names.
"""
return s.replace("/", ".").replace(" ", "")
def write_stats(
self, category: str, values: Dict[str, StatsSummary], step: int
) -> None:
for val, stats_summary in values.items():
set_gauge(
GaugeWriter.sanitize_string(f"{category}.{val}.mean"),
float(stats_summary.mean),
)
set_gauge(
GaugeWriter.sanitize_string(f"{category}.{val}.sum"),
float(stats_summary.sum),
)
| GaugeWriter |
python | huggingface__transformers | src/transformers/models/deepseek_vl/modular_deepseek_vl.py | {
"start": 6792,
"end": 6976
} | class ____(ProcessingKwargs, total=False):
    # Defaults merged into processor calls: text is not padded unless
    # requested, and outputs are returned as PyTorch tensors.
    _defaults = {
        "text_kwargs": {"padding": False},
        "common_kwargs": {"return_tensors": "pt"},
    }
| DeepseekVLProcessorKwargs |
python | walkccc__LeetCode | solutions/1129. Shortest Path with Alternating Colors/1129.py | {
"start": 24,
"end": 77
} | class ____(Enum):
    """Color states; INIT is the neutral starting value distinct from
    RED/BLUE (presumably "no color assigned yet" — confirm with caller)."""
    INIT = 0
    RED = 1
    BLUE = 2
| Color |
python | doocs__leetcode | solution/1600-1699/1647.Minimum Deletions to Make Character Frequencies Unique/Solution.py | {
"start": 0,
"end": 361
} | class ____:
def minDeletions(self, s: str) -> int:
cnt = Counter(s)
ans, pre = 0, inf
for v in sorted(cnt.values(), reverse=True):
if pre == 0:
ans += v
elif v >= pre:
ans += v - pre + 1
pre -= 1
else:
pre = v
return ans
| Solution |
python | ZoranPandovski__al-go-rithms | data_structures/Queue/python/danielcustduran_queue.py | {
"start": 184,
"end": 874
} | class ____:
def __init__(self, head=None):
self.storage = [head]
def enqueue(self, new_element):
self.storage.append(new_element)
def peek(self):
if self.storage:
return self.storage[0]
def dequeue(self):
item_to_dequeue = self.storage[:1][0]
self.storage = self.storage[1:]
return item_to_dequeue
# Smoke test: exercises enqueue/peek/dequeue and prints the expected values.
# Setup
q = Queue(1)
q.enqueue(2)
q.enqueue(3)
# Test peek
# Should be 1
print(q.peek())
# Test dequeue
# Should be 1
print(q.dequeue())
# Test enqueue
q.enqueue(4)
# Should be 2
print(q.dequeue())
# Should be 3
print(q.dequeue())
# Should be 4
print(q.dequeue())
q.enqueue(5)
# Should be 5
print(q.peek())
| Queue |
python | numba__numba | numba/tests/test_sys_monitoring.py | {
"start": 1188,
"end": 34017
} | class ____(TestCase):
# Tests the interaction of the Numba dispatcher with `sys.monitoring`.
#
# Note that it looks like a lot of these try..finally type patterns could
# be written using a context manager, this is true, but it is not written
# like that deliberately as a context manager adds implementation details
# onto the stack which makes it harder to debug tests.
def setUp(self):
# First... check if there's other monitoring stuff registered (e.g. test
# is running under cProfile or coverage), skip if so.
monitor_kinds = []
for i in range(6): # there are 5 tool IDs
if sys.monitoring.get_tool(i) is not None:
monitor_kinds.append(TOOL2MONITORTYPE[i])
if monitor_kinds:
msg = ("Cannot run monitoring tests when other monitors are "
"active, found monitor(s) of type: "
f"{', '.join(monitor_kinds)}")
self.skipTest(msg)
# set up some standard functions and answers for use throughout
self.foo, self.call_foo = generate_usecase()
self.arg = 10
self.foo_result = self.arg + 5 + 1
self.call_foo_result = 2 * self.foo_result
# pretend to be a profiler in the majority of these unit tests
self.tool_id = sys.monitoring.PROFILER_ID
def gather_mock_calls_multithreads(self, mockcalls):
# Gather mock-calls for the `self.foo` and `self.call_foo`
matched = Counter()
target_codeobjs = {self.call_foo.__code__, self.foo.__code__}
for cb_args in mockcalls._mock_call_args_list:
(codeobj, *args) = cb_args.args
if codeobj in target_codeobjs:
matched[(codeobj, *args)] += 1
return matched
def check_py_start_calls_multithreads(self, allcalls):
# Checks that PY_START calls were correctly captured for a
# `self.call_foo(self.arg)` call in multithreads
matched = self.gather_mock_calls_multithreads(allcalls[PY_START])
self.assertEqual(len(matched), 2) # two types of call
# Find the resume op, this is where the code for `call_foo` "starts"
inst = [x for x in dis.get_instructions(self.call_foo)
if x.opname == "RESUME"]
offset = inst[0].offset
self.assertEqual(matched[self.call_foo.__code__, offset], 2)
self.assertEqual(matched[self.foo.__code__, 0], 2)
self.assertEqual(matched.total(), 4)
def check_py_start_calls(self, allcalls):
# Checks that PY_START calls were correctly captured for a
# `self.call_foo(self.arg)` call.
mockcalls = allcalls[PY_START]
self.assertEqual(mockcalls.call_count, 2)
# Find the resume op, this is where the code for `call_foo` "starts"
inst = [x for x in dis.get_instructions(self.call_foo)
if x.opname == "RESUME"]
offset = inst[0].offset
# Numba always reports the start location as offset 0.
calls = (call(self.call_foo.__code__, offset),
call(self.foo.__code__, 0))
mockcalls.assert_has_calls(calls)
def check_py_return_calls_multithreads(self, allcalls):
# Checks that PY_RETURN calls were correctly captured for a
# `self.call_foo(self.arg)` call.
matched = self.gather_mock_calls_multithreads(allcalls[PY_RETURN])
offset = [x for x in dis.get_instructions(self.call_foo)][-1].offset
self.assertEqual(matched[self.foo.__code__, 0, self.foo_result], 2)
self.assertEqual(
matched[self.call_foo.__code__, offset, self.call_foo_result], 2
)
self.assertEqual(matched.total(), 4)
def check_py_return_calls(self, allcalls):
# Checks that PY_RETURN calls were correctly captured for a
# `self.call_foo(self.arg)` call.
mockcalls = allcalls[PY_RETURN]
self.assertEqual(mockcalls.call_count, 2)
# These are in the order the returns were encountered. Return from `foo`
# occurred first, followed by return from `call_foo`.
# NOTE: it is a known issue that Numba reports the PY_RETURN event as
# occurring at offset 0. At present there's no information about the
# location that the return occurred propagating from the machine code
# back to the dispatcher (where the monitoring events are handled).
offset = [x for x in dis.get_instructions(self.call_foo)][-1].offset
calls = [call(self.foo.__code__, 0, self.foo_result),
call(self.call_foo.__code__, offset, self.call_foo_result)]
mockcalls.assert_has_calls(calls)
def run_with_events(self, function, args, events, tool_id=None,
barrier=None):
# Runs function with args with monitoring set for events on `tool_id`
# (if present, else just uses the default of "PROFILER_ID") returns a
# dictionary event->callback.
try:
if tool_id is None:
_tool_id = self.tool_id
else:
_tool_id = tool_id
sys.monitoring.use_tool_id(_tool_id, "custom_monitor")
callbacks = {}
event_bitmask = 0
for event in events:
callback = Mock()
sys.monitoring.register_callback(_tool_id, event, callback)
callbacks[event] = callback
event_bitmask |= event
# only start monitoring once callbacks are registered
sys.monitoring.set_events(_tool_id, event_bitmask)
if barrier is not None:
# Wait for all threads to have enabled events.
barrier()
function(*args)
finally:
# clean up state
if barrier is not None:
# Wait for all threads to finish `function()`
# This makes sure all threads have a chance to see the events
# from other threads.
barrier()
sys.monitoring.set_events(_tool_id, NO_EVENTS)
for event in events:
sys.monitoring.register_callback(_tool_id, event, None)
sys.monitoring.free_tool_id(_tool_id)
return callbacks
def test_start_event(self):
# test event PY_START
cb = self.run_with_events(self.call_foo, (self.arg,), (PY_START,))
# Check...
self.assertEqual(len(cb), 1)
self.check_py_start_calls(cb)
def test_return_event(self):
# test event PY_RETURN
cb = self.run_with_events(self.call_foo, (self.arg,), (PY_RETURN,))
# Check...
self.assertEqual(len(cb), 1)
self.check_py_return_calls(cb)
def test_call_event_chain(self):
# test event PY_START and PY_RETURN monitored at the same time
cb = self.run_with_events(self.call_foo, (self.arg,),
(PY_START, PY_RETURN))
# Check...
self.assertEqual(len(cb), 2)
self.check_py_return_calls(cb)
self.check_py_start_calls(cb)
# --------------------------------------------------------------------------
# NOTE: About the next two tests...
# Numba doesn't support "local event" level monitoring, it's implemented
# in CPython via adjusting the code object bytecode to use
# "instrumented" opcodes. When the interpreter encounters an
# instrumented opcode it triggers the event handling pathways. As Numba
# doesn't interpret the bytecode instruction-at-a-time there's not
# really any way to support this. Two things to check...
# 1. The an instrumented code object doesn't trigger events in
# the dispatcher.
# 2. That Numba can compile instrumented functions (it should be able
# to without any problem as the instrumented bytecode should not
# leak into `.co_code`.).
def test_instrumented_code_does_not_trigger_numba_events(self):
# 1. from above.
@jit('int64(int64)',)
def foo(x):
return x + 3
try:
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
callbacks = {}
event_bitmask = 0
events = (PY_START, PY_RETURN)
for event in events:
callback = Mock()
sys.monitoring.register_callback(tool_id, event, callback)
callbacks[event] = callback
event_bitmask |= event
sys.monitoring.set_local_events(tool_id, foo.__code__,
event_bitmask)
result = foo(self.arg)
finally:
for event in events:
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.set_local_events(tool_id, foo.__code__, 0)
sys.monitoring.free_tool_id(tool_id)
# check
self.assertEqual(result, foo.py_func(self.arg))
self.assertEqual(len(callbacks), 2)
callbacks[PY_START].assert_not_called()
callbacks[PY_RETURN].assert_not_called()
def test_instrumented_code_can_be_compiled(self):
# 2. from above.
def foo(x):
return x + 1
try:
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.set_local_events(tool_id, foo.__code__, PY_START)
sys.monitoring.register_callback(tool_id, PY_START, Mock())
# test compile
result = jit(foo)(self.arg)
self.assertEqual(result, foo(self.arg))
finally:
sys.monitoring.register_callback(tool_id, PY_START, None)
sys.monitoring.set_local_events(tool_id, foo.__code__, 0)
sys.monitoring.free_tool_id(tool_id)
def test_unhandled_events_are_ignored(self):
# Check an unhandled event e.g. PY_YIELD isn't reported.
def generate(dec):
@dec('void()')
def producer():
yield 10
@dec('int64()')
def consumer():
p = producer()
return next(p)
return consumer
event = sys.monitoring.events.PY_YIELD
# check that pure python reports
wrapper = lambda sig: lambda fn: fn
py_consumer = generate(wrapper)
py_cb = self.run_with_events(py_consumer, (), (event,))
py_cb[event].assert_called_once()
# check the numba does not report
nb_consumer = generate(jit)
nb_cb = self.run_with_events(nb_consumer, (), (event,))
nb_cb[event].assert_not_called()
def test_event_with_no_callback_runs(self):
# This checks the situation where an event is being monitored but
# there's no callback associated with the event. In the dispatcher C
# code the loop over tools will be entered, but nothing will get called
# as the "instrument" is missing (NULL).
try:
event = PY_START
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.set_events(tool_id, event)
# NO CALLBACK IS REGISTERED!
active_events = sys.monitoring.get_events(tool_id)
self.assertEqual(active_events, event)
result = self.call_foo(self.arg)
active_events = sys.monitoring.get_events(tool_id)
self.assertEqual(active_events, event)
self.assertEqual(result, self.call_foo_result)
finally:
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.free_tool_id(tool_id)
def test_disable_from_callback(self):
# Event callbacks can disable a _local_ event at a specific location to
# prevent it triggering in the future by returning
# `sys.monitoring.DISABLE`. As this only applies to local events, doing
# this should have absolutely no impact for the global events that Numba
# supports.
callback = Mock(return_value=sys.monitoring.DISABLE)
try:
event = PY_START
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.set_events(tool_id, event)
sys.monitoring.register_callback(tool_id, event, callback)
active_events = sys.monitoring.get_events(tool_id)
self.assertEqual(active_events, event)
result = self.call_foo(self.arg)
active_events = sys.monitoring.get_events(tool_id)
self.assertEqual(active_events, event)
self.assertEqual(result, self.call_foo_result)
callback.assert_called()
finally:
# It is necessary to restart events that have been disabled. The
# "disabled" state of the `PY_START` event for the tool
# `self.tool_id` "leaks" into subsequent tests. These subsequent
# tests then end up failing as events that should have been
# triggered are not triggered due to the state leak! It's not really
# clear why this happens, if it is part of the design or a side
# effect of the design, or if this behaviour is simply a bug in
# CPython itself.
sys.monitoring.restart_events()
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.free_tool_id(tool_id)
def test_mutation_from_objmode(self):
try:
# Check that it's possible to enable an event (mutate the event
# state)from an `objmode` block. Monitoring for PY_RETURN is set in
# objmode once the function starts executing.
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
event = PY_RETURN
# register the callback... note that the event isn't switched on yet
callback = Mock()
sys.monitoring.register_callback(tool_id, event, callback)
def objmode_enable_event(switch_on_event):
if switch_on_event:
sys.monitoring.set_events(tool_id, event)
@_enable_sysmon
@jit('int64(int64)')
def foo(enable):
with objmode:
objmode_enable_event(enable)
return enable + 7
# this should not trigger the return callback
foo(0)
callback.assert_not_called()
# this should trigger the return callback
foo(1)
# switch off the event so the callback mock is protected from
# mutation.
sys.monitoring.set_events(tool_id, NO_EVENTS)
# check what happened
callback.assert_called()
# 2 calls, 1 is the return from the objmode_enable_event, the other
# is the return from foo.
self.assertEqual(callback.call_count, 2)
finally:
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.free_tool_id(tool_id)
def test_multiple_tool_id(self):
# Check that multiple tools will work across different combinations of
# events that Numba dispatcher supports, namely:
# (NO_EVENTS, PY_START, PY_RETURN).
# the use of NO_EVENTS is superfluous, it is to demonstrate usage.
tool_ids_2_events = {sys.monitoring.DEBUGGER_ID: (NO_EVENTS,),
sys.monitoring.COVERAGE_ID: (PY_START,),
sys.monitoring.PROFILER_ID: (PY_RETURN,),
sys.monitoring.OPTIMIZER_ID:
(PY_START, PY_RETURN,),}
all_callbacks = {}
try:
for tool_id, events in tool_ids_2_events.items():
sys.monitoring.use_tool_id(tool_id, f"custom_monitor_{tool_id}")
event_bitmask = 0
callbacks = {}
all_callbacks[tool_id] = callbacks
for event in events:
callback = Mock()
# Can't set an event for NO_EVENTS!
if event != NO_EVENTS:
sys.monitoring.register_callback(tool_id, event,
callback)
callbacks[event] = callback
event_bitmask |= event
# only start monitoring once callbacks are registered
for tool_id in tool_ids_2_events.keys():
sys.monitoring.set_events(tool_id, event_bitmask)
self.call_foo(self.arg)
finally:
# clean up state
for tool_id, events in tool_ids_2_events.items():
for event in events:
# Can't remove an event for NO_EVENTS!
if event != NO_EVENTS:
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.free_tool_id(tool_id)
# Now check all_callbacks...
# check debugger tool slot
dbg_tool = all_callbacks[sys.monitoring.DEBUGGER_ID]
self.assertEqual(len(dbg_tool), 1) # one event to capture
callback = dbg_tool[NO_EVENTS]
callback.assert_not_called()
# check coverage tool slot
cov_tool = all_callbacks[sys.monitoring.COVERAGE_ID]
self.assertEqual(len(cov_tool), 1) # one event to capture
self.check_py_start_calls(cov_tool)
# check profiler tool slot
prof_tool = all_callbacks[sys.monitoring.PROFILER_ID]
self.assertEqual(len(prof_tool), 1) # one event to capture
self.check_py_return_calls(prof_tool)
# check optimiser tool slot
opt_tool = all_callbacks[sys.monitoring.OPTIMIZER_ID]
self.assertEqual(len(opt_tool), 2) # two events to capture
self.check_py_start_calls(opt_tool)
self.check_py_return_calls(opt_tool)
def test_raising_under_monitoring(self):
# Check that Numba can raise an exception whilst monitoring is running
# and that 1. `RAISE` is issued 2. `PY_UNWIND` is issued, 3. that
# `PY_RETURN` is not issued.
ret_callback = Mock()
raise_callback = Mock()
unwind_callback = Mock()
msg = 'exception raised'
@_enable_sysmon
@jit('()')
def foo():
raise ValueError(msg)
store_raised = None
try:
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.register_callback(tool_id, PY_RETURN, ret_callback)
sys.monitoring.register_callback(tool_id, RAISE, raise_callback)
sys.monitoring.register_callback(tool_id, PY_UNWIND,
unwind_callback)
sys.monitoring.set_events(tool_id, PY_RETURN | RAISE | PY_UNWIND)
try:
foo()
except ValueError as raises:
store_raised = raises
# switch off monitoring
sys.monitoring.set_events(tool_id, NO_EVENTS)
# check that the ret_callback was called once (by Numba unpickle to
# fetch the exception info out of the stored bytes).
ret_callback.assert_called_once()
# and that elements that are feasible to check about the call are
# as expected
the_call = ret_callback.call_args_list[0]
self.assertEqual(the_call.args[0], _numba_unpickle.__code__)
self.assertEqual(the_call.args[2][0], ValueError)
self.assertEqual(the_call.args[2][1][0], msg)
# check that the RAISE event callback was triggered
raise_callback.assert_called()
numba_unpickle_call = raise_callback.call_args_list[0]
self.assertEqual(numba_unpickle_call.args[0],
_numba_unpickle.__code__)
self.assertIsInstance(numba_unpickle_call.args[2], KeyError)
foo_call = raise_callback.call_args_list[1]
self.assertEqual(foo_call.args[0], foo.py_func.__code__)
self.assertIsInstance(foo_call.args[2], ValueError)
self.assertIn(msg, str(foo_call.args[2]))
# check that PY_UNWIND event callback was called
unwind_callback.assert_called_once()
unwind_call = unwind_callback.call_args_list[0]
self.assertEqual(unwind_call.args[0], foo.py_func.__code__)
self.assertIsInstance(unwind_call.args[2], ValueError)
self.assertIn(msg, str(unwind_call.args[2]))
finally:
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.register_callback(tool_id, PY_RETURN, None)
sys.monitoring.register_callback(tool_id, RAISE, None)
sys.monitoring.register_callback(tool_id, PY_UNWIND, None)
sys.monitoring.free_tool_id(tool_id)
self.assertIn(msg, str(store_raised))
def test_stop_iteration_under_monitoring(self):
# Check that Numba can raise an StopIteration exception whilst
# monitoring is running and that:
# 1. RAISE is issued for an explicitly raised StopIteration exception.
# 2. PY_RETURN is issued appropriately for the unwinding stack
# 3. STOP_ITERATION is not issued as there is no implicit StopIteration
# raised.
return_callback = Mock()
raise_callback = Mock()
stopiter_callback = Mock()
msg = 'exception raised'
@_enable_sysmon
@jit('()')
def foo():
raise StopIteration(msg)
store_raised = None
try:
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.register_callback(tool_id, PY_RETURN,
return_callback)
sys.monitoring.register_callback(tool_id, RAISE,
raise_callback)
sys.monitoring.register_callback(tool_id, STOP_ITERATION,
stopiter_callback)
sys.monitoring.set_events(tool_id,
PY_RETURN | STOP_ITERATION | RAISE)
try:
foo()
except StopIteration as raises:
store_raised = raises
# switch off monitoring
sys.monitoring.set_events(tool_id, NO_EVENTS)
# check that the return_callback was called once (by Numba unpickle
# to fetch the exception info out of the stored bytes).
return_callback.assert_called_once()
# and that elements that are feasible to check about the call are
# as expected
the_call = return_callback.call_args_list[0]
self.assertEqual(the_call.args[0], _numba_unpickle.__code__)
self.assertEqual(the_call.args[2][0], StopIteration)
self.assertEqual(the_call.args[2][1][0], msg)
# check that the RAISE event callback was triggered
raise_callback.assert_called()
# check that it's 3 long (numba unpickle, jit(foo), the test method)
self.assertEqual(raise_callback.call_count, 3)
# check the numba pickle call
numba_unpickle_call = raise_callback.call_args_list[0]
self.assertEqual(numba_unpickle_call.args[0],
_numba_unpickle.__code__)
self.assertIsInstance(numba_unpickle_call.args[2], KeyError)
# check the jit(foo) call
foo_call = raise_callback.call_args_list[1]
self.assertEqual(foo_call.args[0], foo.py_func.__code__)
self.assertIsInstance(foo_call.args[2], StopIteration)
self.assertIn(msg, str(foo_call.args[2]))
# check the test method call
meth_call = raise_callback.call_args_list[2]
test_method_code = sys._getframe().f_code
self.assertEqual(meth_call.args[0], test_method_code)
self.assertIsInstance(meth_call.args[2], StopIteration)
self.assertIn(msg, str(meth_call.args[2]))
# check that the STOP_ITERATION event was not triggered
stopiter_callback.assert_not_called()
finally:
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.register_callback(tool_id, PY_RETURN, None)
sys.monitoring.register_callback(tool_id, STOP_ITERATION, None)
sys.monitoring.register_callback(tool_id, RAISE, None)
sys.monitoring.free_tool_id(tool_id)
self.assertIn(msg, str(store_raised))
def test_raising_callback_unwinds_from_jit_on_success_path(self):
# An event callback can legitimately raise an exception, this test
# makes sure Numba's dispatcher handles it ok on the "successful path",
# i.e. the JIT compiled function didn't raise an exception at runtime.
msg = "deliberately broken callback"
callback = Mock(side_effect=ValueError(msg))
store_raised = None
try:
event = PY_START
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.set_events(tool_id, event)
sys.monitoring.register_callback(tool_id, event, callback)
self.foo(self.arg)
except ValueError as raises:
store_raised = raises
finally:
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.free_tool_id(tool_id)
callback.assert_called_once()
self.assertIn(msg, str(store_raised))
def test_raising_callback_unwinds_from_jit_on_raising_path(self):
# An event callback can legitimately raise an exception, this test
# makes sure Numba's dispatcher handles it ok on the
# "unsuccessful path", i.e. the JIT compiled function raised an
# exception at runtime. This test checks the RAISE event, as the
# callback itself raises, it overrides the exception coming from the
# JIT compiled function.
msg_callback = "deliberately broken callback"
msg_execution = "deliberately broken execution"
callback = Mock(side_effect=ValueError(msg_callback))
class LocalException(Exception):
pass
@_enable_sysmon
@jit("()")
def raising():
raise LocalException(msg_execution)
store_raised = None
try:
event = RAISE
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.set_events(tool_id, event)
sys.monitoring.register_callback(tool_id, event, callback)
raising()
except ValueError as raises:
store_raised = raises
finally:
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.free_tool_id(tool_id)
callback.assert_called()
# Called 3x (numba unpickle, ValueError in callback, the test method)
self.assertEqual(callback.call_count, 3)
# check the numba unpickle call
numba_unpickle_call = callback.call_args_list[0]
self.assertEqual(numba_unpickle_call.args[0], _numba_unpickle.__code__)
self.assertIsInstance(numba_unpickle_call.args[2], KeyError)
# check the jit(raising) call
raising_call = callback.call_args_list[1]
self.assertEqual(raising_call.args[0], raising.py_func.__code__)
self.assertIs(raising_call.args[2], callback.side_effect)
# check the test method call
meth_call = callback.call_args_list[2]
test_method_code = sys._getframe().f_code
self.assertEqual(meth_call.args[0], test_method_code)
self.assertIs(meth_call.args[2], callback.side_effect)
# check the stored exception is the expected exception
self.assertIs(store_raised, callback.side_effect)
def test_raising_callback_unwinds_from_jit_on_unwind_path(self):
# An event callback can legitimately raise an exception, this test
# makes sure Numba's dispatcher handles it ok on the
# "unsuccessful path", i.e. the JIT compiled function raised an
# exception at runtime. This test checks the PY_UNWIND event. CPython
# seems to not notice the PY_UNWIND coming from the exception arising
# from the raise in the event callback, it just has the PY_UNWIND from
# the raise in the JIT compiled function.
msg_callback = "deliberately broken callback"
msg_execution = "deliberately broken execution"
callback = Mock(side_effect=ValueError(msg_callback))
class LocalException(Exception):
pass
@_enable_sysmon
@jit("()")
def raising():
raise LocalException(msg_execution)
store_raised = None
try:
event = PY_UNWIND
tool_id = self.tool_id
sys.monitoring.use_tool_id(tool_id, "custom_monitor")
sys.monitoring.set_events(tool_id, event)
sys.monitoring.register_callback(tool_id, event, callback)
raising()
except ValueError as raises:
store_raised = raises
finally:
sys.monitoring.register_callback(tool_id, event, None)
sys.monitoring.set_events(tool_id, NO_EVENTS)
sys.monitoring.free_tool_id(tool_id)
callback.assert_called_once()
# check the jit(raising) call
raising_call = callback.call_args_list[0]
self.assertEqual(raising_call.args[0], raising.py_func.__code__)
self.assertEqual(type(raising_call.args[2]), LocalException)
self.assertEqual(str(raising_call.args[2]), msg_execution)
# check the stored_raise
self.assertIs(store_raised, callback.side_effect)
def test_monitoring_multiple_threads(self):
# Two threads, different tools and events registered on each thread.
# Each test creates a global event capturing. The threads use barriers
# to wait for each other to start and stop capturing. This way they
# see the events from each other. One thread is capturing PY_START
# and the other is capturing PY_RETURN.
barrier = threading.Barrier(2)
def barrier_cb():
barrier.wait()
def t1_work(self, q):
try:
# test event PY_START on a "debugger tool"
cb = self.run_with_events(self.call_foo, (self.arg,),
(PY_START,),
tool_id=sys.monitoring.DEBUGGER_ID,
barrier=barrier_cb)
# Check...
self.assertEqual(len(cb), 1)
self.check_py_start_calls_multithreads(cb)
except Exception as e:
q.put(''.join(traceback.format_exception(e)))
def t2_work(self, q):
try:
# test event PY_RETURN on a "coverage tool"
cb = self.run_with_events(self.call_foo, (self.arg,),
(PY_RETURN,),
tool_id=sys.monitoring.COVERAGE_ID,
barrier=barrier_cb)
# Check...
self.assertEqual(len(cb), 1)
self.check_py_return_calls_multithreads(cb)
except Exception as e:
q.put(''.join(traceback.format_exception(e)))
q1 = queue.Queue()
t1 = threading.Thread(target=t1_work, args=(self, q1))
q2 = queue.Queue()
t2 = threading.Thread(target=t2_work, args=(self, q2))
threads = (t1, t2)
for t in threads:
t.start()
for t in threads:
t.join()
# make sure there were no exceptions
def assert_empty_queue(q):
if q.qsize() != 0:
while not q.empty():
print(q.get())
self.fail("queue supposed to be empty")
assert_empty_queue(q1)
assert_empty_queue(q2)
@unittest.skipUnless(PYVERSION >= (3, 12), "needs Python 3.12+")
| TestMonitoring |
python | sanic-org__sanic | sanic/application/state.py | {
"start": 689,
"end": 3610
} | class ____:
"""Application state.
This class is used to store the state of the application. It is
instantiated by the application and is available as `app.state`.
"""
app: Sanic
asgi: bool = field(default=False)
coffee: bool = field(default=False)
fast: bool = field(default=False)
host: str = field(default="")
port: int = field(default=0)
ssl: Optional[SSLContext] = field(default=None)
sock: Optional[socket] = field(default=None)
unix: Optional[str] = field(default=None)
mode: Mode = field(default=Mode.PRODUCTION)
reload_dirs: set[Path] = field(default_factory=set)
auto_reload: bool = field(default=False)
server: Server = field(default=Server.SANIC)
is_running: bool = field(default=False)
is_started: bool = field(default=False)
is_stopping: bool = field(default=False)
verbosity: int = field(default=0)
workers: int = field(default=0)
primary: bool = field(default=True)
server_info: list[ApplicationServerInfo] = field(default_factory=list)
# This property relates to the ApplicationState instance and should
# not be changed except in the __post_init__ method
_init: bool = field(default=False)
def __post_init__(self) -> None:
self._init = True
def __setattr__(self, name: str, value: Any) -> None:
if self._init and name == "_init":
raise RuntimeError(
"Cannot change the value of _init after instantiation"
)
super().__setattr__(name, value)
if self._init and hasattr(self, f"set_{name}"):
getattr(self, f"set_{name}")(value)
def set_mode(self, value: Union[str, Mode]):
if hasattr(self.app, "error_handler"):
self.app.error_handler.debug = self.app.debug
if getattr(self.app, "configure_logging", False) and self.app.debug:
logger.setLevel(logging.DEBUG)
def set_verbosity(self, value: int) -> None:
"""Set the verbosity level.
Args:
value (int): Verbosity level.
"""
VerbosityFilter.verbosity = value
@property
def is_debug(self) -> bool:
"""Check if the application is in debug mode.
Returns:
bool: `True` if the application is in debug mode, `False`
otherwise.
"""
return self.mode is Mode.DEBUG
@property
def stage(self) -> ServerStage:
"""Get the server stage.
Returns:
ServerStage: Server stage.
"""
if not self.server_info:
return ServerStage.STOPPED
if all(info.stage is ServerStage.SERVING for info in self.server_info):
return ServerStage.SERVING
elif any(
info.stage is ServerStage.SERVING for info in self.server_info
):
return ServerStage.PARTIAL
return ServerStage.STOPPED
| ApplicationState |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/generate_erd/pipeline.py | {
"start": 4793,
"end": 7308
} | class ____(Step):
context: ConnectorContext
title = "Upload DBML file to dbdocs.io"
def __init__(self, context: PipelineContext) -> None:
super().__init__(context)
async def _run(self, erd_directory: Directory) -> StepResult:
if not self.context.dbdocs_token:
raise ValueError(
"In order to publish to dbdocs, DBDOCS_TOKEN needs to be provided. Either pass the value or skip the publish step"
)
dbdocs_container = (
self.dagger_client.container()
.from_("node:lts-bullseye-slim")
.with_exec(["npm", "install", "-g", "dbdocs"], use_entrypoint=True)
.with_env_variable("DBDOCS_TOKEN", self.context.dbdocs_token.value)
.with_workdir("/airbyte_dbdocs")
.with_file("/airbyte_dbdocs/dbdocs.dbml", erd_directory.file("source.dbml"))
)
db_docs_build = ["dbdocs", "build", "dbdocs.dbml", f"--project={self.context.connector.technical_name}"]
await dbdocs_container.with_exec(db_docs_build).stdout()
# TODO: produce link to dbdocs in output logs
return StepResult(step=self, status=StepStatus.SUCCESS)
async def run_connector_generate_erd_pipeline(
context: ConnectorContext,
semaphore: "Semaphore",
skip_steps: List[str],
) -> Report:
context.targeted_platforms = [LOCAL_BUILD_PLATFORM]
steps_to_run: STEP_TREE = []
steps_to_run.append([StepToRun(id=CONNECTOR_TEST_STEP_ID.BUILD, step=BuildConnectorImages(context))])
steps_to_run.append(
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.DBML_FILE,
step=GenerateDbml(context, CONNECTOR_TEST_STEP_ID.LLM_RELATIONSHIPS in skip_steps),
args=lambda results: {"connector_to_discover": results[CONNECTOR_TEST_STEP_ID.BUILD].output[LOCAL_BUILD_PLATFORM]},
depends_on=[CONNECTOR_TEST_STEP_ID.BUILD],
),
]
)
if CONNECTOR_TEST_STEP_ID.PUBLISH_ERD not in skip_steps:
steps_to_run.append(
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.PUBLISH_ERD,
step=UploadDbmlSchema(context),
args=lambda results: {"erd_directory": results[CONNECTOR_TEST_STEP_ID.DBML_FILE].output},
depends_on=[CONNECTOR_TEST_STEP_ID.DBML_FILE],
),
]
)
return await run_connector_steps(context, semaphore, steps_to_run)
| UploadDbmlSchema |
python | euske__pdfminer | pdfminer/psparser.py | {
"start": 367,
"end": 472
} | class ____:
"""Base class for all PS or PDF-related data types."""
pass
## PSLiteral
##
| PSObject |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 94874,
"end": 97195
} | class ____(SimpleHandlerTestCase):
def get_handlers(self):
test = self
self.server_error = None
# Manually set a content-length that doesn't match the actual content.
class TooHigh(RequestHandler):
def get(self):
self.set_header("Content-Length", "42")
try:
self.finish("ok")
except Exception as e:
test.server_error = e
raise
class TooLow(RequestHandler):
def get(self):
self.set_header("Content-Length", "2")
try:
self.finish("hello")
except Exception as e:
test.server_error = e
raise
return [("/high", TooHigh), ("/low", TooLow)]
def test_content_length_too_high(self):
# When the content-length is too high, the connection is simply
# closed without completing the response. An error is logged on
# the server.
with ExpectLog(app_log, "(Uncaught exception|Exception in callback)"):
with ExpectLog(
gen_log,
"(Cannot send error response after headers written"
"|Failed to flush partial response)",
):
with self.assertRaises(HTTPClientError):
self.fetch("/high", raise_error=True)
self.assertEqual(
str(self.server_error), "Tried to write 40 bytes less than Content-Length"
)
def test_content_length_too_low(self):
# When the content-length is too low, the connection is closed
# without writing the last chunk, so the client never sees the request
# complete (which would be a framing error).
with ExpectLog(app_log, "(Uncaught exception|Exception in callback)"):
with ExpectLog(
gen_log,
"(Cannot send error response after headers written"
"|Failed to flush partial response)",
):
with self.assertRaises(HTTPClientError):
self.fetch("/low", raise_error=True)
self.assertEqual(
str(self.server_error), "Tried to write more data than Content-Length"
)
| IncorrectContentLengthTest |
python | doocs__leetcode | solution/3100-3199/3195.Find the Minimum Area to Cover All Ones I/Solution.py | {
"start": 0,
"end": 413
} | class ____:
def minimumArea(self, grid: List[List[int]]) -> int:
x1 = y1 = inf
x2 = y2 = -inf
for i, row in enumerate(grid):
for j, x in enumerate(row):
if x == 1:
x1 = min(x1, i)
y1 = min(y1, j)
x2 = max(x2, i)
y2 = max(y2, j)
return (x2 - x1 + 1) * (y2 - y1 + 1)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/account-balance-after-rounded-purchase.py | {
"start": 36,
"end": 242
} | class ____(object):
def accountBalanceAfterPurchase(self, purchaseAmount):
"""
:type purchaseAmount: int
:rtype: int
"""
return 100-(purchaseAmount+5)//10*10
| Solution |
python | lxml__lxml | src/lxml/tests/test_etree.py | {
"start": 179193,
"end": 184808
} | class ____(_XIncludeTestCase):
from lxml import ElementInclude
def include(self, tree, loader=None, max_depth=None):
self.ElementInclude.include(tree.getroot(), loader=loader, max_depth=max_depth)
XINCLUDE = {}
XINCLUDE["Recursive1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source code of Recursive2.xml:</p>
<xi:include href="Recursive2.xml"/>
</document>
"""
XINCLUDE["Recursive2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source code of Recursive3.xml:</p>
<xi:include href="Recursive3.xml"/>
</document>
"""
XINCLUDE["Recursive3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source code of Recursive1.xml:</p>
<xi:include href="Recursive1.xml"/>
</document>
"""
XINCLUDE["NonRecursive1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is multiple times the source code of NonRecursive3.xml:</p>
<xi:include href="NonRecursive3.xml"/>
<xi:include href="NonRecursive3.xml"/>
<p>The following is multiple times the source code of Leaf.xml:</p>
<xi:include href="Leaf.xml"/>
<xi:include href="Leaf.xml"/>
<xi:include href="Leaf.xml"/>
<p>One more time the source code of NonRecursive3.xml:</p>
<xi:include href="NonRecursive3.xml"/>
</document>
"""
XINCLUDE["NonRecursive2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is multiple times the source code of NonRecursive3.xml:</p>
<xi:include href="NonRecursive3.xml"/>
<xi:include href="NonRecursive3.xml"/>
</document>
"""
XINCLUDE["NonRecursive3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is multiple times the source code of Leaf.xml:</p>
<xi:include href="Leaf.xml"/>
<xi:include href="Leaf.xml"/>
</document>
"""
XINCLUDE["Leaf.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>No further includes</p>
</document>
"""
def xinclude_loader(self, href, parse="xml", encoding=None):
try:
data = textwrap.dedent(self.XINCLUDE[href])
except KeyError:
raise OSError("resource not found")
if parse == "xml":
data = etree.fromstring(data)
return data
def test_xinclude_failures(self):
# Test infinitely recursive includes.
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.FatalIncludeError) as cm:
self.include(document, self.xinclude_loader)
self.assertEqual(str(cm.exception),
"recursive include of 'Recursive2.xml' detected")
# Test 'max_depth' limitation.
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.FatalIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=None)
self.assertEqual(str(cm.exception),
"recursive include of 'Recursive2.xml' detected")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.LimitedRecursiveIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=0)
self.assertEqual(str(cm.exception),
"maximum xinclude depth reached when including file Recursive2.xml")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.LimitedRecursiveIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=1)
self.assertEqual(str(cm.exception),
"maximum xinclude depth reached when including file Recursive3.xml")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.LimitedRecursiveIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=2)
self.assertEqual(str(cm.exception),
"maximum xinclude depth reached when including file Recursive1.xml")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.FatalIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=3)
self.assertEqual(str(cm.exception),
"recursive include of 'Recursive2.xml' detected")
def test_multiple_include_of_same_file(self):
# Test that including the same file multiple times, but on the same level
# is not detected as recursive include
document = self.xinclude_loader("NonRecursive3.xml").getroottree()
self.include(document, self.xinclude_loader)
# same but for more than one level
document = self.xinclude_loader("NonRecursive1.xml").getroottree()
self.include(document, self.xinclude_loader)
# same but no Leaf.xml in top-level file
document = self.xinclude_loader("NonRecursive2.xml").getroottree()
self.include(document, self.xinclude_loader)
| ElementIncludeTestCase |
python | django-haystack__django-haystack | test_haystack/elasticsearch7_tests/test_backend.py | {
"start": 56905,
"end": 58597
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Wipe it clean.
clear_elasticsearch_index()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.srtsi = Elasticsearch7RoundTripSearchIndex()
self.ui.build(indexes=[self.srtsi])
connections["elasticsearch"]._index = self.ui
self.sb = connections["elasticsearch"].get_backend()
self.sqs = SearchQuerySet("elasticsearch")
# Fake indexing.
mock = MockModel()
mock.id = 1
self.sb.update(self.srtsi, [mock])
def tearDown(self):
# Restore.
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def test_round_trip(self):
results = self.sqs.filter(id="core.mockmodel.1")
# Sanity check.
self.assertEqual(results.count(), 1)
# Check the individual fields.
result = results[0]
self.assertEqual(result.id, "core.mockmodel.1")
self.assertEqual(result.text, "This is some example text.")
self.assertEqual(result.name, "Mister Pants")
self.assertEqual(result.is_active, True)
self.assertEqual(result.post_count, 25)
self.assertEqual(result.average_rating, 3.6)
self.assertEqual(result.price, "24.99")
self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))
self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))
self.assertEqual(result.tags, ["staff", "outdoor", "activist", "scientist"])
self.assertEqual(result.sites, [3, 5, 1])
| LiveElasticsearch7RoundTripTestCase |
python | sympy__sympy | sympy/holonomic/holonomicerrors.py | {
"start": 476,
"end": 615
} | class ____(BaseHolonomicError):
def __init__(self, m):
self.m = m
def __str__(self):
return self.m
| NotHolonomicError |
python | ray-project__ray | python/ray/data/collate_fn.py | {
"start": 5354,
"end": 5857
} | class ____(CollateFn[Dict[str, np.ndarray]]):
"""Collate function that takes a dictionary of numpy arrays as the input batch type."""
def __call__(self, batch: Dict[str, np.ndarray]) -> "CollatedData":
"""Convert a batch of numpy arrays to collated format.
Args:
batch: The input dictionary of numpy arrays batch to collate.
Returns:
The collated data in the format expected by the model.
"""
...
@DeveloperAPI
| NumpyBatchCollateFn |
python | pypa__pipenv | pipenv/vendor/tomlkit/exceptions.py | {
"start": 2535,
"end": 2817
} | class ____(ParseError):
"""
An unexpected character was found during parsing.
"""
def __init__(self, line: int, col: int, char: str) -> None:
message = f"Unexpected character: {char!r}"
super().__init__(line, col, message=message)
| UnexpectedCharError |
python | lepture__authlib | authlib/oauth2/rfc6749/resource_protector.py | {
"start": 350,
"end": 2887
} | class ____:
"""Base token validator class. Subclass this validator to register
into ResourceProtector instance.
"""
TOKEN_TYPE = "bearer"
def __init__(self, realm=None, **extra_attributes):
self.realm = realm
self.extra_attributes = extra_attributes
@staticmethod
def scope_insufficient(token_scopes, required_scopes):
if not required_scopes:
return False
token_scopes = scope_to_list(token_scopes)
if not token_scopes:
return True
token_scopes = set(token_scopes)
for scope in required_scopes:
resource_scopes = set(scope_to_list(scope))
if token_scopes.issuperset(resource_scopes):
return False
return True
def authenticate_token(self, token_string):
"""A method to query token from database with the given token string.
Developers MUST re-implement this method. For instance::
def authenticate_token(self, token_string):
return get_token_from_database(token_string)
:param token_string: A string to represent the access_token.
:return: token
"""
raise NotImplementedError()
def validate_request(self, request):
"""A method to validate if the HTTP request is valid or not. Developers MUST
re-implement this method. For instance, your server requires a
"X-Device-Version" in the header::
def validate_request(self, request):
if "X-Device-Version" not in request.headers:
raise InvalidRequestError()
Usually, you don't have to detect if the request is valid or not. If you have
to, you MUST re-implement this method.
:param request: instance of HttpRequest
:raise: InvalidRequestError
"""
def validate_token(self, token, scopes, request):
"""A method to validate if the authorized token is valid, if it has the
permission on the given scopes. Developers MUST re-implement this method.
e.g, check if token is expired, revoked::
def validate_token(self, token, scopes, request):
if not token:
raise InvalidTokenError()
if token.is_expired() or token.is_revoked():
raise InvalidTokenError()
if not match_token_scopes(token, scopes):
raise InsufficientScopeError()
"""
raise NotImplementedError()
| TokenValidator |
python | streamlit__streamlit | lib/streamlit/elements/lib/mutable_status_container.py | {
"start": 1339,
"end": 7101
} | class ____(DeltaGenerator):
@staticmethod
def _create(
parent: DeltaGenerator,
label: str,
expanded: bool = False,
state: States = "running",
width: WidthWithoutContent = "stretch",
) -> StatusContainer:
expandable_proto = BlockProto.Expandable()
expandable_proto.expanded = expanded
expandable_proto.label = label or ""
if state == "running":
expandable_proto.icon = "spinner"
elif state == "complete":
expandable_proto.icon = ":material/check:"
elif state == "error":
expandable_proto.icon = ":material/error:"
else:
raise StreamlitAPIException(
f"Unknown state ({state}). Must be one of 'running', 'complete', or 'error'."
)
block_proto = BlockProto()
block_proto.allow_empty = True
block_proto.expandable.CopyFrom(expandable_proto)
validate_width(width=width)
block_proto.width_config.CopyFrom(get_width_config(width))
delta_path: list[int] = (
parent._active_dg._cursor.delta_path if parent._active_dg._cursor else []
)
status_container = cast(
"StatusContainer",
parent._block(block_proto=block_proto, dg_type=StatusContainer),
)
# Apply initial configuration
status_container._delta_path = delta_path
status_container._current_proto = block_proto
status_container._current_state = state
# We need to sleep here for a very short time to prevent issues when
# the status is updated too quickly. If an .update() directly follows the
# the initialization, sometimes only the latest update is applied.
# Adding a short timeout here allows the frontend to render the update before.
time.sleep(0.05)
return status_container
def __init__(
self,
root_container: int | None,
cursor: Cursor | None,
parent: DeltaGenerator | None,
block_type: str | None,
) -> None:
super().__init__(root_container, cursor, parent, block_type)
# Initialized in `_create()`:
self._current_proto: BlockProto | None = None
self._current_state: States | None = None
self._delta_path: list[int] | None = None
def update(
self,
*,
label: str | None = None,
expanded: bool | None = None,
state: States | None = None,
) -> None:
"""Update the status container.
Only specified arguments are updated. Container contents and unspecified
arguments remain unchanged.
Parameters
----------
label : str or None
A new label of the status container. If None, the label is not
changed.
expanded : bool or None
The new expanded state of the status container. If None,
the expanded state is not changed.
state : "running", "complete", "error", or None
The new state of the status container. This mainly changes the
icon. If None, the state is not changed.
"""
if self._current_proto is None or self._delta_path is None:
raise RuntimeError(
"StatusContainer is not correctly initialized. This should never happen."
)
msg = ForwardMsg()
msg.metadata.delta_path[:] = self._delta_path
msg.delta.add_block.CopyFrom(self._current_proto)
if expanded is not None:
msg.delta.add_block.expandable.expanded = expanded
else:
msg.delta.add_block.expandable.ClearField("expanded")
if label is not None:
msg.delta.add_block.expandable.label = label
if state is not None:
if state == "running":
msg.delta.add_block.expandable.icon = "spinner"
elif state == "complete":
msg.delta.add_block.expandable.icon = ":material/check:"
elif state == "error":
msg.delta.add_block.expandable.icon = ":material/error:"
else:
raise StreamlitAPIException(
f"Unknown state ({state}). Must be one of 'running', 'complete', or 'error'."
)
self._current_state = state
self._current_proto = msg.delta.add_block
enqueue_message(msg)
def __enter__(self) -> Self: # type: ignore[override]
# This is a little dubious: we're returning a different type than
# our superclass' `__enter__` function. Maybe DeltaGenerator.__enter__
# should always return `self`?
super().__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> Literal[False]:
# Only update if the current state is running
if self._current_state == "running":
# We need to sleep here for a very short time to prevent issues when
# the status is updated too quickly. If an .update() is directly followed
# by the exit of the context manager, sometimes only the last update
# (to complete) is applied. Adding a short timeout here allows the frontend
# to render the update before.
time.sleep(0.05)
if exc_type is not None:
# If an exception was raised in the context,
# we want to update the status to error.
self.update(state="error")
else:
self.update(state="complete")
return super().__exit__(exc_type, exc_val, exc_tb)
| StatusContainer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.